Dataset columns: text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars)
# Copyright (c) 2016-2021, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# binary_file_io.py
Basic file I/O blocks for reading and writing data.
"""
import numpy as np
import bifrost.pipeline as bfp
from bifrost.dtype import name_nbit2numpy
from bifrost import telemetry
telemetry.track_module()
class BinaryFileRead(object):
""" Simple file-like reading object for pipeline testing
Args:
filename (str): Name of file to open
gulp_size (int): How much data to read per gulp (i.e. sub-array size)
dtype (np.dtype or str): Datatype of the data, e.g. float32. This should be a *numpy*
dtype, not a bifrost.ndarray dtype (e.g. float32, not f32)
"""
def __init__(self, filename, gulp_size, dtype):
super(BinaryFileRead, self).__init__()
self.file_obj = open(filename, 'rb')
self.dtype = dtype
self.gulp_size = gulp_size
def read(self):
d = np.fromfile(self.file_obj, dtype=self.dtype, count=self.gulp_size)
return d
def __enter__(self):
return self
def close(self):
pass
def __exit__(self, type, value, tb):
self.close()
class BinaryFileReadBlock(bfp.SourceBlock):
def __init__(self, filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs):
super(BinaryFileReadBlock, self).__init__(filenames, gulp_nframe, *args, **kwargs)
self.dtype = dtype
self.gulp_size = gulp_size
def create_reader(self, filename):
# Do a lookup on bifrost datatype to numpy datatype
dcode = self.dtype.rstrip('0123456789')
nbits = int(self.dtype[len(dcode):])
np_dtype = name_nbit2numpy(dcode, nbits)
return BinaryFileRead(filename, self.gulp_size, np_dtype)
def on_sequence(self, ireader, filename):
ohdr = {
'name': filename,
'_tensor': {
'dtype': self.dtype,
'shape': [-1, self.gulp_size],
'labels': ['streamed', 'gulped'],
'units': [None, None],
'scales': [[0, 1], [0, 1]]
},
}
return [ohdr]
def on_data(self, reader, ospans):
indata = reader.read()
if indata.shape[0] == self.gulp_size:
ospans[0].data[0] = indata
return [1]
else:
return [0]
class BinaryFileWriteBlock(bfp.SinkBlock):
def __init__(self, iring, file_ext='out', *args, **kwargs):
super(BinaryFileWriteBlock, self).__init__(iring, *args, **kwargs)
self.current_fileobj = None
self.file_ext = file_ext
def on_sequence(self, iseq):
if self.current_fileobj is not None:
self.current_fileobj.close()
new_filename = iseq.header['name'] + '.' + self.file_ext
self.current_fileobj = open(new_filename, 'wb')
def on_data(self, ispan):
self.current_fileobj.write(ispan.data.tobytes())
def binary_read(filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs):
""" Block for reading binary data from file and streaming it into a bifrost pipeline
Args:
filenames (list): A list of filenames to open
gulp_size (int): Number of elements in a gulp (i.e. sub-array size)
gulp_nframe (int): Number of frames to process per gulp
dtype (bifrost dtype string): dtype, e.g. f32, cf32
"""
return BinaryFileReadBlock(filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs)
def binary_write(iring, file_ext='out', *args, **kwargs):
""" Write ring data to a binary file
Args:
file_ext (str): Output file extension, appended to the header 'name' after a '.'. Defaults to 'out'
Notes:
output filename is generated from the header 'name' keyword + file_ext
"""
return BinaryFileWriteBlock(iring, file_ext, *args, **kwargs)
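# Illustrative usage sketch (not part of the original module): one plausible way to
# chain these blocks into a pipeline that copies a raw binary file. The filename
# 'example.bin', the gulp sizes and the use of bfp.get_default_pipeline() are
# assumptions for illustration only.
#
#     b_read = binary_read(['example.bin'], gulp_size=4096, gulp_nframe=1, dtype='f32')
#     b_write = binary_write(b_read, file_ext='copy')
#     pipeline = bfp.get_default_pipeline()
#     pipeline.run()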
repo_name: ledatelescope/bifrost | path: python/bifrost/blocks/binary_io.py | language: Python | license: bsd-3-clause | size: 5,322 | keyword: ["GULP"] | text_hash: 705e6a96a2b772f8d3a48ed3f345d597d33527929f0ba6e3de432e9b0ac6184d
# -*- coding: utf-8 -*-
""" GIS Module
@requires: U{B{I{gluon}} <http://web2py.com>}
@requires: U{B{I{shapely}} <http://trac.gispython.org/lab/wiki/Shapely>}
@copyright: (c) 2010-2015 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("GIS",
"S3Map",
"S3ExportPOI",
"S3ImportPOI",
)
import datetime # Needed for Feed Refresh checks & web2py version check
import os
import re
import sys
#import logging
import urllib # Needed for urlencoding
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
try:
from lxml import etree # Needed to follow NetworkLinks
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
KML_NAMESPACE = "http://earth.google.com/kml/2.2"
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
# Here are dependencies listed for reference:
#from gluon import current
#from gluon.html import *
#from gluon.http import HTTP, redirect
from gluon.fileutils import parse_version
from gluon.languages import lazyT, regex_translate
from gluon.storage import Storage
from s3dal import Rows
from s3fields import s3_all_meta_field_names
from s3rest import S3Method
from s3track import S3Trackable
from s3utils import s3_include_ext, s3_unicode
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3GIS: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# Map WKT types to db types
GEOM_TYPES = {"point": 1,
"linestring": 2,
"polygon": 3,
"multipoint": 4,
"multilinestring": 5,
"multipolygon": 6,
"geometrycollection": 7,
}
# km
RADIUS_EARTH = 6371.01
# Compact JSON encoding
SEPARATORS = (",", ":")
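# For example (a sketch of the intended use, not an actual call site from this file):
#     json.dumps({"lat": 51.5}, separators=SEPARATORS)
# produces '{"lat":51.5}', i.e. no whitespace after commas or colons.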
# Map Defaults
# Also in static/S3/s3.gis.js
# http://dev.openlayers.org/docs/files/OpenLayers/Strategy/Cluster-js.html
CLUSTER_ATTRIBUTE = "colour"
CLUSTER_DISTANCE = 20 # pixels
CLUSTER_THRESHOLD = 2 # minimum # of features to form a cluster
# Garmin GPS Symbols
GPS_SYMBOLS = ("Airport",
"Amusement Park"
"Ball Park",
"Bank",
"Bar",
"Beach",
"Bell",
"Boat Ramp",
"Bowling",
"Bridge",
"Building",
"Campground",
"Car",
"Car Rental",
"Car Repair",
"Cemetery",
"Church",
"Circle with X",
"City (Capitol)",
"City (Large)",
"City (Medium)",
"City (Small)",
"Civil",
"Contact, Dreadlocks",
"Controlled Area",
"Convenience Store",
"Crossing",
"Dam",
"Danger Area",
"Department Store",
"Diver Down Flag 1",
"Diver Down Flag 2",
"Drinking Water",
"Exit",
"Fast Food",
"Fishing Area",
"Fitness Center",
"Flag",
"Forest",
"Gas Station",
"Geocache",
"Geocache Found",
"Ghost Town",
"Glider Area",
"Golf Course",
"Green Diamond",
"Green Square",
"Heliport",
"Horn",
"Hunting Area",
"Information",
"Levee",
"Light",
"Live Theater",
"Lodging",
"Man Overboard",
"Marina",
"Medical Facility",
"Mile Marker",
"Military",
"Mine",
"Movie Theater",
"Museum",
"Navaid, Amber",
"Navaid, Black",
"Navaid, Blue",
"Navaid, Green",
"Navaid, Green/Red",
"Navaid, Green/White",
"Navaid, Orange",
"Navaid, Red",
"Navaid, Red/Green",
"Navaid, Red/White",
"Navaid, Violet",
"Navaid, White",
"Navaid, White/Green",
"Navaid, White/Red",
"Oil Field",
"Parachute Area",
"Park",
"Parking Area",
"Pharmacy",
"Picnic Area",
"Pizza",
"Post Office",
"Private Field",
"Radio Beacon",
"Red Diamond",
"Red Square",
"Residence",
"Restaurant",
"Restricted Area",
"Restroom",
"RV Park",
"Scales",
"Scenic Area",
"School",
"Seaplane Base",
"Shipwreck",
"Shopping Center",
"Short Tower",
"Shower",
"Skiing Area",
"Skull and Crossbones",
"Soft Field",
"Stadium",
"Summit",
"Swimming Area",
"Tall Tower",
"Telephone",
"Toll Booth",
"TracBack Point",
"Trail Head",
"Truck Stop",
"Tunnel",
"Ultralight Area",
"Water Hydrant",
"Waypoint",
"White Buoy",
"White Dot",
"Zoo"
)
# -----------------------------------------------------------------------------
class GIS(object):
"""
GeoSpatial functions
"""
# Used to disable location tree updates during prepopulate.
# It is not appropriate to use auth.override for this, as there are times
# (e.g. during tests) when auth.override is turned on, but location tree
# updates should still be enabled.
disable_update_location_tree = False
def __init__(self):
messages = current.messages
#messages.centroid_error = str(A("Shapely", _href="http://pypi.python.org/pypi/Shapely/", _target="_blank")) + " library not found, so can't find centroid!"
messages.centroid_error = "Shapely library not functional, so can't find centroid! Install Geos & Shapely for Line/Polygon support"
messages.unknown_type = "Unknown Type!"
messages.invalid_wkt_point = "Invalid WKT: must be like POINT(3 4)"
messages.invalid_wkt = "Invalid WKT: see http://en.wikipedia.org/wiki/Well-known_text"
messages.lon_empty = "Invalid: Longitude can't be empty if Latitude specified!"
messages.lat_empty = "Invalid: Latitude can't be empty if Longitude specified!"
messages.unknown_parent = "Invalid: %(parent_id)s is not a known Location"
self.DEFAULT_SYMBOL = "White Dot"
self.hierarchy_level_keys = ("L0", "L1", "L2", "L3", "L4", "L5")
self.hierarchy_levels = {}
self.max_allowed_level_num = 4
self.relevant_hierarchy_levels = None
# -------------------------------------------------------------------------
@staticmethod
def gps_symbols():
return GPS_SYMBOLS
# -------------------------------------------------------------------------
def download_kml(self, record_id, filename, session_id_name, session_id):
"""
Download a KML file:
- unzip it if-required
- follow NetworkLinks recursively if-required
Save the file to the /uploads folder
Designed to be called asynchronously using:
current.s3task.async("download_kml", [record_id, filename])
@param record_id: id of the record in db.gis_layer_kml
@param filename: name to save the file as
@param session_id_name: name of the session
@param session_id: id of the session
@ToDo: Pass error messages to Result & have JavaScript listen for these
"""
request = current.request
table = current.s3db.gis_layer_kml
record = current.db(table.id == record_id).select(table.url,
limitby=(0, 1)
).first()
url = record.url
filepath = os.path.join(request.global_settings.applications_parent,
request.folder,
"uploads",
"gis_cache",
filename)
warning = self.fetch_kml(url, filepath, session_id_name, session_id)
# @ToDo: Handle errors
#query = (cachetable.name == name)
if "URLError" in warning or "HTTPError" in warning:
# URL inaccessible
if os.access(filepath, os.R_OK):
statinfo = os.stat(filepath)
if statinfo.st_size:
# Use cached version
#date = db(query).select(cachetable.modified_on,
# limitby=(0, 1)).first().modified_on
#response.warning += "%s %s %s\n" % (url,
# T("not accessible - using cached version from"),
# str(date))
#url = URL(c="default", f="download",
# args=[filename])
pass
else:
# 0k file is all that is available
#response.warning += "%s %s\n" % (url,
# T("not accessible - no cached version available!"))
# skip layer
return
else:
# No cached version available
#response.warning += "%s %s\n" % (url,
# T("not accessible - no cached version available!"))
# skip layer
return
else:
# Download was successful
#db(query).update(modified_on=request.utcnow)
if "ParseError" in warning:
# @ToDo Parse detail
#response.warning += "%s: %s %s\n" % (T("Layer"),
# name,
# T("couldn't be parsed so NetworkLinks not followed."))
pass
if "GroundOverlay" in warning or "ScreenOverlay" in warning:
#response.warning += "%s: %s %s\n" % (T("Layer"),
# name,
# T("includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly."))
# Code to support GroundOverlay:
# https://github.com/openlayers/openlayers/pull/759
pass
# -------------------------------------------------------------------------
def fetch_kml(self, url, filepath, session_id_name, session_id):
"""
Fetch a KML file:
- unzip it if-required
- follow NetworkLinks recursively if-required
Returns a file object
Designed as a helper function for download_kml()
"""
from gluon.tools import fetch
response = current.response
public_url = current.deployment_settings.get_base_public_url()
warning = ""
local = False
if not url.startswith("http"):
local = True
url = "%s%s" % (public_url, url)
elif len(url) > len(public_url) and url[:len(public_url)] == public_url:
local = True
if local:
# Keep Session for local URLs
import Cookie
cookie = Cookie.SimpleCookie()
cookie[session_id_name] = session_id
# For sync connections
current.session._unlock(response)
try:
file = fetch(url, cookie=cookie)
except urllib2.URLError:
warning = "URLError"
return warning
except urllib2.HTTPError:
warning = "HTTPError"
return warning
else:
try:
file = fetch(url)
except urllib2.URLError:
warning = "URLError"
return warning
except urllib2.HTTPError:
warning = "HTTPError"
return warning
filenames = []
if file[:2] == "PK":
# Unzip
fp = StringIO(file)
import zipfile
myfile = zipfile.ZipFile(fp)
files = myfile.infolist()
main = None
candidates = []
for _file in files:
filename = _file.filename
if filename == "doc.kml":
main = filename
elif filename[-4:] == ".kml":
candidates.append(filename)
if not main:
if candidates:
# Any better way than this to guess which KML file is the main one?
main = candidates[0]
else:
response.error = "KMZ contains no KML Files!"
return ""
# Write files to cache (other than the main one)
request = current.request
path = os.path.join(request.folder, "static", "cache", "kml")
if not os.path.exists(path):
os.makedirs(path)
for _file in files:
filename = _file.filename
if filename != main:
if "/" in filename:
_filename = filename.split("/")
dir = os.path.join(path, _filename[0])
if not os.path.exists(dir):
os.mkdir(dir)
_filepath = os.path.join(path, *_filename)
else:
_filepath = os.path.join(path, filename)
try:
f = open(_filepath, "wb")
except:
# Trying to write the Folder
pass
else:
filenames.append(filename)
__file = myfile.read(filename)
f.write(__file)
f.close()
# Now read the main one (to parse)
file = myfile.read(main)
myfile.close()
# Check for NetworkLink
if "<NetworkLink>" in file:
try:
# Remove extraneous whitespace
parser = etree.XMLParser(recover=True, remove_blank_text=True)
tree = etree.XML(file, parser)
# Find contents of href tag (must be a better way?)
url = ""
for element in tree.iter():
if element.tag == "{%s}href" % KML_NAMESPACE:
url = element.text
if url:
# Follow NetworkLink (synchronously)
warning2 = self.fetch_kml(url, filepath, session_id_name, session_id)
warning += warning2
except (etree.XMLSyntaxError,):
e = sys.exc_info()[1]
warning += "<ParseError>%s %s</ParseError>" % (e.line, e.errormsg)
# Check for Overlays
if "<GroundOverlay>" in file:
warning += "GroundOverlay"
if "<ScreenOverlay>" in file:
warning += "ScreenOverlay"
for filename in filenames:
replace = "%s/%s" % (URL(c="static", f="cache", args=["kml"]),
filename)
# Rewrite all references to point to the correct place
# need to catch <Icon><href> (which could be done via lxml)
# & also <description><![CDATA[<img src=" (which can't)
file = file.replace(filename, replace)
# Write main file to cache
f = open(filepath, "w")
f.write(file)
f.close()
return warning
# -------------------------------------------------------------------------
@staticmethod
def geocode(address, postcode=None, Lx_ids=None, geocoder="google"):
"""
Geocode an Address
- used by S3LocationSelector
settings.get_gis_geocode_imported_addresses
@param address: street address
@param postcode: postcode
@param Lx_ids: list of ancestor IDs
@param geocoder: which geocoder service to use
"""
from geopy import geocoders
if geocoder == "google":
g = geocoders.GoogleV3()
elif geocoder == "yahoo":
apikey = current.deployment_settings.get_gis_api_yahoo()
g = geocoders.Yahoo(apikey)
else:
# @ToDo
raise NotImplementedError
location = address
if postcode:
location = "%s,%s" % (location, postcode)
Lx = L5 = L4 = L3 = L2 = L1 = L0 = None
if Lx_ids:
# Convert Lx IDs to Names
table = current.s3db.gis_location
limit = len(Lx_ids)
if limit > 1:
query = (table.id.belongs(Lx_ids))
else:
query = (table.id == Lx_ids[0])
db = current.db
Lx = db(query).select(table.id,
table.name,
table.level,
table.gis_feature_type,
# Better as separate query
#table.lon_min,
#table.lat_min,
#table.lon_max,
#table.lat_max,
# Better as separate query
#table.wkt,
limitby=(0, limit),
orderby=~table.level
)
if Lx:
Lx_names = ",".join([l.name for l in Lx])
location = "%s,%s" % (location, Lx_names)
for l in Lx:
if l.level == "L0":
L0 = l.id
continue
elif l.level == "L1":
L1 = l.id
continue
elif l.level == "L2":
L2 = l.id
continue
elif l.level == "L3":
L3 = l.id
continue
elif l.level == "L4":
L4 = l.id
continue
elif l.level == "L5":
L5 = l.id
Lx = Lx.as_dict()
try:
results = g.geocode(location, exactly_one=False)
if len(results) == 1:
place, (lat, lon) = results[0]
if Lx:
output = None
# Check Results are for a specific address & not just that for the City
results = g.geocode(Lx_names, exactly_one=False)
if not results:
output = "Can't check that these results are specific enough"
for result in results:
place2, (lat2, lon2) = result
if place == place2:
output = "We can only geocode to the Lx"
break
if not output:
# Check Results are within relevant bounds
L0_row = None
wkt = None
if L5 and Lx[L5]["gis_feature_type"] != 1:
wkt = db(table.id == L5).select(table.wkt,
limitby=(0, 1)
).first().wkt
used_Lx = L5
elif L4 and Lx[L4]["gis_feature_type"] != 1:
wkt = db(table.id == L4).select(table.wkt,
limitby=(0, 1)
).first().wkt
used_Lx = L4
elif L3 and Lx[L3]["gis_feature_type"] != 1:
wkt = db(table.id == L3).select(table.wkt,
limitby=(0, 1)
).first().wkt
used_Lx = L3
elif L2 and Lx[L2]["gis_feature_type"] != 1:
wkt = db(table.id == L2).select(table.wkt,
limitby=(0, 1)
).first().wkt
used_Lx = L2
elif L1 and Lx[L1]["gis_feature_type"] != 1:
wkt = db(table.id == L1).select(table.wkt,
limitby=(0, 1)
).first().wkt
used_Lx = L1
elif L0:
L0_row = db(table.id == L0).select(table.wkt,
table.lon_min,
table.lat_min,
table.lon_max,
table.lat_max,
limitby=(0, 1)
).first()
if not L0_row.wkt.startswith("POI"): # Point
wkt = L0_row.wkt
used_Lx = L0
if wkt:
from shapely.geometry import point
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
test = point.Point(lon, lat)
shape = wkt_loads(wkt)
ok = test.intersects(shape)
if not ok:
output = "Returned value not within %s" % Lx[used_Lx]["name"]
elif L0:
# Check within country at least
if not L0_row:
L0_row = db(table.id == L0).select(table.lon_min,
table.lat_min,
table.lon_max,
table.lat_max,
limitby=(0, 1)
).first()
if lat < L0_row["lat_max"] and \
lat > L0_row["lat_min"] and \
lon < L0_row["lon_max"] and \
lon > L0_row["lon_min"]:
ok = True
else:
ok = False
output = "Returned value not within %s" % Lx["name"]
else:
# We'll just have to trust it!
ok = True
if ok:
output = dict(lat=lat, lon=lon)
else:
# We'll just have to trust it!
output = dict(lat=lat, lon=lon)
elif len(results):
output = "Multiple results found"
# @ToDo: Iterate through the results to see if just 1 is within the right bounds
else:
output = "No results found"
except:
error = sys.exc_info()[1]
output = str(error)
return output
# -------------------------------------------------------------------------
@staticmethod
def geocode_r(lat, lon):
"""
Reverse-geocode a Lat/Lon to the hierarchy of Locations which contain it
- used by S3LocationSelector
@param lat: latitude (decimal degrees)
@param lon: longitude (decimal degrees)
@return: dict of {level: location_id} for each matching hierarchy level,
or an error message string if the coordinates are invalid
"""
if not lat or not lon:
return "Need Lat & Lon"
results = ""
# Check vaguely valid
try:
lat = float(lat)
except ValueError:
results = "Latitude is Invalid!"
try:
lon = float(lon)
except ValueError:
results += "Longitude is Invalid!"
if not results:
if lon > 180 or lon < -180:
results = "Longitude must be between -180 & 180!"
elif lat > 90 or lat < -90:
results = "Latitude must be between -90 & 90!"
else:
table = current.s3db.gis_location
query = (table.level != None) & \
(table.deleted != True)
if current.deployment_settings.get_gis_spatialdb():
point = "POINT(%s %s)" % (lon, lat)
query &= (table.the_geom.st_intersects(point))
rows = current.db(query).select(table.id,
table.level,
)
results = {}
for row in rows:
results[row.level] = row.id
else:
# Oh dear, this is going to be slow :/
# Filter to the BBOX initially
query &= (table.lat_min < lat) & \
(table.lat_max > lat) & \
(table.lon_min < lon) & \
(table.lon_max > lon)
rows = current.db(query).select(table.id,
table.level,
table.wkt,
)
from shapely.geometry import point
from shapely.wkt import loads as wkt_loads
test = point.Point(lon, lat)
results = {}
for row in rows:
shape = wkt_loads(row.wkt)
ok = test.intersects(shape)
if ok:
#print "Level: %s, id: %s" % (row.level, row.id)
results[row.level] = row.id
return results
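# Illustrative example (IDs are hypothetical) of what geocode_r returns for a point
# that falls inside a populated hierarchy:
#     >>> GIS.geocode_r(6.9, 79.9)
#     {"L0": 123, "L1": 456, "L2": 789}
# Invalid input returns a message string instead, e.g. "Latitude is Invalid!".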
# -------------------------------------------------------------------------
@staticmethod
def get_bearing(lat_start, lon_start, lat_end, lon_end):
"""
Given a Start & End set of Coordinates, return a Bearing
Formula from: http://www.movable-type.co.uk/scripts/latlong.html
"""
import math
# shortcuts
cos = math.cos
sin = math.sin
radians = math.radians
# Work in radians
lat_start = radians(lat_start)
lat_end = radians(lat_end)
delta_lon = radians(lon_end - lon_start)
bearing = math.atan2(sin(delta_lon) * cos(lat_end),
(cos(lat_start) * sin(lat_end)) - \
(sin(lat_start) * cos(lat_end) * cos(delta_lon))
)
# Convert to a compass bearing in degrees (0..360)
bearing = (math.degrees(bearing) + 360) % 360
return bearing
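# Worked examples (inputs and outputs in decimal degrees):
#     >>> GIS.get_bearing(0, 0, 0, 90)  # due east along the equator
#     90.0
#     >>> GIS.get_bearing(0, 0, 45, 0)  # due north along the same meridian
#     0.0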
# -------------------------------------------------------------------------
def get_bounds(self, features=None, parent=None,
bbox_min_size = 0.05, bbox_inset = 0.007):
"""
Calculate the Bounds of a list of Point Features, suitable for
setting map bounds. If no features are supplied, the current map
configuration bounds will be returned.
e.g. When a map is displayed that focuses on a collection of points,
the map is zoomed to show just the region bounding the points.
e.g. To use in GPX export for correct zooming
Ensure a minimum size of bounding box, and that the points
are inset from the border.
@param features: A list of point features
@param bbox_min_size: Minimum bounding box - gives a minimum width
and height in degrees for the region shown.
Without this, a map showing a single point would not show any
extent around that point.
@param bbox_inset: Bounding box insets - adds a small amount of
distance outside the points.
Without this, the outermost points would be on the bounding
box, and might not be visible.
@return: An appropriate map bounding box, as a dict:
dict(lon_min=lon_min, lat_min=lat_min,
lon_max=lon_max, lat_max=lat_max)
@ToDo: Support Polygons (separate function?)
"""
if features:
lon_min = 180
lat_min = 90
lon_max = -180
lat_max = -90
# Is this a simple feature set or the result of a join?
try:
lon = features[0].lon
simple = True
except (AttributeError, KeyError):
simple = False
# @ToDo: Optimised Geospatial routines rather than this crude hack
for feature in features:
try:
if simple:
lon = feature.lon
lat = feature.lat
else:
# A Join
lon = feature.gis_location.lon
lat = feature.gis_location.lat
except AttributeError:
# Skip any rows without the necessary lat/lon fields
continue
# Also skip those set to None. Note must use explicit test,
# as zero is a legal value.
if lon is None or lat is None:
continue
lon_min = min(lon, lon_min)
lat_min = min(lat, lat_min)
lon_max = max(lon, lon_max)
lat_max = max(lat, lat_max)
# Assure a reasonable-sized box.
delta_lon = (bbox_min_size - (lon_max - lon_min)) / 2.0
if delta_lon > 0:
lon_min -= delta_lon
lon_max += delta_lon
delta_lat = (bbox_min_size - (lat_max - lat_min)) / 2.0
if delta_lat > 0:
lat_min -= delta_lat
lat_max += delta_lat
# Move bounds outward by specified inset.
lon_min -= bbox_inset
lon_max += bbox_inset
lat_min -= bbox_inset
lat_max += bbox_inset
else:
# no features
config = GIS.get_config()
if config.lat_min is not None:
lat_min = config.lat_min
else:
lat_min = -90
if config.lon_min is not None:
lon_min = config.lon_min
else:
lon_min = -180
if config.lat_max is not None:
lat_max = config.lat_max
else:
lat_max = 90
if config.lon_max is not None:
lon_max = config.lon_max
else:
lon_max = 180
return dict(lon_min=lon_min, lat_min=lat_min,
lon_max=lon_max, lat_max=lat_max)
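# Worked example of the padding above: a single feature at lon=10, lat=20 with the
# default bbox_min_size=0.05 and bbox_inset=0.007 starts as a zero-size box, is grown
# to 0.05 degrees wide/high (+/- 0.025 per side), then pushed outward by the 0.007
# inset, giving lon 9.968 .. 10.032 and lat 19.968 .. 20.032.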
# -------------------------------------------------------------------------
def get_parent_bounds(self, parent=None):
"""
Get bounds from the specified (parent) location and its ancestors.
This is used to validate lat, lon, and bounds for child locations.
Caution: This calls update_location_tree if the parent bounds are
not set. During prepopulate, update_location_tree is disabled,
so unless the parent contains its own bounds (i.e. they do not need
to be propagated down from its ancestors), this will not provide a
check on location nesting. Prepopulate data should be prepared to
be correct. A set of candidate prepopulate data can be tested by
importing after prepopulate is run.
@param parent: A location_id to provide bounds suitable
for validating child locations
@return: bounding box and parent location name, as a list:
[lat_min, lon_min, lat_max, lon_max, parent_name]
@ToDo: Support Polygons (separate function?)
"""
table = current.s3db.gis_location
db = current.db
parent = db(table.id == parent).select(table.id,
table.level,
table.name,
table.parent,
table.path,
table.lon,
table.lat,
table.lon_min,
table.lat_min,
table.lon_max,
table.lat_max).first()
if parent.lon_min is None or \
parent.lon_max is None or \
parent.lat_min is None or \
parent.lat_max is None or \
parent.lon_min == parent.lon_max or \
parent.lat_min == parent.lat_max:
# This is unsuitable - try higher parent
if parent.level == "L1":
if parent.parent:
# We can trust that L0 should have the data from prepop
L0 = db(table.id == parent.parent).select(table.name,
table.lon_min,
table.lat_min,
table.lon_max,
table.lat_max).first()
return L0.lat_min, L0.lon_min, L0.lat_max, L0.lon_max, L0.name
if parent.path:
path = parent.path
else:
# This will return None during prepopulate.
path = GIS.update_location_tree(dict(id=parent.id,
level=parent.level))
if path:
path_list = map(int, path.split("/"))
rows = db(table.id.belongs(path_list)).select(table.level,
table.name,
table.lat,
table.lon,
table.lon_min,
table.lat_min,
table.lon_max,
table.lat_max,
orderby=table.level)
row_list = rows.as_list()
row_list.reverse()
ok = False
for row in row_list:
if row["lon_min"] is not None and row["lon_max"] is not None and \
row["lat_min"] is not None and row["lat_max"] is not None and \
row["lon"] != row["lon_min"] != row["lon_max"] and \
row["lat"] != row["lat_min"] != row["lat_max"]:
ok = True
break
if ok:
# This level is suitable
return row["lat_min"], row["lon_min"], row["lat_max"], row["lon_max"], row["name"]
else:
# This level is suitable
return parent.lat_min, parent.lon_min, parent.lat_max, parent.lon_max, parent.name
# No ancestor bounds available -- use the active gis_config.
config = GIS.get_config()
if config:
return config.lat_min, config.lon_min, config.lat_max, config.lon_max, None
# Last resort -- fall back to no restriction.
return -90, -180, 90, 180, None
# -------------------------------------------------------------------------
@staticmethod
def _lookup_parent_path(feature_id):
"""
Helper that gets parent and path for a location.
"""
db = current.db
table = db.gis_location
feature = db(table.id == feature_id).select(table.id,
table.name,
table.level,
table.path,
table.parent,
limitby=(0, 1)).first()
return feature
# -------------------------------------------------------------------------
@staticmethod
def get_children(id, level=None):
"""
Return a list of IDs of all GIS Features which are children of
the requested feature, using Materialized path for retrieving
the children
This has been chosen over Modified Preorder Tree Traversal for
greater efficiency:
http://eden.sahanafoundation.org/wiki/HaitiGISToDo#HierarchicalTrees
@param: level - optionally filter by level
@return: Rows object containing IDs & Names
Note: This does NOT include the parent location itself
"""
db = current.db
try:
table = db.gis_location
except:
# Being run from CLI for debugging
table = current.s3db.gis_location
query = (table.deleted == False)
if level:
query &= (table.level == level)
term = str(id)
path = table.path
query &= ((path.like(term + "/%")) | \
(path.like("%/" + term + "/%")))
children = db(query).select(table.id,
table.name)
return children
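# Example of the materialized-path matching above: for id=5, "5/%" matches paths such
# as "5/23" and "5/23/87" (descendants when 5 is top-level), while "%/5/%" matches
# paths such as "1/5/23" (descendants when 5 itself has ancestors). The location 5
# itself (path "5" or "1/5") matches neither pattern, so it is excluded.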
# -------------------------------------------------------------------------
@staticmethod
def get_parents(feature_id, feature=None, ids_only=False):
"""
Returns a list containing ancestors of the requested feature.
If the caller already has the location row, including path and
parent fields, they can supply it via feature to avoid a db lookup.
If ids_only is false, each element in the list is a gluon.sql.Row
containing the gis_location record of an ancestor of the specified
location.
If ids_only is true, just returns a list of ids of the parents.
This avoids a db lookup for the parents if the specified feature
has a path.
List elements are in the opposite order as the location path and
exclude the specified location itself, i.e. element 0 is the parent
and the last element is the most distant ancestor.
Assists lazy update of a database without location paths by calling
update_location_tree to get the path.
Note that during prepopulate, update_location_tree is disabled,
in which case this will only return the immediate parent.
"""
if not feature or "path" not in feature or "parent" not in feature:
feature = GIS._lookup_parent_path(feature_id)
if feature and (feature.path or feature.parent):
if feature.path:
path = feature.path
else:
path = GIS.update_location_tree(feature)
if path:
path_list = map(int, path.split("/"))
if len(path_list) == 1:
# No parents - path contains only this feature.
return None
# Get only ancestors
path_list = path_list[:-1]
# Get path in the desired -- reversed -- order.
path_list.reverse()
elif feature.parent:
path_list = [feature.parent]
else:
return None
# If only ids are wanted, stop here.
if ids_only:
return path_list
# Retrieve parents - order in which they're returned is arbitrary.
s3db = current.s3db
table = s3db.gis_location
query = (table.id.belongs(path_list))
fields = [table.id, table.name, table.level, table.lat, table.lon]
unordered_parents = current.db(query).select(cache=s3db.cache,
*fields)
# Reorder parents in order of reversed path.
unordered_ids = [row.id for row in unordered_parents]
parents = [unordered_parents[unordered_ids.index(path_id)]
for path_id in path_list if path_id in unordered_ids]
return parents
else:
return None
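# Example: for a location with path "1/5/23", get_parents(23) returns the rows for
# ids 5 and 1 in that order (nearest parent first); with ids_only=True it returns
# just [5, 1].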
# -------------------------------------------------------------------------
def get_parent_per_level(self, results, feature_id,
feature=None,
ids=True,
names=True):
"""
Adds ancestor of requested feature for each level to supplied dict.
If the caller already has the location row, including path and
parent fields, they can supply it via feature to avoid a db lookup.
If a dict is not supplied in results, one is created. The results
dict is returned in either case.
If ids=True and names=False (used by old S3LocationSelectorWidget):
For each ancestor, an entry is added to results, like
ancestor.level : ancestor.id
If ids=False and names=True (used by address_onvalidation):
For each ancestor, an entry is added to results, like
ancestor.level : ancestor.name
If ids=True and names=True (used by new S3LocationSelectorWidget):
For each ancestor, an entry is added to results, like
ancestor.level : {name : ancestor.name, id: ancestor.id}
"""
if not results:
results = {}
_id = feature_id
# if we don't have a feature or a feature ID return the dict as-is
if not feature_id and not feature:
return results
if not feature_id and "path" not in feature and "parent" in feature:
# gis_location_onvalidation on a Create => no ID yet
# Read the Parent's path instead
feature = self._lookup_parent_path(feature.parent)
_id = feature.id
elif not feature or "path" not in feature or "parent" not in feature:
feature = self._lookup_parent_path(feature_id)
if feature and (feature.path or feature.parent):
if feature.path:
path = feature.path
else:
path = self.update_location_tree(feature)
# Get ids of ancestors at each level.
if feature.parent:
strict = self.get_strict_hierarchy(feature.parent)
else:
strict = self.get_strict_hierarchy(_id)
if path and strict and not names:
# No need to do a db lookup for parents in this case -- we
# know the levels of the parents from their position in path.
# Note ids returned from db are ints, not strings, so be
# consistent with that.
path_ids = map(int, path.split("/"))
# This skips the last path element, which is the supplied
# location.
for (i, _id) in enumerate(path_ids[:-1]):
results["L%i" % i] = _id
elif path:
ancestors = self.get_parents(_id, feature=feature)
if ancestors:
for ancestor in ancestors:
if ancestor.level and ancestor.level in self.hierarchy_level_keys:
if names and ids:
results[ancestor.level] = Storage()
results[ancestor.level].name = ancestor.name
results[ancestor.level].id = ancestor.id
elif names:
results[ancestor.level] = ancestor.name
else:
results[ancestor.level] = ancestor.id
if not feature_id:
# Add the Parent in (we only need the version required for gis_location onvalidation here)
results[feature.level] = feature.name
if names:
# We need to have entries for all levels
# (both for address onvalidation & new LocationSelector)
hierarchy_level_keys = self.hierarchy_level_keys
for key in hierarchy_level_keys:
if not results.has_key(key):
results[key] = None
return results
# -------------------------------------------------------------------------
def update_table_hierarchy_labels(self, tablename=None):
"""
Re-set table options that depend on location_hierarchy
Only update tables which are already defined
"""
levels = ("L1", "L2", "L3", "L4", "L5")
labels = self.get_location_hierarchy()
db = current.db
if tablename and tablename in db:
# Update the specific table which has just been defined
table = db[tablename]
if tablename == "gis_location":
labels["L0"] = current.messages.COUNTRY
table.level.requires = \
IS_EMPTY_OR(IS_IN_SET(labels))
else:
for level in levels:
table[level].label = labels[level]
else:
# Do all Tables which are already defined
# gis_location
if "gis_location" in db:
table = db.gis_location
table.level.requires = \
IS_EMPTY_OR(IS_IN_SET(labels))
# These tables store location hierarchy info for XSLT export.
# Labels are used for PDF & XLS Reports
tables = ["org_office",
#"pr_person",
"pr_address",
"cr_shelter",
"asset_asset",
#"hms_hospital",
]
for tablename in tables:
if tablename in db:
table = db[tablename]
for level in levels:
table[level].label = labels[level]
# -------------------------------------------------------------------------
@staticmethod
def set_config(config_id=None, force_update_cache=False):
"""
Reads the specified GIS config from the DB, caches it in response.
Passing in a false or non-existent id will cause the personal config,
if any, to be used, else the site config (uuid SITE_DEFAULT), else
their fallback values defined in this class.
If force_update_cache is true, the config will be read and cached in
response even if the specified config is the same as what's already
cached. Used when the config was just written.
The config itself will be available in response.s3.gis.config.
Scalar fields from the gis_config record and its linked
gis_projection record have the same names as the fields in their
tables and can be accessed as response.s3.gis.<fieldname>.
Returns the id of the config it actually used, if any.
@param: config_id. use '0' to set the SITE_DEFAULT
@ToDo: Merge configs for Event
"""
_gis = current.response.s3.gis
# If an id has been supplied, try it first. If it matches what's in
# response, there's no work to do.
if config_id and not force_update_cache and \
_gis.config and \
_gis.config.id == config_id:
return
db = current.db
s3db = current.s3db
ctable = s3db.gis_config
mtable = s3db.gis_marker
ptable = s3db.gis_projection
stable = s3db.gis_style
fields = (ctable.id,
ctable.default_location_id,
ctable.region_location_id,
ctable.geocoder,
ctable.lat_min,
ctable.lat_max,
ctable.lon_min,
ctable.lon_max,
ctable.zoom,
ctable.lat,
ctable.lon,
ctable.pe_id,
ctable.wmsbrowser_url,
ctable.wmsbrowser_name,
ctable.zoom_levels,
mtable.image,
mtable.height,
mtable.width,
ptable.epsg,
ptable.proj4js,
ptable.maxExtent,
ptable.units,
)
cache = Storage()
row = None
rows = None
if config_id:
# Merge this one with the Site Default
query = (ctable.id == config_id) | \
(ctable.uuid == "SITE_DEFAULT")
# May well not be complete, so Left Join
left = (ptable.on(ptable.id == ctable.projection_id),
stable.on((stable.config_id == ctable.id) & \
(stable.layer_id == None)),
mtable.on(mtable.id == stable.marker_id),
)
rows = db(query).select(*fields,
left=left,
orderby=ctable.pe_type,
limitby=(0, 2))
if len(rows) == 1:
# The requested config must be invalid, so just use site default
row = rows.first()
elif config_id == 0:
# Use site default
query = (ctable.uuid == "SITE_DEFAULT")
# May well not be complete, so Left Join
left = (ptable.on(ptable.id == ctable.projection_id),
stable.on((stable.config_id == ctable.id) & \
(stable.layer_id == None)),
mtable.on(mtable.id == stable.marker_id),
)
row = db(query).select(*fields,
limitby=(0, 1)).first()
if not row:
# No configs found at all
_gis.config = cache
return cache
# If no id supplied, extend the site config with any personal or OU configs
if not rows and not row:
auth = current.auth
if auth.is_logged_in():
# Read personalised config, if available.
user = auth.user
pe_id = user.pe_id
# Also look for OU configs
pes = []
if user.organisation_id:
# Add the user account's Org to the list
# (Will take lower-priority than Personal)
otable = s3db.org_organisation
org = db(otable.id == user.organisation_id).select(otable.pe_id,
limitby=(0, 1)
).first()
try:
pes.append(org.pe_id)
except:
current.log.warning("Unable to find Org %s" % user.organisation_id)
if current.deployment_settings.get_org_branches():
# Also look for Parent Orgs
ancestors = s3db.pr_get_ancestors(org.pe_id)
pes += ancestors
if user.site_id:
# Add the user account's Site to the list
# (Will take lower-priority than Org/Personal)
site_pe_id = s3db.pr_get_pe_id("org_site", user.site_id)
if site_pe_id:
pes.append(site_pe_id)
if user.org_group_id:
# Add the user account's Org Group to the list
# (Will take lower-priority than Site/Org/Personal)
ogtable = s3db.org_group
ogroup = db(ogtable.id == user.org_group_id).select(ogtable.pe_id,
limitby=(0, 1)
).first()
pes = list(pes)
try:
pes.append(ogroup.pe_id)
except:
current.log.warning("Unable to find Org Group %s" % user.org_group_id)
query = (ctable.uuid == "SITE_DEFAULT") | \
((ctable.pe_id == pe_id) & \
(ctable.pe_default != False))
if len(pes) == 1:
query |= (ctable.pe_id == pes[0])
else:
query |= (ctable.pe_id.belongs(pes))
# Personal/OU may well not be complete, so Left Join
left = (ptable.on(ptable.id == ctable.projection_id),
stable.on((stable.config_id == ctable.id) & \
(stable.layer_id == None)),
mtable.on(mtable.id == stable.marker_id),
)
# Order by pe_type (defined in gis_config)
# @ToDo: Sort orgs from the hierarchy?
# (Currently we just have branch > non-branch in pe_type)
rows = db(query).select(*fields,
left=left,
orderby=ctable.pe_type)
if len(rows) == 1:
row = rows.first()
if rows and not row:
# Merge Configs
cache["ids"] = []
for row in rows:
config = row["gis_config"]
if not config_id:
config_id = config.id
cache["ids"].append(config.id)
for key in config:
if key in ["delete_record", "gis_layer_config", "gis_menu", "update_record"]:
continue
if key not in cache or cache[key] is None:
cache[key] = config[key]
if "epsg" not in cache or cache["epsg"] is None:
projection = row["gis_projection"]
for key in ["epsg", "units", "maxExtent", "proj4js"]:
cache[key] = projection[key] if key in projection \
else None
if "marker_image" not in cache or \
cache["marker_image"] is None:
marker = row["gis_marker"]
for key in ["image", "height", "width"]:
cache["marker_%s" % key] = marker[key] if key in marker \
else None
# Add NULL values for any that aren't defined, to avoid KeyErrors
for key in ["epsg", "units", "proj4js", "maxExtent",
"marker_image", "marker_height", "marker_width",
]:
if key not in cache:
cache[key] = None
if not row:
# No personal config or not logged in. Use site default.
query = (ctable.uuid == "SITE_DEFAULT") & \
(mtable.id == stable.marker_id) & \
(stable.config_id == ctable.id) & \
(stable.layer_id == None) & \
(ptable.id == ctable.projection_id)
row = db(query).select(*fields,
limitby=(0, 1)).first()
if not row:
# No configs found at all
_gis.config = cache
return cache
if not cache:
# We had a single row
config = row["gis_config"]
config_id = config.id
cache["ids"] = [config_id]
projection = row["gis_projection"]
marker = row["gis_marker"]
for key in config:
cache[key] = config[key]
for key in ["epsg", "maxExtent", "proj4js", "units"]:
cache[key] = projection[key] if key in projection else None
for key in ["image", "height", "width"]:
cache["marker_%s" % key] = marker[key] if key in marker \
else None
# Store the values
_gis.config = cache
return cache
# -------------------------------------------------------------------------
@staticmethod
def get_config():
"""
Returns the current GIS config structure.
@ToDo: Config() class
"""
_gis = current.response.s3.gis
if not _gis.config:
# Ask set_config to put the appropriate config in response.
if current.session.s3.gis_config_id:
GIS.set_config(current.session.s3.gis_config_id)
else:
GIS.set_config()
return _gis.config
# -------------------------------------------------------------------------
def get_location_hierarchy(self, level=None, location=None):
"""
Returns the location hierarchy and its labels
@param: level - a specific level for which to lookup the label
@param: location - the location_id to lookup the location for
currently only the actual location is supported
@ToDo: Do a search of parents to allow this
lookup for any location
"""
_levels = self.hierarchy_levels
_location = location
if not location and _levels:
# Use cached value
if level:
if level in _levels:
return _levels[level]
else:
return level
else:
return _levels
COUNTRY = current.messages.COUNTRY
if level == "L0":
return COUNTRY
db = current.db
s3db = current.s3db
table = s3db.gis_hierarchy
fields = (table.uuid,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
)
query = (table.uuid == "SITE_DEFAULT")
if not location:
config = GIS.get_config()
location = config.region_location_id
if location:
# Try the Region, but ensure we have the fallback available in a single query
query = query | (table.location_id == location)
rows = db(query).select(cache=s3db.cache,
*fields)
if len(rows) > 1:
# Remove the Site Default
_filter = lambda row: row.uuid == "SITE_DEFAULT"
rows.exclude(_filter)
elif not rows:
# prepop hasn't run yet
if level:
return level
levels = OrderedDict()
hierarchy_level_keys = self.hierarchy_level_keys
for key in hierarchy_level_keys:
if key == "L0":
levels[key] = COUNTRY
else:
levels[key] = key
return levels
T = current.T
row = rows.first()
if level:
try:
return T(row[level])
except:
return level
else:
levels = OrderedDict()
hierarchy_level_keys = self.hierarchy_level_keys
for key in hierarchy_level_keys:
if key == "L0":
levels[key] = COUNTRY
elif key in row and row[key]:
# Only include rows with values
levels[key] = str(T(row[key]))
if not _location:
# Cache the value
self.hierarchy_levels = levels
if level:
return levels[level]
else:
return levels
# -------------------------------------------------------------------------
def get_strict_hierarchy(self, location=None):
"""
Returns the strict hierarchy value from the current config.
@param: location - the location_id of the record to check
"""
s3db = current.s3db
table = s3db.gis_hierarchy
# Read the system default
# @ToDo: Check for an active gis_config region?
query = (table.uuid == "SITE_DEFAULT")
if location:
# Try the Location's Country, but ensure we have the fallback available in a single query
query = query | (table.location_id == self.get_parent_country(location))
rows = current.db(query).select(table.uuid,
table.strict_hierarchy,
cache=s3db.cache)
if len(rows) > 1:
# Remove the Site Default
_filter = lambda row: row.uuid == "SITE_DEFAULT"
rows.exclude(_filter)
row = rows.first()
if row:
strict = row.strict_hierarchy
else:
# Pre-pop hasn't run yet
return False
return strict
# -------------------------------------------------------------------------
def get_max_hierarchy_level(self):
"""
Returns the deepest level key (i.e. Ln) in the current hierarchy.
- used by gis_location_onvalidation()
"""
location_hierarchy = self.get_location_hierarchy()
return max(location_hierarchy)
# -------------------------------------------------------------------------
def get_all_current_levels(self, level=None):
"""
Get the current hierarchy levels plus non-hierarchy levels.
"""
all_levels = OrderedDict()
all_levels.update(self.get_location_hierarchy())
#T = current.T
#all_levels["GR"] = T("Location Group")
#all_levels["XX"] = T("Imported")
if level:
try:
return all_levels[level]
except Exception, e:
return level
else:
return all_levels
# -------------------------------------------------------------------------
def get_relevant_hierarchy_levels(self, as_dict=False):
"""
Get current location hierarchy levels relevant for the user
"""
levels = self.relevant_hierarchy_levels
if not levels:
levels = OrderedDict(self.get_location_hierarchy())
if len(current.deployment_settings.get_gis_countries()) == 1 or \
current.response.s3.gis.config.region_location_id:
levels.pop("L0", None)
self.relevant_hierarchy_levels = levels
if not as_dict:
return levels.keys()
else:
return levels
# -------------------------------------------------------------------------
@staticmethod
def get_countries(key_type="id"):
"""
Returns country code or L0 location id versus name for all countries.
The lookup is cached in the session
If key_type is "code", these are returned as an OrderedDict with
country code as the key. If key_type is "id", then the location id
is the key. In all cases, the value is the name.
"""
session = current.session
if "gis" not in session:
session.gis = Storage()
gis = session.gis
if gis.countries_by_id:
cached = True
else:
cached = False
if not cached:
s3db = current.s3db
table = s3db.gis_location
ttable = s3db.gis_location_tag
query = (table.level == "L0") & \
(ttable.tag == "ISO2") & \
(ttable.location_id == table.id)
countries = current.db(query).select(table.id,
table.name,
ttable.value,
orderby=table.name)
if not countries:
return []
countries_by_id = OrderedDict()
countries_by_code = OrderedDict()
for row in countries:
location = row["gis_location"]
countries_by_id[location.id] = location.name
countries_by_code[row["gis_location_tag"].value] = location.name
# Cache in the session
gis.countries_by_id = countries_by_id
gis.countries_by_code = countries_by_code
if key_type == "id":
return countries_by_id
else:
return countries_by_code
elif key_type == "id":
return gis.countries_by_id
else:
return gis.countries_by_code
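# Illustrative example (values hypothetical): get_countries("code") returns an
# OrderedDict such as {"LK": "Sri Lanka", "NP": "Nepal"}, while get_countries("id")
# keys the same names by gis_location record id.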
# -------------------------------------------------------------------------
@staticmethod
def get_country(key, key_type="id"):
"""
Returns country name for given code or id from L0 locations.
The key can be either location id or country code, as specified
by key_type.
"""
if key:
if current.gis.get_countries(key_type):
if key_type == "id":
return current.session.gis.countries_by_id[key]
else:
return current.session.gis.countries_by_code[key]
return None
# -------------------------------------------------------------------------
def get_parent_country(self, location, key_type="id"):
"""
Returns the parent country for a given record
@param: location: the location or id to search for
@param: key_type: whether to return an id or code
@ToDo: Optimise to not use try/except
"""
if not location:
return None
db = current.db
s3db = current.s3db
# @ToDo: Avoid try/except here!
# - separate parameters best as even isinstance is expensive
try:
# location is passed as integer (location_id)
table = s3db.gis_location
location = db(table.id == location).select(table.id,
table.path,
table.level,
limitby=(0, 1),
cache=s3db.cache).first()
except:
# location is passed as record
pass
if location.level == "L0":
if key_type == "id":
return location.id
elif key_type == "code":
ttable = s3db.gis_location_tag
query = (ttable.tag == "ISO2") & \
(ttable.location_id == location.id)
tag = db(query).select(ttable.value,
limitby=(0, 1)).first()
try:
return tag.value
except:
return None
else:
parents = self.get_parents(location.id,
feature=location)
if parents:
for row in parents:
if row.level == "L0":
if key_type == "id":
return row.id
elif key_type == "code":
ttable = s3db.gis_location_tag
query = (ttable.tag == "ISO2") & \
(ttable.location_id == row.id)
tag = db(query).select(ttable.value,
limitby=(0, 1)).first()
try:
return tag.value
except:
return None
return None
# -------------------------------------------------------------------------
def get_default_country(self, key_type="id"):
"""
Returns the default country for the active gis_config
@param: key_type: whether to return an id or code
"""
config = GIS.get_config()
if config.default_location_id:
return self.get_parent_country(config.default_location_id,
key_type=key_type)
return None
# -------------------------------------------------------------------------
def get_features_in_polygon(self, location, tablename=None, category=None):
"""
Returns a gluon.sql.Rows of Features within a Polygon.
The Polygon can be either a WKT string or the ID of a record in the
gis_location table
Currently unused.
@ToDo: Optimise to not use try/except
"""
from shapely.geos import ReadingError
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
db = current.db
s3db = current.s3db
locations = s3db.gis_location
try:
location_id = int(location)
# Check that the location is a polygon
location = db(locations.id == location_id).select(locations.wkt,
locations.lon_min,
locations.lon_max,
locations.lat_min,
locations.lat_max,
limitby=(0, 1)
).first()
if location:
wkt = location.wkt
if wkt and (wkt.startswith("POLYGON") or \
wkt.startswith("MULTIPOLYGON")):
# ok
lon_min = location.lon_min
lon_max = location.lon_max
lat_min = location.lat_min
lat_max = location.lat_max
else:
current.log.error("Location searched within isn't a Polygon!")
return None
except: # @ToDo: need specific exception
wkt = location
if (wkt.startswith("POLYGON") or wkt.startswith("MULTIPOLYGON")):
# ok
lon_min = None
else:
current.log.error("This isn't a Polygon!")
return None
try:
polygon = wkt_loads(wkt)
except: # @ToDo: need specific exception
current.log.error("Invalid Polygon!")
return None
table = s3db[tablename]
if "location_id" not in table.fields():
# @ToDo: Add any special cases to be able to find the linked location
current.log.error("This table doesn't have a location_id!")
return None
query = (table.location_id == locations.id)
if "deleted" in table.fields:
query &= (table.deleted == False)
# @ToDo: Check AAA (do this as a resource filter?)
features = db(query).select(locations.wkt,
locations.lat,
locations.lon,
table.ALL)
output = Rows()
# @ToDo: provide option to use PostGIS/Spatialite
# settings = current.deployment_settings
# if settings.gis.spatialdb and settings.database.db_type == "postgres":
if lon_min is None:
# We have no BBOX so go straight to the full geometry check
for row in features:
_location = row.gis_location
wkt = _location.wkt
if wkt is None:
lat = _location.lat
lon = _location.lon
if lat is not None and lon is not None:
wkt = self.latlon_to_wkt(lat, lon)
else:
continue
try:
shape = wkt_loads(wkt)
if shape.intersects(polygon):
# Save Record
output.records.append(row)
except ReadingError:
current.log.error("Error reading wkt of location with id",
value=row.id)
else:
# 1st check for Features included within the bbox (faster)
def in_bbox(row):
_location = row.gis_location
return (_location.lon > lon_min) & \
(_location.lon < lon_max) & \
(_location.lat > lat_min) & \
(_location.lat < lat_max)
for row in features.find(lambda row: in_bbox(row)):
# Search within this subset with a full geometry check
# Uses Shapely.
_location = row.gis_location
wkt = _location.wkt
if wkt is None:
lat = _location.lat
lon = _location.lon
if lat is not None and lon is not None:
wkt = self.latlon_to_wkt(lat, lon)
else:
continue
try:
shape = wkt_loads(wkt)
if shape.intersects(polygon):
# Save Record
output.records.append(row)
except ReadingError:
current.log.error("Error reading wkt of location with id",
value = row.id)
return output
# -------------------------------------------------------------------------
@staticmethod
def get_polygon_from_bounds(bbox):
"""
Given a gis_location record or a bounding box dict with keys
lon_min, lon_max, lat_min, lat_max, construct a WKT polygon with
points at the corners.
"""
lon_min = bbox["lon_min"]
lon_max = bbox["lon_max"]
lat_min = bbox["lat_min"]
lat_max = bbox["lat_max"]
# Take the points in a counterclockwise direction.
points = [(lon_min, lat_min),
(lon_min, lat_max),
(lon_max, lat_max),
(lon_max, lat_min),
(lon_min, lat_min)]
pairs = ["%s %s" % (p[0], p[1]) for p in points]
wkt = "POLYGON ((%s))" % ", ".join(pairs)
return wkt
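# Example: get_polygon_from_bounds({"lon_min": 0, "lat_min": 0,
#                                    "lon_max": 1, "lat_max": 1})
# returns "POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))".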
# -------------------------------------------------------------------------
@staticmethod
def get_bounds_from_radius(lat, lon, radius):
"""
Compute a bounding box given a Radius (in km) of a LatLon Location
Note the order of the parameters.
@return a dict containing the bounds with keys lon_min, lon_max,
lat_min, lat_max
See:
http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
"""
import math
radians = math.radians
degrees = math.degrees
MIN_LAT = radians(-90) # -PI/2
MAX_LAT = radians(90) # PI/2
MIN_LON = radians(-180) # -PI
MAX_LON = radians(180) # PI
# Convert to radians for the calculation
r = float(radius) / RADIUS_EARTH
radLat = radians(lat)
radLon = radians(lon)
# Calculate the bounding box
minLat = radLat - r
maxLat = radLat + r
if (minLat > MIN_LAT) and (maxLat < MAX_LAT):
deltaLon = math.asin(math.sin(r) / math.cos(radLat))
minLon = radLon - deltaLon
if (minLon < MIN_LON):
minLon += 2 * math.pi
maxLon = radLon + deltaLon
if (maxLon > MAX_LON):
maxLon -= 2 * math.pi
else:
# Special care for Poles & 180 Meridian:
# http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates#PolesAnd180thMeridian
minLat = max(minLat, MIN_LAT)
maxLat = min(maxLat, MAX_LAT)
minLon = MIN_LON
maxLon = MAX_LON
# Convert back to degrees
minLat = degrees(minLat)
minLon = degrees(minLon)
maxLat = degrees(maxLat)
maxLon = degrees(maxLon)
return dict(lat_min = minLat,
lat_max = maxLat,
lon_min = minLon,
lon_max = maxLon)
# -------------------------------------------------------------------------
def get_features_in_radius(self, lat, lon, radius, tablename=None, category=None):
"""
Returns Features within a Radius (in km) of a LatLon Location
Unused
"""
import math
db = current.db
settings = current.deployment_settings
if settings.gis.spatialdb and settings.database.db_type == "postgres":
# Use PostGIS routine
# The ST_DWithin function call will automatically include a bounding box comparison that will make use of any indexes that are available on the geometries.
# @ToDo: Support optional Category (make this a generic filter?)
import psycopg2
import psycopg2.extras
dbname = settings.database.database
username = settings.database.username
password = settings.database.password
host = settings.database.host
port = settings.database.port or "5432"
# Convert km to degrees (since we're using the_geom not the_geog)
radius = math.degrees(float(radius) / RADIUS_EARTH)
connection = psycopg2.connect("dbname=%s user=%s password=%s host=%s port=%s" % (dbname, username, password, host, port))
cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
info_string = "SELECT column_name, udt_name FROM information_schema.columns WHERE table_name = 'gis_location' or table_name = '%s';" % tablename
cursor.execute(info_string)
# @ToDo: Look at more optimal queries for just those fields we need
if tablename:
# Lookup the resource
query_string = cursor.mogrify("SELECT * FROM gis_location, %s WHERE %s.location_id = gis_location.id and ST_DWithin (ST_GeomFromText ('POINT (%s %s)', 4326), the_geom, %s);" % (tablename, tablename, lat, lon, radius))
else:
# Lookup the raw Locations
query_string = cursor.mogrify("SELECT * FROM gis_location WHERE ST_DWithin (ST_GeomFromText ('POINT (%s %s)', 4326), the_geom, %s);" % (lat, lon, radius))
cursor.execute(query_string)
# @ToDo: Export Rows?
features = []
for record in cursor:
d = dict(record.items())
row = Storage()
# @ToDo: Optional support for Polygons
if tablename:
row.gis_location = Storage()
row.gis_location.id = d["id"]
row.gis_location.lat = d["lat"]
row.gis_location.lon = d["lon"]
row.gis_location.lat_min = d["lat_min"]
row.gis_location.lon_min = d["lon_min"]
row.gis_location.lat_max = d["lat_max"]
row.gis_location.lon_max = d["lon_max"]
row[tablename] = Storage()
row[tablename].id = d["id"]
row[tablename].name = d["name"]
else:
row.name = d["name"]
row.id = d["id"]
row.lat = d["lat"]
row.lon = d["lon"]
row.lat_min = d["lat_min"]
row.lon_min = d["lon_min"]
row.lat_max = d["lat_max"]
row.lon_max = d["lon_max"]
features.append(row)
return features
#elif settings.database.db_type == "mysql":
# Do the calculation in MySQL to pull back only the relevant rows
# Raw MySQL Formula from: http://blog.peoplesdns.com/archives/24
# PI = 3.141592653589793, mysql's pi() function returns 3.141593
#pi = math.pi
#query = """SELECT name, lat, lon, acos(SIN( PI()* 40.7383040 /180 )*SIN( PI()*lat/180 ))+(cos(PI()* 40.7383040 /180)*COS( PI()*lat/180) *COS(PI()*lon/180-PI()* -73.99319 /180))* 3963.191
#AS distance
#FROM gis_location
#WHERE 1=1
#AND 3963.191 * ACOS( (SIN(PI()* 40.7383040 /180)*SIN(PI() * lat/180)) + (COS(PI()* 40.7383040 /180)*cos(PI()*lat/180)*COS(PI() * lon/180-PI()* -73.99319 /180))) < = 1.5
#ORDER BY 3963.191 * ACOS((SIN(PI()* 40.7383040 /180)*SIN(PI()*lat/180)) + (COS(PI()* 40.7383040 /180)*cos(PI()*lat/180)*COS(PI() * lon/180-PI()* -73.99319 /180)))"""
# db.executesql(query)
else:
# Calculate in Python
# Pull back all the rows within a square bounding box (faster than checking all features manually)
# Then check each feature within this subset
# http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
# @ToDo: Support optional Category (make this a generic filter?)
bbox = self.get_bounds_from_radius(lat, lon, radius)
# shortcut
locations = db.gis_location
query = (locations.lat > bbox["lat_min"]) & \
(locations.lat < bbox["lat_max"]) & \
(locations.lon > bbox["lon_min"]) & \
(locations.lon < bbox["lon_max"])
deleted = (locations.deleted == False)
empty = (locations.lat != None) & (locations.lon != None)
query = deleted & empty & query
if tablename:
# Lookup the resource
table = current.s3db[tablename]
query &= (table.location_id == locations.id)
records = db(query).select(table.ALL,
locations.id,
locations.name,
locations.level,
locations.lat,
locations.lon,
locations.lat_min,
locations.lon_min,
locations.lat_max,
locations.lon_max)
else:
# Lookup the raw Locations
records = db(query).select(locations.id,
locations.name,
locations.level,
locations.lat,
locations.lon,
locations.lat_min,
locations.lon_min,
locations.lat_max,
locations.lon_max)
features = Rows()
for row in records:
# Calculate the Great Circle distance
if tablename:
distance = self.greatCircleDistance(lat,
lon,
row["gis_location.lat"],
row["gis_location.lon"])
else:
distance = self.greatCircleDistance(lat,
lon,
row.lat,
row.lon)
if distance < radius:
features.records.append(row)
else:
# skip
continue
return features
# -------------------------------------------------------------------------
def get_latlon(self, feature_id, filter=False):
"""
Returns the Lat/Lon for a Feature
used by display_feature() in gis controller
@param feature_id: the feature ID
@param filter: Filter out results based on deployment_settings
"""
db = current.db
table = db.gis_location
feature = db(table.id == feature_id).select(table.id,
table.lat,
table.lon,
table.parent,
table.path,
limitby=(0, 1)).first()
# Zero is an allowed value, hence explicit test for None.
if "lon" in feature and "lat" in feature and \
(feature.lat is not None) and (feature.lon is not None):
return dict(lon=feature.lon, lat=feature.lat)
else:
# Step through ancestors to first with lon, lat.
parents = self.get_parents(feature.id, feature=feature)
if parents:
for row in parents:
lon = row.get("lon", None)
lat = row.get("lat", None)
if (lon is not None) and (lat is not None):
return dict(lon=lon, lat=lat)
# Invalid feature_id
return None
# -------------------------------------------------------------------------
@staticmethod
def get_locations(table,
query,
join = True,
geojson = True,
):
"""
Returns the locations for an XML export
- used by GIS.get_location_data() and S3PivotTable.geojson()
@ToDo: Support multiple locations for a single resource
                   (e.g. a Project working in multiple Communities)
"""
db = current.db
tablename = table._tablename
gtable = current.s3db.gis_location
settings = current.deployment_settings
tolerance = settings.get_gis_simplify_tolerance()
output = {}
if settings.get_gis_spatialdb():
if geojson:
# Do the Simplify & GeoJSON direct from the DB
web2py_installed_version = parse_version(current.request.global_settings.web2py_version)
web2py_installed_datetime = web2py_installed_version[4] # datetime_index = 4
if web2py_installed_datetime >= datetime.datetime(2015, 1, 17, 0, 7, 4):
# Use http://www.postgis.org/docs/ST_SimplifyPreserveTopology.html
rows = db(query).select(table.id,
gtable.the_geom.st_simplifypreservetopology(tolerance).st_asgeojson(precision=4).with_alias("geojson"))
else:
# Use http://www.postgis.org/docs/ST_Simplify.html
rows = db(query).select(table.id,
gtable.the_geom.st_simplify(tolerance).st_asgeojson(precision=4).with_alias("geojson"))
for row in rows:
output[row[tablename].id] = row.geojson
else:
# Do the Simplify direct from the DB
rows = db(query).select(table.id,
gtable.the_geom.st_simplify(tolerance).st_astext().with_alias("wkt"))
for row in rows:
output[row[tablename].id] = row.wkt
else:
rows = db(query).select(table.id,
gtable.wkt)
simplify = GIS.simplify
if geojson:
# Simplify the polygon to reduce download size
if join:
for row in rows:
g = simplify(row["gis_location"].wkt,
tolerance=tolerance,
output="geojson")
if g:
output[row[tablename].id] = g
else:
for row in rows:
g = simplify(row.wkt,
tolerance=tolerance,
output="geojson")
if g:
output[row.id] = g
else:
# Simplify the polygon to reduce download size
# & also to work around the recursion limit in libxslt
# http://blog.gmane.org/gmane.comp.python.lxml.devel/day=20120309
if join:
for row in rows:
wkt = simplify(row["gis_location"].wkt)
if wkt:
output[row[tablename].id] = wkt
else:
for row in rows:
wkt = simplify(row.wkt)
if wkt:
output[row.id] = wkt
return output
# -------------------------------------------------------------------------
@staticmethod
def get_location_data(resource, attr_fields=None):
"""
Returns the locations, markers and popup tooltips for an XML export
e.g. Feature Layers or Search results (Feature Resources)
e.g. Exports in KML, GeoRSS or GPX format
Called by S3REST: S3Resource.export_tree()
@param: resource - S3Resource instance (required)
@param: attr_fields - list of attr_fields to use instead of reading
from get_vars or looking up in gis_layer_feature
"""
tablename = resource.tablename
if tablename == "gis_feature_query":
# Requires no special handling: XSLT uses normal fields
return dict()
NONE = current.messages["NONE"]
#if DEBUG:
# start = datetime.datetime.now()
db = current.db
s3db = current.s3db
request = current.request
get_vars = request.get_vars
ftable = s3db.gis_layer_feature
layer = None
layer_id = get_vars.get("layer", None)
if layer_id:
# Feature Layer
# e.g. Search results loaded as a Feature Resource layer
layer = db(ftable.layer_id == layer_id).select(ftable.attr_fields,
# @ToDo: Deprecate
ftable.popup_fields,
ftable.individual,
ftable.points,
ftable.trackable,
limitby=(0, 1)
).first()
else:
# e.g. KML, GeoRSS or GPX export
# e.g. Volunteer Layer in Vulnerability module
controller = request.controller
function = request.function
query = (ftable.controller == controller) & \
(ftable.function == function)
layers = db(query).select(ftable.layer_id,
ftable.attr_fields,
ftable.popup_fields, # @ToDo: Deprecate
ftable.style_default, # @ToDo: Rename as no longer really 'style'
ftable.individual,
ftable.points,
ftable.trackable,
)
if len(layers) > 1:
layers.exclude(lambda row: row.style_default == False)
if len(layers) > 1:
# We can't provide details for the whole layer, but need to do a per-record check
return None
if layers:
layer = layers.first()
layer_id = layer.layer_id
if not attr_fields:
# Try get_vars
attr_fields = get_vars.get("attr", [])
if attr_fields:
attr_fields = attr_fields.split(",")
popup_fields = get_vars.get("popup", [])
if popup_fields:
popup_fields = popup_fields.split(",")
if layer:
if not popup_fields:
# Lookup from gis_layer_feature
popup_fields = layer.popup_fields or []
if not attr_fields:
# Lookup from gis_layer_feature
# @ToDo: Consider parsing these from style.popup_format instead
# - see S3Report.geojson()
attr_fields = layer.attr_fields or []
individual = layer.individual
points = layer.points
trackable = layer.trackable
else:
if not popup_fields:
popup_fields = ["name"]
individual = False
points = False
trackable = False
table = resource.table
pkey = table._id.name
attributes = {}
markers = {}
styles = {}
_pkey = table[pkey]
# Ensure there are no ID represents to confuse things
_pkey.represent = None
geojson = current.auth.permission.format == "geojson"
if geojson:
# Build the Attributes now so that representations can be
# looked-up in bulk rather than as a separate lookup per record
if popup_fields:
# Old-style
attr_fields = list(set(popup_fields + attr_fields))
if attr_fields:
attr = {}
# Make a copy for the pkey insertion
fields = list(attr_fields)
if pkey not in fields:
fields.insert(0, pkey)
data = resource.select(fields,
limit = None,
represent = True,
show_links = False)
rfields = data["rfields"]
attr_cols = {}
for f in rfields:
fname = f.fname
selector = f.selector
if fname in attr_fields or selector in attr_fields:
fieldname = f.colname
tname, fname = fieldname.split(".")
try:
ftype = db[tname][fname].type
except AttributeError:
# FieldMethod
ftype = None
attr_cols[fieldname] = (ftype, fname)
_pkey = str(_pkey)
rows = data["rows"]
for row in rows:
record_id = int(row[_pkey])
if attr_cols:
attribute = {}
for fieldname in attr_cols:
represent = row[fieldname]
if represent and represent != NONE:
# Skip empty fields
_attr = attr_cols[fieldname]
ftype = _attr[0]
if ftype == "integer":
if isinstance(represent, lazyT):
# Integer is just a lookup key
represent = s3_unicode(represent)
else:
# Attributes should be numbers not strings
# NB This also relies on decoding within geojson/export.xsl and S3XML.__element2json()
try:
represent = int(represent.replace(",", ""))
except:
# @ToDo: Don't assume this i18n formatting...better to have no represent & then bypass the s3_unicode in select too
# (although we *do* want the represent in the tooltips!)
pass
elif ftype == "double":
# Attributes should be numbers not strings
try:
float_represent = float(represent.replace(",", ""))
int_represent = int(float_represent)
if int_represent == float_represent:
represent = int_represent
else:
represent = float_represent
except:
# @ToDo: Don't assume this i18n formatting...better to have no represent & then bypass the s3_unicode in select too
# (although we *do* want the represent in the tooltips!)
pass
else:
represent = s3_unicode(represent)
attribute[_attr[1]] = represent
attr[record_id] = attribute
attributes[tablename] = attr
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# if layer_id:
# layer_name = db(ftable.id == layer_id).select(ftable.name,
# limitby=(0, 1)
# ).first().name
# else:
# layer_name = "Unknown"
# _debug("Attributes lookup of layer %s completed in %s seconds" % \
# (layer_name, duration))
_markers = get_vars.get("markers", None)
if _markers:
# Add a per-feature Marker
marker_fn = s3db.get_config(tablename, "marker_fn")
if marker_fn:
m = {}
for record in resource:
m[record[pkey]] = marker_fn(record)
else:
# No configuration found so use default marker for all
c, f = tablename.split("_", 1)
m = GIS.get_marker(c, f)
markers[tablename] = m
if individual:
# Add a per-feature Style
# Optionally restrict to a specific Config?
#config = GIS.get_config()
stable = s3db.gis_style
query = (stable.deleted == False) & \
(stable.layer_id == layer_id) & \
(stable.record_id.belongs(resource._ids))
#((stable.config_id == config.id) |
# (stable.config_id == None))
rows = db(query).select(stable.record_id,
stable.style)
                _styles = {}
                for row in rows:
                    _styles[row.record_id] = json.dumps(row.style, separators=SEPARATORS)
                styles[tablename] = _styles
else:
# KML, GeoRSS or GPX
            marker_fn = s3db.get_config(tablename, "marker_fn")
            if marker_fn:
                # Add a per-feature Marker
                m = {}
                for record in resource:
                    m[record[pkey]] = marker_fn(record)
            else:
                # No configuration found so use default marker for all
                c, f = tablename.split("_", 1)
                m = GIS.get_marker(c, f)
            markers[tablename] = m
# Lookup the LatLons now so that it can be done as a single
# query rather than per record
#if DEBUG:
# start = datetime.datetime.now()
latlons = {}
#wkts = {}
geojsons = {}
gtable = s3db.gis_location
if trackable:
# Use S3Track
ids = resource._ids
# Ensure IDs in ascending order
ids.sort()
try:
tracker = S3Trackable(table, record_ids=ids)
except SyntaxError:
# This table isn't trackable
pass
else:
_latlons = tracker.get_location(_fields=[gtable.lat,
gtable.lon])
index = 0
for _id in ids:
_location = _latlons[index]
latlons[_id] = (_location.lat, _location.lon)
index += 1
if not latlons:
join = True
#custom = False
if "location_id" in table.fields:
query = (table.id.belongs(resource._ids)) & \
(table.location_id == gtable.id)
elif "site_id" in table.fields:
stable = s3db.org_site
query = (table.id.belongs(resource._ids)) & \
(table.site_id == stable.site_id) & \
(stable.location_id == gtable.id)
elif tablename == "gis_location":
join = False
query = (table.id.belongs(resource._ids))
else:
# Look at the Context
context = resource.get_config("context")
if context:
location_context = context.get("location")
else:
location_context = None
if not location_context:
# Can't display this resource on the Map
return None
# @ToDo: Proper system rather than this hack_which_works_for_current_usecase
# Resolve selector (which automatically attaches any required component)
rfield = resource.resolve_selector(location_context)
if "." in location_context:
# Component
alias, cfield = location_context.split(".", 1)
try:
component = resource.components[alias]
except:
# Invalid alias
# Can't display this resource on the Map
return None
ctablename = component.tablename
ctable = s3db[ctablename]
query = (table.id.belongs(resource._ids)) & \
rfield.join[ctablename] & \
(ctable[cfield] == gtable.id)
#custom = True
# Clear components again
resource.components = Storage()
# @ToDo:
#elif "$" in location_context:
else:
# Can't display this resource on the Map
return None
if geojson and not points:
geojsons[tablename] = GIS.get_locations(table, query, join, geojson)
# @ToDo: Support Polygons in KML, GPX & GeoRSS
#else:
# wkts[tablename] = GIS.get_locations(table, query, join, geojson)
else:
# Points
rows = db(query).select(table.id,
gtable.lat,
gtable.lon)
#if custom:
# # Add geoJSONs
#elif join:
# @ToDo: Support records with multiple locations
# (e.g. an Org with multiple Facs)
if join:
for row in rows:
_location = row["gis_location"]
latlons[row[tablename].id] = (_location.lat, _location.lon)
else:
for row in rows:
latlons[row.id] = (row.lat, row.lon)
_latlons = {}
if latlons:
_latlons[tablename] = latlons
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# _debug("latlons lookup of layer %s completed in %s seconds" % \
# (layer_name, duration))
# Used by S3XML's gis_encode()
return dict(geojsons = geojsons,
latlons = _latlons,
#wkts = wkts,
attributes = attributes,
markers = markers,
styles = styles,
)
# -------------------------------------------------------------------------
@staticmethod
def get_marker(controller=None,
function=None,
filter=None,
):
"""
Returns a Marker dict
- called by xml.gis_encode() for non-geojson resources
- called by S3Map.widget() if no marker_fn supplied
"""
marker = None
if controller and function:
# Lookup marker in the gis_style table
db = current.db
s3db = current.s3db
ftable = s3db.gis_layer_feature
stable = s3db.gis_style
mtable = s3db.gis_marker
config = GIS.get_config()
query = (ftable.controller == controller) & \
(ftable.function == function) & \
(ftable.aggregate == False)
left = (stable.on((stable.layer_id == ftable.layer_id) & \
(stable.record_id == None) & \
((stable.config_id == config.id) | \
(stable.config_id == None))),
mtable.on(mtable.id == stable.marker_id),
)
if filter:
query &= (ftable.filter == filter)
if current.deployment_settings.get_database_type() == "postgres":
# None is last
orderby = stable.config_id
else:
# None is 1st
orderby = ~stable.config_id
layers = db(query).select(mtable.image,
mtable.height,
mtable.width,
ftable.style_default,
stable.gps_marker,
left=left,
orderby=orderby)
if len(layers) > 1:
layers.exclude(lambda row: row["gis_layer_feature.style_default"] == False)
if len(layers) == 1:
marker = layers.first()
else:
# Can't differentiate
marker = None
if marker:
_marker = marker["gis_marker"]
marker = dict(image=_marker.image,
height=_marker.height,
width=_marker.width,
gps_marker=marker["gis_style"].gps_marker
)
if not marker:
# Default
marker = Marker().as_dict()
return marker
# -------------------------------------------------------------------------
@staticmethod
def get_style(layer_id=None,
aggregate=None,
):
"""
Returns a Style dict
- called by S3Report.geojson()
"""
style = None
if layer_id:
style = Style(layer_id=layer_id,
aggregate=aggregate).as_dict()
if not style:
# Default
style = Style().as_dict()
return style
# -------------------------------------------------------------------------
@staticmethod
def get_screenshot(config_id, temp=True, height=None, width=None):
"""
Save a Screenshot of a saved map
@requires:
PhantomJS http://phantomjs.org
Selenium https://pypi.python.org/pypi/selenium
"""
# @ToDo: allow selection of map_id
map_id = "default_map"
#from selenium import webdriver
# Custom version which is patched to access native PhantomJS functions added to GhostDriver/PhantomJS in:
# https://github.com/watsonmw/ghostdriver/commit/d9b65ed014ed9ff8a5e852cc40e59a0fd66d0cf1
from webdriver import WebDriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.webdriver.support.ui import WebDriverWait
request = current.request
cachepath = os.path.join(request.folder, "static", "cache", "jpg")
if not os.path.exists(cachepath):
try:
os.mkdir(cachepath)
except OSError, os_error:
error = "GIS: JPEG files cannot be saved: %s %s" % \
(cachepath, os_error)
current.log.error(error)
current.session.error = error
redirect(URL(c="gis", f="index", vars={"config_id": config_id}))
# Copy the current working directory to revert back to later
cwd = os.getcwd()
# Change to the Cache folder (can't render directly there from execute_phantomjs)
os.chdir(cachepath)
#driver = webdriver.PhantomJS()
# Disable Proxy for Win32 Network Latency issue
driver = WebDriver(service_args=["--proxy-type=none"])
# Change back for other parts
os.chdir(cwd)
settings = current.deployment_settings
if height is None:
# Set the size of the browser to match the map
height = settings.get_gis_map_height()
if width is None:
width = settings.get_gis_map_width()
# For Screenshots
#height = 410
#width = 820
driver.set_window_size(width + 5, height + 20)
# Load the homepage
# (Cookie needs to be set on same domain as it takes effect)
base_url = "%s/%s" % (settings.get_base_public_url(),
request.application)
driver.get(base_url)
if not current.auth.override:
# Reuse current session to allow access to ACL-controlled resources
response = current.response
session_id = response.session_id
driver.add_cookie({"name": response.session_id_name,
"value": session_id,
"path": "/",
})
# For sync connections
current.session._unlock(response)
# Load the map
url = "%s/gis/map_viewing_client?print=1&config=%s" % (base_url,
config_id)
driver.get(url)
        # Wait for map to load (including its layers)
# Alternative approach: https://raw.githubusercontent.com/ariya/phantomjs/master/examples/waitfor.js
def map_loaded(driver):
test = '''return S3.gis.maps['%s'].s3.loaded''' % map_id
try:
result = driver.execute_script(test)
except WebDriverException, e:
result = False
return result
try:
# Wait for up to 100s (large screenshots take a long time for layers to load)
WebDriverWait(driver, 100).until(map_loaded)
except TimeoutException, e:
driver.quit()
current.log.error("Timeout: %s" % e)
return None
# Save the Output
# @ToDo: Can we use StringIO instead of cluttering filesystem?
# @ToDo: Allow option of PDF (as well as JPG)
# https://github.com/ariya/phantomjs/blob/master/examples/rasterize.js
if temp:
filename = "%s.jpg" % session_id
else:
filename = "config_%s.jpg" % config_id
# Cannot control file size (no access to clipRect) or file format
#driver.save_screenshot(os.path.join(cachepath, filename))
#driver.page.clipRect = {"top": 10,
# "left": 5,
# "width": width,
# "height": height
# }
#driver.page.render(filename, {"format": "jpeg", "quality": "100"})
script = '''
var page = this;
page.clipRect = {top: 10,
left: 5,
width: %(width)s,
height: %(height)s
};
page.render('%(filename)s', {format: 'jpeg', quality: '100'});''' % \
dict(width = width,
height = height,
filename = filename,
)
try:
result = driver.execute_phantomjs(script)
except WebDriverException, e:
driver.quit()
current.log.error("WebDriver crashed: %s" % e)
return None
driver.quit()
if temp:
            # This was a temporary config for creating the screenshot, so delete it now
ctable = current.s3db.gis_config
the_set = current.db(ctable.id == config_id)
config = the_set.select(ctable.temp,
limitby=(0, 1)
).first()
try:
if config.temp:
the_set.delete()
except:
# Record not found?
pass
# Pass the result back to the User
return filename
# -------------------------------------------------------------------------
@staticmethod
def get_shapefile_geojson(resource):
"""
Lookup Shapefile Layer polygons once per layer and not per-record
Called by S3REST: S3Resource.export_tree()
@ToDo: Vary simplification level & precision by Zoom level
- store this in the style?
"""
db = current.db
tablename = "gis_layer_shapefile_%s" % resource._ids[0]
table = db[tablename]
query = resource.get_query()
fields = []
fappend = fields.append
for f in table.fields:
if f not in ("layer_id", "lat", "lon"):
fappend(f)
attributes = {}
geojsons = {}
settings = current.deployment_settings
tolerance = settings.get_gis_simplify_tolerance()
if settings.get_gis_spatialdb():
# Do the Simplify & GeoJSON direct from the DB
fields.remove("the_geom")
fields.remove("wkt")
_fields = [table[f] for f in fields]
rows = db(query).select(table.the_geom.st_simplify(tolerance).st_asgeojson(precision=4).with_alias("geojson"),
*_fields)
for row in rows:
_row = row[tablename]
_id = _row.id
geojsons[_id] = row.geojson
_attributes = {}
for f in fields:
if f not in ("id"):
_attributes[f] = _row[f]
attributes[_id] = _attributes
else:
_fields = [table[f] for f in fields]
rows = db(query).select(*_fields)
simplify = GIS.simplify
for row in rows:
# Simplify the polygon to reduce download size
geojson = simplify(row.wkt, tolerance=tolerance,
output="geojson")
_id = row.id
if geojson:
geojsons[_id] = geojson
_attributes = {}
for f in fields:
if f not in ("id", "wkt"):
_attributes[f] = row[f]
attributes[_id] = _attributes
_attributes = {}
_attributes[tablename] = attributes
_geojsons = {}
_geojsons[tablename] = geojsons
# return 'locations'
return dict(attributes = _attributes,
geojsons = _geojsons)
# -------------------------------------------------------------------------
@staticmethod
def get_theme_geojson(resource):
"""
Lookup Theme Layer polygons once per layer and not per-record
Called by S3REST: S3Resource.export_tree()
@ToDo: Vary precision by Lx
- store this (& tolerance map) in the style?
"""
s3db = current.s3db
tablename = "gis_theme_data"
table = s3db.gis_theme_data
gtable = s3db.gis_location
query = (table.id.belongs(resource._ids)) & \
(table.location_id == gtable.id)
geojsons = {}
# @ToDo: How to get the tolerance to vary by level?
# - add Stored Procedure?
#if current.deployment_settings.get_gis_spatialdb():
# # Do the Simplify & GeoJSON direct from the DB
# rows = current.db(query).select(table.id,
# gtable.the_geom.st_simplify(0.01).st_asgeojson(precision=4).with_alias("geojson"))
# for row in rows:
# geojsons[row["gis_theme_data.id"]] = row.geojson
#else:
rows = current.db(query).select(table.id,
gtable.level,
gtable.wkt)
simplify = GIS.simplify
tolerance = {"L0": 0.01,
"L1": 0.005,
"L2": 0.00125,
"L3": 0.000625,
"L4": 0.0003125,
"L5": 0.00015625,
}
for row in rows:
grow = row.gis_location
# Simplify the polygon to reduce download size
geojson = simplify(grow.wkt,
tolerance=tolerance[grow.level],
output="geojson")
if geojson:
geojsons[row["gis_theme_data.id"]] = geojson
_geojsons = {}
_geojsons[tablename] = geojsons
# Return 'locations'
return dict(geojsons = _geojsons)
# -------------------------------------------------------------------------
@staticmethod
def greatCircleDistance(lat1, lon1, lat2, lon2, quick=True):
"""
Calculate the shortest distance (in km) over the earth's sphere between 2 points
Formulae from: http://www.movable-type.co.uk/scripts/latlong.html
(NB We could also use PostGIS functions, where possible, instead of this query)
"""
import math
# shortcuts
cos = math.cos
sin = math.sin
radians = math.radians
if quick:
# Spherical Law of Cosines (accurate down to around 1m & computationally quick)
lat1 = radians(lat1)
lat2 = radians(lat2)
lon1 = radians(lon1)
lon2 = radians(lon2)
distance = math.acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon2 - lon1)) * RADIUS_EARTH
return distance
else:
# Haversine
#asin = math.asin
sqrt = math.sqrt
pow = math.pow
dLat = radians(lat2 - lat1)
dLon = radians(lon2 - lon1)
a = pow(sin(dLat / 2), 2) + cos(radians(lat1)) * cos(radians(lat2)) * pow(sin(dLon / 2), 2)
c = 2 * math.atan2(sqrt(a), sqrt(1 - a))
#c = 2 * asin(sqrt(a)) # Alternate version
# Convert radians to kilometers
distance = RADIUS_EARTH * c
return distance
# -------------------------------------------------------------------------
@staticmethod
def create_poly(feature):
"""
Create a .poly file for OpenStreetMap exports
http://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Format
"""
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
name = feature.name
if "wkt" in feature:
wkt = feature.wkt
else:
# WKT not included by default in feature, so retrieve this now
table = current.s3db.gis_location
wkt = current.db(table.id == feature.id).select(table.wkt,
limitby=(0, 1)
).first().wkt
try:
shape = wkt_loads(wkt)
except:
error = "Invalid WKT: %s" % name
current.log.error(error)
return error
geom_type = shape.geom_type
if geom_type == "MultiPolygon":
polygons = shape.geoms
elif geom_type == "Polygon":
polygons = [shape]
else:
error = "Unsupported Geometry: %s, %s" % (name, geom_type)
current.log.error(error)
return error
if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp
TEMP = os.path.join(os.getcwd(), "temp")
else:
import tempfile
TEMP = tempfile.gettempdir()
filename = "%s.poly" % name
filepath = os.path.join(TEMP, filename)
File = open(filepath, "w")
File.write("%s\n" % filename)
count = 1
for polygon in polygons:
File.write("%s\n" % count)
points = polygon.exterior.coords
for point in points:
File.write("\t%s\t%s\n" % (point[0], point[1]))
File.write("END\n")
count += 1
File.write("END\n")
File.close()
return None
# -------------------------------------------------------------------------
@staticmethod
def export_admin_areas(countries=[],
levels=("L0", "L1", "L2", "L3"),
format="geojson",
simplify=0.01,
decimals=4,
):
"""
Export admin areas to /static/cache for use by interactive web-mapping services
- designed for use by the Vulnerability Mapping
@param countries: list of ISO2 country codes
@param levels: list of which Lx levels to export
@param format: Only GeoJSON supported for now (may add KML &/or OSM later)
@param simplify: tolerance for the simplification algorithm. False to disable simplification
@param decimals: number of decimal points to include in the coordinates
"""
db = current.db
s3db = current.s3db
table = s3db.gis_location
ifield = table.id
if countries:
ttable = s3db.gis_location_tag
cquery = (table.level == "L0") & \
(table.end_date == None) & \
(ttable.location_id == ifield) & \
(ttable.tag == "ISO2") & \
(ttable.value.belongs(countries))
else:
# All countries
cquery = (table.level == "L0") & \
(table.end_date == None) & \
(table.deleted != True)
if current.deployment_settings.get_gis_spatialdb():
spatial = True
_field = table.the_geom
if simplify:
# Do the Simplify & GeoJSON direct from the DB
field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
else:
# Do the GeoJSON direct from the DB
field = _field.st_asgeojson(precision=decimals).with_alias("geojson")
else:
spatial = False
field = table.wkt
if simplify:
_simplify = GIS.simplify
else:
from shapely.wkt import loads as wkt_loads
from ..geojson import dumps
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
folder = os.path.join(current.request.folder, "static", "cache")
features = []
append = features.append
if "L0" in levels:
# Reduce the decimals in output by 1
_decimals = decimals -1
if spatial:
if simplify:
field = _field.st_simplify(simplify).st_asgeojson(precision=_decimals).with_alias("geojson")
else:
field = _field.st_asgeojson(precision=_decimals).with_alias("geojson")
countries = db(cquery).select(ifield,
field)
for row in countries:
if spatial:
id = row["gis_location"].id
geojson = row.geojson
elif simplify:
id = row.id
wkt = row.wkt
if wkt:
geojson = _simplify(wkt, tolerance=simplify,
decimals=_decimals,
output="geojson")
else:
name = db(table.id == id).select(table.name,
limitby=(0, 1)).first().name
print >> sys.stderr, "No WKT: L0 %s %s" % (name, id)
continue
else:
id = row.id
shape = wkt_loads(row.wkt)
# Compact Encoding
geojson = dumps(shape, separators=SEPARATORS)
if geojson:
f = dict(type = "Feature",
properties = {"id": id},
geometry = json.loads(geojson)
)
append(f)
if features:
data = dict(type = "FeatureCollection",
features = features
)
# Output to file
filename = os.path.join(folder, "countries.geojson")
File = open(filename, "w")
File.write(json.dumps(data, separators=SEPARATORS))
File.close()
q1 = (table.level == "L1") & \
(table.deleted != True) & \
(table.end_date == None)
q2 = (table.level == "L2") & \
(table.deleted != True) & \
(table.end_date == None)
q3 = (table.level == "L3") & \
(table.deleted != True) & \
(table.end_date == None)
q4 = (table.level == "L4") & \
(table.deleted != True) & \
(table.end_date == None)
if "L1" in levels:
if "L0" not in levels:
countries = db(cquery).select(ifield)
if simplify:
# We want greater precision when zoomed-in more
simplify = simplify / 2 # 0.005 with default setting
if spatial:
field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
for country in countries:
if not spatial or "L0" not in levels:
_id = country.id
else:
_id = country["gis_location"].id
query = q1 & (table.parent == _id)
features = []
append = features.append
rows = db(query).select(ifield,
field)
for row in rows:
if spatial:
id = row["gis_location"].id
geojson = row.geojson
elif simplify:
id = row.id
wkt = row.wkt
if wkt:
geojson = _simplify(wkt, tolerance=simplify,
decimals=decimals,
output="geojson")
else:
name = db(table.id == id).select(table.name,
limitby=(0, 1)).first().name
print >> sys.stderr, "No WKT: L1 %s %s" % (name, id)
continue
else:
id = row.id
shape = wkt_loads(row.wkt)
# Compact Encoding
geojson = dumps(shape, separators=SEPARATORS)
if geojson:
f = dict(type = "Feature",
properties = {"id": id},
geometry = json.loads(geojson)
)
append(f)
if features:
data = dict(type = "FeatureCollection",
features = features
)
# Output to file
filename = os.path.join(folder, "1_%s.geojson" % _id)
File = open(filename, "w")
File.write(json.dumps(data, separators=SEPARATORS))
File.close()
else:
current.log.debug("No L1 features in %s" % _id)
if "L2" in levels:
if "L0" not in levels and "L1" not in levels:
countries = db(cquery).select(ifield)
if simplify:
# We want greater precision when zoomed-in more
simplify = simplify / 4 # 0.00125 with default setting
if spatial:
field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
for country in countries:
if not spatial or "L0" not in levels:
id = country.id
else:
id = country["gis_location"].id
query = q1 & (table.parent == id)
l1s = db(query).select(ifield)
for l1 in l1s:
query = q2 & (table.parent == l1.id)
features = []
append = features.append
rows = db(query).select(ifield,
field)
for row in rows:
if spatial:
id = row["gis_location"].id
geojson = row.geojson
elif simplify:
id = row.id
wkt = row.wkt
if wkt:
geojson = _simplify(wkt, tolerance=simplify,
decimals=decimals,
output="geojson")
else:
name = db(table.id == id).select(table.name,
limitby=(0, 1)).first().name
print >> sys.stderr, "No WKT: L2 %s %s" % (name, id)
continue
else:
id = row.id
shape = wkt_loads(row.wkt)
# Compact Encoding
geojson = dumps(shape, separators=SEPARATORS)
if geojson:
f = dict(type = "Feature",
properties = {"id": id},
geometry = json.loads(geojson)
)
append(f)
if features:
data = dict(type = "FeatureCollection",
features = features
)
# Output to file
filename = os.path.join(folder, "2_%s.geojson" % l1.id)
File = open(filename, "w")
File.write(json.dumps(data, separators=SEPARATORS))
File.close()
else:
current.log.debug("No L2 features in %s" % l1.id)
if "L3" in levels:
if "L0" not in levels and "L1" not in levels and "L2" not in levels:
countries = db(cquery).select(ifield)
if simplify:
# We want greater precision when zoomed-in more
simplify = simplify / 2 # 0.000625 with default setting
if spatial:
field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
for country in countries:
if not spatial or "L0" not in levels:
id = country.id
else:
id = country["gis_location"].id
query = q1 & (table.parent == id)
l1s = db(query).select(ifield)
for l1 in l1s:
query = q2 & (table.parent == l1.id)
l2s = db(query).select(ifield)
for l2 in l2s:
query = q3 & (table.parent == l2.id)
features = []
append = features.append
rows = db(query).select(ifield,
field)
for row in rows:
if spatial:
id = row["gis_location"].id
geojson = row.geojson
elif simplify:
id = row.id
wkt = row.wkt
if wkt:
geojson = _simplify(wkt, tolerance=simplify,
decimals=decimals,
output="geojson")
else:
name = db(table.id == id).select(table.name,
limitby=(0, 1)).first().name
print >> sys.stderr, "No WKT: L3 %s %s" % (name, id)
continue
else:
id = row.id
shape = wkt_loads(row.wkt)
# Compact Encoding
geojson = dumps(shape, separators=SEPARATORS)
if geojson:
f = dict(type = "Feature",
properties = {"id": id},
geometry = json.loads(geojson)
)
append(f)
if features:
data = dict(type = "FeatureCollection",
features = features
)
# Output to file
filename = os.path.join(folder, "3_%s.geojson" % l2.id)
File = open(filename, "w")
File.write(json.dumps(data, separators=SEPARATORS))
File.close()
else:
current.log.debug("No L3 features in %s" % l2.id)
if "L4" in levels:
if "L0" not in levels and "L1" not in levels and "L2" not in levels and "L3" not in levels:
countries = db(cquery).select(ifield)
if simplify:
# We want greater precision when zoomed-in more
simplify = simplify / 2 # 0.0003125 with default setting
if spatial:
field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
for country in countries:
if not spatial or "L0" not in levels:
id = country.id
else:
id = country["gis_location"].id
query = q1 & (table.parent == id)
l1s = db(query).select(ifield)
for l1 in l1s:
query = q2 & (table.parent == l1.id)
l2s = db(query).select(ifield)
for l2 in l2s:
query = q3 & (table.parent == l2.id)
l3s = db(query).select(ifield)
for l3 in l3s:
query = q4 & (table.parent == l3.id)
features = []
append = features.append
rows = db(query).select(ifield,
field)
for row in rows:
if spatial:
id = row["gis_location"].id
geojson = row.geojson
elif simplify:
id = row.id
wkt = row.wkt
if wkt:
geojson = _simplify(wkt, tolerance=simplify,
decimals=decimals,
output="geojson")
else:
name = db(table.id == id).select(table.name,
limitby=(0, 1)).first().name
print >> sys.stderr, "No WKT: L4 %s %s" % (name, id)
continue
else:
id = row.id
shape = wkt_loads(row.wkt)
# Compact Encoding
geojson = dumps(shape, separators=SEPARATORS)
if geojson:
f = dict(type = "Feature",
properties = {"id": id},
geometry = json.loads(geojson)
)
append(f)
if features:
data = dict(type = "FeatureCollection",
features = features
)
# Output to file
filename = os.path.join(folder, "4_%s.geojson" % l3.id)
File = open(filename, "w")
File.write(json.dumps(data, separators=SEPARATORS))
File.close()
else:
current.log.debug("No L4 features in %s" % l3.id)
# -------------------------------------------------------------------------
def import_admin_areas(self,
source="gadmv1",
countries=[],
levels=["L0", "L1", "L2"]
):
"""
Import Admin Boundaries into the Locations table
@param source - Source to get the data from.
Currently only GADM is supported: http://gadm.org
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
@param levels - Which levels of the hierarchy to import.
defaults to all 3 supported levels
"""
if source == "gadmv1":
try:
from osgeo import ogr
except:
current.log.error("Unable to import ogr. Please install python-gdal bindings: GDAL-1.8.1+")
return
if "L0" in levels:
self.import_gadm1_L0(ogr, countries=countries)
if "L1" in levels:
self.import_gadm1(ogr, "L1", countries=countries)
if "L2" in levels:
self.import_gadm1(ogr, "L2", countries=countries)
current.log.debug("All done!")
elif source == "gadmv1":
try:
from osgeo import ogr
except:
current.log.error("Unable to import ogr. Please install python-gdal bindings: GDAL-1.8.1+")
return
if "L0" in levels:
self.import_gadm2(ogr, "L0", countries=countries)
if "L1" in levels:
self.import_gadm2(ogr, "L1", countries=countries)
if "L2" in levels:
self.import_gadm2(ogr, "L2", countries=countries)
current.log.debug("All done!")
else:
current.log.warning("Only GADM is currently supported")
return
return
# -------------------------------------------------------------------------
@staticmethod
def import_gadm1_L0(ogr, countries=[]):
"""
Import L0 Admin Boundaries into the Locations table from GADMv1
- designed to be called from import_admin_areas()
- assumes that basic prepop has been done, so that no new records need to be created
@param ogr - The OGR Python module
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
"""
db = current.db
s3db = current.s3db
ttable = s3db.gis_location_tag
table = db.gis_location
layer = {
"url" : "http://gadm.org/data/gadm_v1_lev0_shp.zip",
"zipfile" : "gadm_v1_lev0_shp.zip",
"shapefile" : "gadm1_lev0",
"codefield" : "ISO2", # This field is used to uniquely identify the L0 for updates
"code2field" : "ISO" # This field is used to uniquely identify the L0 for parenting the L1s
}
# Copy the current working directory to revert back to later
cwd = os.getcwd()
# Create the working directory
TEMP = os.path.join(cwd, "temp")
if not os.path.exists(TEMP): # use web2py/temp/GADMv1 as a cache
import tempfile
TEMP = tempfile.gettempdir()
tempPath = os.path.join(TEMP, "GADMv1")
if not os.path.exists(tempPath):
try:
os.mkdir(tempPath)
except OSError:
current.log.error("Unable to create temp folder %s!" % tempPath)
return
# Set the current working directory
os.chdir(tempPath)
layerName = layer["shapefile"]
# Check if file has already been downloaded
fileName = layer["zipfile"]
if not os.path.isfile(fileName):
# Download the file
from gluon.tools import fetch
url = layer["url"]
current.log.debug("Downloading %s" % url)
try:
file = fetch(url)
except urllib2.URLError, exception:
current.log.error(exception)
return
fp = StringIO(file)
else:
current.log.debug("Using existing file %s" % fileName)
fp = open(fileName)
# Unzip it
current.log.debug("Unzipping %s" % layerName)
import zipfile
myfile = zipfile.ZipFile(fp)
for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
fileName = "%s.%s" % (layerName, ext)
file = myfile.read(fileName)
f = open(fileName, "w")
f.write(file)
f.close()
myfile.close()
# Use OGR to read Shapefile
current.log.debug("Opening %s.shp" % layerName)
ds = ogr.Open("%s.shp" % layerName)
if ds is None:
current.log.error("Open failed.\n")
return
lyr = ds.GetLayerByName(layerName)
lyr.ResetReading()
codeField = layer["codefield"]
code2Field = layer["code2field"]
for feat in lyr:
code = feat.GetField(codeField)
if not code:
# Skip the entries which aren't countries
continue
if countries and code not in countries:
# Skip the countries which we're not interested in
continue
geom = feat.GetGeometryRef()
if geom is not None:
if geom.GetGeometryType() == ogr.wkbPoint:
pass
else:
query = (table.id == ttable.location_id) & \
(ttable.tag == "ISO2") & \
(ttable.value == code)
wkt = geom.ExportToWkt()
if wkt.startswith("LINESTRING"):
gis_feature_type = 2
elif wkt.startswith("POLYGON"):
gis_feature_type = 3
elif wkt.startswith("MULTIPOINT"):
gis_feature_type = 4
elif wkt.startswith("MULTILINESTRING"):
gis_feature_type = 5
elif wkt.startswith("MULTIPOLYGON"):
gis_feature_type = 6
elif wkt.startswith("GEOMETRYCOLLECTION"):
gis_feature_type = 7
code2 = feat.GetField(code2Field)
#area = feat.GetField("Shape_Area")
try:
id = db(query).select(table.id,
limitby=(0, 1)).first().id
query = (table.id == id)
db(query).update(gis_feature_type=gis_feature_type,
wkt=wkt)
ttable.insert(location_id = id,
tag = "ISO3",
value = code2)
#ttable.insert(location_id = location_id,
# tag = "area",
# value = area)
except db._adapter.driver.OperationalError, exception:
                        current.log.error(exception)
else:
current.log.debug("No geometry\n")
# Close the shapefile
ds.Destroy()
db.commit()
# Revert back to the working directory as before.
os.chdir(cwd)
return
# -------------------------------------------------------------------------
def import_gadm1(self, ogr, level="L1", countries=[]):
"""
            Import L1/L2 Admin Boundaries into the Locations table from GADMv1
- designed to be called from import_admin_areas()
- assumes a fresh database with just Countries imported
@param ogr - The OGR Python module
@param level - "L1" or "L2"
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
"""
if level == "L1":
layer = {
"url" : "http://gadm.org/data/gadm_v1_lev1_shp.zip",
"zipfile" : "gadm_v1_lev1_shp.zip",
"shapefile" : "gadm1_lev1",
"namefield" : "NAME_1",
# Uniquely identify the L1 for updates
"sourceCodeField" : "ID_1",
"edenCodeField" : "GADM1",
# Uniquely identify the L0 for parenting the L1s
"parent" : "L0",
"parentSourceCodeField" : "ISO",
"parentEdenCodeField" : "ISO3",
}
elif level == "L2":
layer = {
"url" : "http://biogeo.ucdavis.edu/data/gadm/gadm_v1_lev2_shp.zip",
"zipfile" : "gadm_v1_lev2_shp.zip",
"shapefile" : "gadm_v1_lev2",
"namefield" : "NAME_2",
# Uniquely identify the L2 for updates
"sourceCodeField" : "ID_2",
"edenCodeField" : "GADM2",
                # Uniquely identify the L1 for parenting the L2s
"parent" : "L1",
"parentSourceCodeField" : "ID_1",
"parentEdenCodeField" : "GADM1",
}
else:
current.log.warning("Level %s not supported!" % level)
return
import csv
import shutil
import zipfile
db = current.db
s3db = current.s3db
cache = s3db.cache
table = s3db.gis_location
ttable = s3db.gis_location_tag
csv.field_size_limit(2**20 * 100) # 100 megs
# Not all the data is encoded like this
# (unable to determine encoding - appears to be damaged in source):
# Azerbaijan L1
# Vietnam L1 & L2
ENCODING = "cp1251"
# from http://docs.python.org/library/csv.html#csv-examples
def latin_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
for row in csv.reader(unicode_csv_data):
yield [unicode(cell, ENCODING) for cell in row]
def latin_dict_reader(data, dialect=csv.excel, **kwargs):
reader = latin_csv_reader(data, dialect=dialect, **kwargs)
headers = reader.next()
for r in reader:
yield dict(zip(headers, r))
# Copy the current working directory to revert back to later
cwd = os.getcwd()
# Create the working directory
TEMP = os.path.join(cwd, "temp")
if not os.path.exists(TEMP): # use web2py/temp/GADMv1 as a cache
import tempfile
TEMP = tempfile.gettempdir()
tempPath = os.path.join(TEMP, "GADMv1")
if not os.path.exists(tempPath):
try:
os.mkdir(tempPath)
except OSError:
current.log.error("Unable to create temp folder %s!" % tempPath)
return
# Set the current working directory
os.chdir(tempPath)
# Remove any existing CSV folder to allow the new one to be created
try:
shutil.rmtree("CSV")
except OSError:
# Folder doesn't exist, so should be creatable
pass
layerName = layer["shapefile"]
# Check if file has already been downloaded
fileName = layer["zipfile"]
if not os.path.isfile(fileName):
# Download the file
from gluon.tools import fetch
url = layer["url"]
current.log.debug("Downloading %s" % url)
try:
file = fetch(url)
except urllib2.URLError, exception:
current.log.error(exception)
# Revert back to the working directory as before.
os.chdir(cwd)
return
fp = StringIO(file)
else:
current.log.debug("Using existing file %s" % fileName)
fp = open(fileName)
# Unzip it
current.log.debug("Unzipping %s" % layerName)
myfile = zipfile.ZipFile(fp)
for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
fileName = "%s.%s" % (layerName, ext)
file = myfile.read(fileName)
f = open(fileName, "w")
f.write(file)
f.close()
myfile.close()
# Convert to CSV
current.log.debug("Converting %s.shp to CSV" % layerName)
# Simplified version of generic Shapefile Importer:
# http://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/ogr2ogr.py
bSkipFailures = False
nGroupTransactions = 200
nFIDToFetch = ogr.NullFID
inputFileName = "%s.shp" % layerName
inputDS = ogr.Open(inputFileName, False)
outputFileName = "CSV"
outputDriver = ogr.GetDriverByName("CSV")
outputDS = outputDriver.CreateDataSource(outputFileName, options=[])
# GADM only has 1 layer/source
inputLayer = inputDS.GetLayer(0)
inputFDefn = inputLayer.GetLayerDefn()
# Create the output Layer
outputLayer = outputDS.CreateLayer(layerName)
# Copy all Fields
#papszFieldTypesToString = []
inputFieldCount = inputFDefn.GetFieldCount()
panMap = [-1 for i in range(inputFieldCount)]
outputFDefn = outputLayer.GetLayerDefn()
nDstFieldCount = 0
if outputFDefn is not None:
nDstFieldCount = outputFDefn.GetFieldCount()
for iField in range(inputFieldCount):
inputFieldDefn = inputFDefn.GetFieldDefn(iField)
oFieldDefn = ogr.FieldDefn(inputFieldDefn.GetNameRef(),
inputFieldDefn.GetType())
oFieldDefn.SetWidth(inputFieldDefn.GetWidth())
oFieldDefn.SetPrecision(inputFieldDefn.GetPrecision())
# The field may have been already created at layer creation
            iDstField = -1
if outputFDefn is not None:
iDstField = outputFDefn.GetFieldIndex(oFieldDefn.GetNameRef())
if iDstField >= 0:
panMap[iField] = iDstField
elif outputLayer.CreateField(oFieldDefn) == 0:
# now that we've created a field, GetLayerDefn() won't return NULL
if outputFDefn is None:
outputFDefn = outputLayer.GetLayerDefn()
panMap[iField] = nDstFieldCount
nDstFieldCount = nDstFieldCount + 1
# Transfer features
nFeaturesInTransaction = 0
#iSrcZField = -1
inputLayer.ResetReading()
if nGroupTransactions > 0:
outputLayer.StartTransaction()
while True:
poDstFeature = None
if nFIDToFetch != ogr.NullFID:
# Only fetch feature on first pass.
if nFeaturesInTransaction == 0:
poFeature = inputLayer.GetFeature(nFIDToFetch)
else:
poFeature = None
else:
poFeature = inputLayer.GetNextFeature()
if poFeature is None:
break
nParts = 0
nIters = 1
for iPart in range(nIters):
nFeaturesInTransaction = nFeaturesInTransaction + 1
if nFeaturesInTransaction == nGroupTransactions:
outputLayer.CommitTransaction()
outputLayer.StartTransaction()
nFeaturesInTransaction = 0
poDstFeature = ogr.Feature(outputLayer.GetLayerDefn())
if poDstFeature.SetFromWithMap(poFeature, 1, panMap) != 0:
if nGroupTransactions > 0:
outputLayer.CommitTransaction()
current.log.error("Unable to translate feature %d from layer %s" % \
(poFeature.GetFID(), inputFDefn.GetName()))
# Revert back to the working directory as before.
os.chdir(cwd)
return
poDstGeometry = poDstFeature.GetGeometryRef()
if poDstGeometry is not None:
if nParts > 0:
# For -explodecollections, extract the iPart(th) of the geometry
poPart = poDstGeometry.GetGeometryRef(iPart).Clone()
poDstFeature.SetGeometryDirectly(poPart)
poDstGeometry = poPart
if outputLayer.CreateFeature(poDstFeature) != 0 and \
not bSkipFailures:
if nGroupTransactions > 0:
outputLayer.RollbackTransaction()
# Revert back to the working directory as before.
os.chdir(cwd)
return
if nGroupTransactions > 0:
outputLayer.CommitTransaction()
# Cleanup
outputDS.Destroy()
inputDS.Destroy()
fileName = "%s.csv" % layerName
filePath = os.path.join("CSV", fileName)
os.rename(filePath, fileName)
os.removedirs("CSV")
# Use OGR to read SHP for geometry
current.log.debug("Opening %s.shp" % layerName)
ds = ogr.Open("%s.shp" % layerName)
if ds is None:
current.log.debug("Open failed.\n")
# Revert back to the working directory as before.
os.chdir(cwd)
return
lyr = ds.GetLayerByName(layerName)
lyr.ResetReading()
# Use CSV for Name
current.log.debug("Opening %s.csv" % layerName)
rows = latin_dict_reader(open("%s.csv" % layerName))
nameField = layer["namefield"]
sourceCodeField = layer["sourceCodeField"]
edenCodeField = layer["edenCodeField"]
parentSourceCodeField = layer["parentSourceCodeField"]
parentLevel = layer["parent"]
parentEdenCodeField = layer["parentEdenCodeField"]
parentCodeQuery = (ttable.tag == parentEdenCodeField)
count = 0
for row in rows:
# Read Attributes
feat = lyr[count]
parentCode = feat.GetField(parentSourceCodeField)
query = (table.level == parentLevel) & \
parentCodeQuery & \
(ttable.value == parentCode)
parent = db(query).select(table.id,
ttable.value,
limitby=(0, 1),
cache=cache).first()
if not parent:
# Skip locations for which we don't have a valid parent
current.log.warning("Skipping - cannot find parent with key: %s, value: %s" % \
(parentEdenCodeField, parentCode))
count += 1
continue
if countries:
# Skip the countries which we're not interested in
if level == "L1":
if parent["gis_location_tag"].value not in countries:
#current.log.warning("Skipping %s as not in countries list" % parent["gis_location_tag"].value)
count += 1
continue
else:
# Check grandparent
country = self.get_parent_country(parent.id,
key_type="code")
if country not in countries:
count += 1
continue
            # This is read from the CSV in order to handle the encoding
            name = row.pop(nameField)
            name = name.encode("utf8")
code = feat.GetField(sourceCodeField)
#area = feat.GetField("Shape_Area")
geom = feat.GetGeometryRef()
if geom is not None:
if geom.GetGeometryType() == ogr.wkbPoint:
                    # NB OGR points in EPSG:4326 are (X=lon, Y=lat)
                    lon = geom.GetX()
                    lat = geom.GetY()
id = table.insert(name=name,
level=level,
gis_feature_type=1,
lat=lat,
lon=lon,
parent=parent.id)
ttable.insert(location_id = id,
tag = edenCodeField,
value = code)
# ttable.insert(location_id = id,
# tag = "area",
# value = area)
else:
wkt = geom.ExportToWkt()
if wkt.startswith("LINESTRING"):
gis_feature_type = 2
elif wkt.startswith("POLYGON"):
gis_feature_type = 3
elif wkt.startswith("MULTIPOINT"):
gis_feature_type = 4
elif wkt.startswith("MULTILINESTRING"):
gis_feature_type = 5
elif wkt.startswith("MULTIPOLYGON"):
gis_feature_type = 6
elif wkt.startswith("GEOMETRYCOLLECTION"):
gis_feature_type = 7
id = table.insert(name=name,
level=level,
gis_feature_type=gis_feature_type,
wkt=wkt,
parent=parent.id)
ttable.insert(location_id = id,
tag = edenCodeField,
value = code)
# ttable.insert(location_id = id,
# tag = "area",
# value = area)
else:
current.log.debug("No geometry\n")
count += 1
# Close the shapefile
ds.Destroy()
db.commit()
current.log.debug("Updating Location Tree...")
try:
self.update_location_tree()
except MemoryError:
# If doing all L2s, it can break memory limits
# @ToDo: Check now that we're doing by level
current.log.critical("Memory error when trying to update_location_tree()!")
db.commit()
# Revert back to the working directory as before.
os.chdir(cwd)
return
# -------------------------------------------------------------------------
@staticmethod
def import_gadm2(ogr, level="L0", countries=[]):
"""
Import Admin Boundaries into the Locations table from GADMv2
- designed to be called from import_admin_areas()
- assumes that basic prepop has been done, so that no new L0 records need to be created
@param ogr - The OGR Python module
            @param level - "L0", "L1" or "L2"
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
@ToDo: Complete this
- not currently possible to get all data from the 1 file easily
- no ISO2
- needs updating for gis_location_tag model
- only the lowest available levels accessible
- use GADMv1 for L0, L1, L2 & GADMv2 for specific lower?
"""
if level == "L0":
codeField = "ISO2" # This field is used to uniquely identify the L0 for updates
code2Field = "ISO" # This field is used to uniquely identify the L0 for parenting the L1s
elif level == "L1":
#nameField = "NAME_1"
codeField = "ID_1" # This field is used to uniquely identify the L1 for updates
code2Field = "ISO" # This field is used to uniquely identify the L0 for parenting the L1s
#parent = "L0"
#parentCode = "code2"
elif level == "L2":
#nameField = "NAME_2"
codeField = "ID_2" # This field is used to uniquely identify the L2 for updates
code2Field = "ID_1" # This field is used to uniquely identify the L1 for parenting the L2s
#parent = "L1"
#parentCode = "code"
else:
current.log.error("Level %s not supported!" % level)
return
db = current.db
s3db = current.s3db
table = s3db.gis_location
url = "http://gadm.org/data2/gadm_v2_shp.zip"
zipfile = "gadm_v2_shp.zip"
shapefile = "gadm2"
# Copy the current working directory to revert back to later
old_working_directory = os.getcwd()
# Create the working directory
if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp/GADMv2 as a cache
TEMP = os.path.join(os.getcwd(), "temp")
else:
import tempfile
TEMP = tempfile.gettempdir()
tempPath = os.path.join(TEMP, "GADMv2")
try:
os.mkdir(tempPath)
except OSError:
# Folder already exists - reuse
pass
# Set the current working directory
os.chdir(tempPath)
layerName = shapefile
# Check if file has already been downloaded
fileName = zip_filename
if not os.path.isfile(fileName):
# Download the file
from gluon.tools import fetch
current.log.debug("Downloading %s" % url)
try:
file = fetch(url)
except urllib2.URLError as exception:
current.log.error(exception)
return
fp = StringIO(file)
else:
current.log.debug("Using existing file %s" % fileName)
fp = open(fileName, "rb") # the zip must be read in binary mode
# Unzip it
current.log.debug("Unzipping %s" % layerName)
import zipfile
myfile = zipfile.ZipFile(fp)
for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
fileName = "%s.%s" % (layerName, ext)
file = myfile.read(fileName)
f = open(fileName, "wb") # shapefile components are binary
f.write(file)
f.close()
myfile.close()
# Use OGR to read Shapefile
current.log.debug("Opening %s.shp" % layerName)
ds = ogr.Open("%s.shp" % layerName)
if ds is None:
current.log.debug("Open failed.\n")
return
lyr = ds.GetLayerByName(layerName)
lyr.ResetReading()
for feat in lyr:
code = feat.GetField(codeField)
if not code:
# Skip the entries which aren't countries
continue
if countries and code not in countries:
# Skip the countries which we're not interested in
continue
geom = feat.GetGeometryRef()
if geom is not None:
if geom.GetGeometryType() == ogr.wkbPoint:
pass
else:
## FIXME
##query = (table.code == code)
wkt = geom.ExportToWkt()
if wkt.startswith("LINESTRING"):
gis_feature_type = 2
elif wkt.startswith("POLYGON"):
gis_feature_type = 3
elif wkt.startswith("MULTIPOINT"):
gis_feature_type = 4
elif wkt.startswith("MULTILINESTRING"):
gis_feature_type = 5
elif wkt.startswith("MULTIPOLYGON"):
gis_feature_type = 6
elif wkt.startswith("GEOMETRYCOLLECTION"):
gis_feature_type = 7
#code2 = feat.GetField(code2Field)
#area = feat.GetField("Shape_Area")
try:
## FIXME
db(query).update(gis_feature_type=gis_feature_type,
wkt=wkt)
#code2=code2,
#area=area
except db._adapter.driver.OperationalError as exception:
current.log.error(exception)
else:
current.log.debug("No geometry\n")
# Close the shapefile
ds.Destroy()
db.commit()
# Revert back to the working directory as before.
os.chdir(old_working_directory)
return
# -------------------------------------------------------------------------
def import_geonames(self, country, level=None):
"""
Import Locations from the Geonames database
@param country: the 2-letter country code
@param level: the ADM level to import
Designed to be run from the CLI
Levels should be imported sequentially.
It is assumed that L0 exists in the DB already
L1-L3 may have been imported from Shapefiles with Polygon info
Geonames can then be used to populate the lower levels of hierarchy
"""
import codecs
from shapely.geometry import point
from shapely.geos import ReadingError
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
db = current.db
s3db = current.s3db
#cache = s3db.cache
request = current.request
#settings = current.deployment_settings
table = s3db.gis_location
ttable = s3db.gis_location_tag
url = "http://download.geonames.org/export/dump/" + country + ".zip"
cachepath = os.path.join(request.folder, "cache")
filename = country + ".txt"
filepath = os.path.join(cachepath, filename)
if os.access(filepath, os.R_OK):
cached = True
else:
cached = False
if not os.access(cachepath, os.W_OK):
current.log.error("Folder not writable", cachepath)
return
if not cached:
# Download File
from gluon.tools import fetch
try:
f = fetch(url)
except (urllib2.URLError,):
e = sys.exc_info()[1]
current.log.error("URL Error", e)
return
except (urllib2.HTTPError,):
e = sys.exc_info()[1]
current.log.error("HTTP Error", e)
return
# Unzip File
if f[:2] == "PK":
# Unzip
fp = StringIO(f)
import zipfile
myfile = zipfile.ZipFile(fp)
try:
# Python 2.6+ only :/
# For now, 2.5 users need to download/unzip manually to cache folder
myfile.extract(filename, cachepath)
myfile.close()
except IOError:
current.log.error("Zipfile contents don't seem correct!")
myfile.close()
return
f = codecs.open(filepath, encoding="utf-8")
# Downloaded file is worth keeping
#os.remove(filepath)
if level == "L1":
fc = "ADM1"
parent_level = "L0"
elif level == "L2":
fc = "ADM2"
parent_level = "L1"
elif level == "L3":
fc = "ADM3"
parent_level = "L2"
elif level == "L4":
fc = "ADM4"
parent_level = "L3"
else:
# 5 levels of hierarchy or 4?
# @ToDo make more extensible still
#gis_location_hierarchy = self.get_location_hierarchy()
try:
#label = gis_location_hierarchy["L5"]
level = "L5"
parent_level = "L4"
except:
# ADM4 data in Geonames isn't always good (e.g. PK bad)
level = "L4"
parent_level = "L3"
finally:
fc = "PPL"
deleted = (table.deleted == False)
query = deleted & (table.level == parent_level)
# Do the DB query once (outside loop)
all_parents = db(query).select(table.wkt,
table.lon_min,
table.lon_max,
table.lat_min,
table.lat_max,
table.id)
if not all_parents:
# No locations in the parent level found
# - use the one higher instead
parent_level = "L" + str(int(parent_level[1:]) + 1)
query = deleted & (table.level == parent_level)
all_parents = db(query).select(table.wkt,
table.lon_min,
table.lon_max,
table.lat_min,
table.lat_max,
table.id)
# Parse File
current_row = 0
for line in f:
current_row += 1
# Format of file: http://download.geonames.org/export/dump/readme.txt
geonameid, \
name, \
asciiname, \
alternatenames, \
lat, \
lon, \
feature_class, \
feature_code, \
country_code, \
cc2, \
admin1_code, \
admin2_code, \
admin3_code, \
admin4_code, \
population, \
elevation, \
gtopo30, \
timezone, \
modification_date = line.split("\t")
if feature_code == fc:
# Add WKT
lat = float(lat)
lon = float(lon)
wkt = self.latlon_to_wkt(lat, lon)
shape = point.Point(lon, lat)
# Add Bounds
lon_min = lon_max = lon
lat_min = lat_max = lat
# Locate Parent
parent = ""
# 1st check for Parents whose bounds include this location (faster)
def in_bbox(row):
return (row.lon_min < lon_min) & \
(row.lon_max > lon_max) & \
(row.lat_min < lat_min) & \
(row.lat_max > lat_max)
for row in all_parents.find(lambda row: in_bbox(row)):
# Search within this subset with a full geometry check
# Uses Shapely.
# @ToDo provide option to use PostGIS/Spatialite
try:
parent_shape = wkt_loads(row.wkt)
if parent_shape.intersects(shape):
parent = row.id
# Should be just a single parent
break
except ReadingError:
current.log.error("Error reading wkt of location with id", row.id)
# Add entry to database
new_id = table.insert(name=name,
level=level,
parent=parent,
lat=lat,
lon=lon,
wkt=wkt,
lon_min=lon_min,
lon_max=lon_max,
lat_min=lat_min,
lat_max=lat_max)
ttable.insert(location_id=new_id,
tag="geonames",
value=geonameid)
else:
continue
current.log.debug("All done!")
return
# -------------------------------------------------------------------------
@staticmethod
def latlon_to_wkt(lat, lon):
"""
Convert a LatLon to a WKT string
>>> s3gis.latlon_to_wkt(6, 80)
'POINT(80.000000 6.000000)'
"""
WKT = "POINT(%f %f)" % (lon, lat)
return WKT
# -------------------------------------------------------------------------
@staticmethod
def parse_location(wkt, lon=None, lat=None):
"""
Parses a location from wkt, returning wkt, lat, lon, bounding box and type.
For points, wkt may be None if lat and lon are provided; wkt will be generated.
For lines and polygons, the lat, lon returned represent the shape's centroid.
Centroid and bounding box will be None if Shapely is not available.
"""
if not wkt:
if lon is None or lat is None:
raise RuntimeError("Need wkt or lon+lat to parse a location")
wkt = "POINT(%f %f)" % (lon, lat)
geom_type = GEOM_TYPES["point"]
bbox = (lon, lat, lon, lat)
else:
try:
from shapely.wkt import loads as wkt_loads
SHAPELY = True
except:
SHAPELY = False
if SHAPELY:
shape = wkt_loads(wkt)
centroid = shape.centroid
lat = centroid.y
lon = centroid.x
geom_type = GEOM_TYPES[shape.type.lower()]
bbox = shape.bounds
else:
lat = None
lon = None
geom_type = GEOM_TYPES[wkt.split("(")[0].lower()]
bbox = None
res = {"wkt": wkt, "lat": lat, "lon": lon, "gis_feature_type": geom_type}
if bbox:
res["lon_min"], res["lat_min"], res["lon_max"], res["lat_max"] = bbox
return res
# -------------------------------------------------------------------------
@staticmethod
def update_location_tree(feature=None, all_locations=False):
"""
Update GIS Locations' Materialized path, Lx locations, Lat/Lon & the_geom
@param feature: a feature dict to update the tree for
- if not provided then update the whole tree
@param all_locations: passed to recursive calls to indicate that this
is an update of the whole tree. Used to avoid repeated attempts to
update hierarchy locations with missing data (e.g. lacking some
ancestor level).
returns the path of the feature
Called onaccept for locations (async, where-possible)
"""
# During prepopulate, for efficiency, we don't update the location
# tree, but rather leave that til after prepopulate is complete.
if GIS.disable_update_location_tree:
return None
db = current.db
try:
table = db.gis_location
except:
table = current.s3db.gis_location
spatial = current.deployment_settings.get_gis_spatialdb()
update_location_tree = GIS.update_location_tree
wkt_centroid = GIS.wkt_centroid
fields = (table.id,
table.name,
table.level,
table.path,
table.parent,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
table.lat,
table.lon,
table.wkt,
table.inherited
)
# ---------------------------------------------------------------------
def fixup(feature):
"""
Fix all the issues with a Feature, assuming that
- the corrections are in the feature
- or they are Bounds / Centroid / WKT / the_geom issues
"""
form = Storage()
form.vars = form_vars = feature
form.errors = Storage()
if not form_vars.get("wkt"):
# Point
form_vars.update(gis_feature_type="1")
# Calculate Bounds / Centroid / WKT / the_geom
wkt_centroid(form)
if form.errors:
current.log.error("S3GIS: %s" % form.errors)
else:
wkt = form_vars.wkt
if wkt and not wkt.startswith("POI"):
# Polygons aren't inherited
form_vars.update(inherited = False)
if "update_record" in form_vars:
# Must be a Row
new_vars = {}
table_fields = table.fields
for v in form_vars:
if v in table_fields:
new_vars[v] = form_vars[v]
form_vars = new_vars
try:
db(table.id == feature.id).update(**form_vars)
except MemoryError:
current.log.error("S3GIS: Unable to set bounds & centroid for feature %s: MemoryError" % feature.id)
# ---------------------------------------------------------------------
def propagate(parent):
"""
Propagate Lat/Lon down to any Features which inherit from this one
@param parent: gis_location id of parent
"""
query = (table.parent == parent) & \
(table.inherited == True)
rows = db(query).select(*fields)
for row in rows:
try:
update_location_tree(row)
except RuntimeError:
current.log.error("Cannot propagate inherited latlon to child %s of location ID %s: too much recursion" % \
(row.id, parent))
if not feature:
# We are updating all locations.
all_locations = True
# Do in chunks to save memory and also do in correct order
all_fields = (table.id, table.name, table.gis_feature_type,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.wkt, table.inherited,
# Handle Countries which start with Bounds set, yet are Points
table.lat_min, table.lon_min, table.lat_max, table.lon_max,
table.path, table.parent)
for level in ("L0", "L1", "L2", "L3", "L4", "L5", None):
query = (table.level == level) & (table.deleted == False)
try:
features = db(query).select(*all_fields)
except MemoryError:
current.log.error("S3GIS: Unable to update Location Tree for level %s: MemoryError" % level)
else:
for feature in features:
feature["level"] = level
wkt = feature["wkt"]
if wkt and not wkt.startswith("POI"):
# Polygons aren't inherited
feature["inherited"] = False
update_location_tree(feature) # all_locations is False here
# All Done!
return
# Single Feature
id = str(feature["id"]) if "id" in feature else None
if not id:
# Nothing we can do
raise ValueError
# L0
level = feature.get("level", False)
name = feature.get("name", False)
path = feature.get("path", False)
# If we're processing all locations, and this is a hierarchy location,
# and has already been processed (as evidenced by having a path) do not
# process it again. Locations with a gap in their ancestor levels will
# be regarded as missing data and sent through update_location_tree
# recursively, but that missing data will not be filled in after the
# location is processed once during the all-locations call.
if all_locations and path and level:
# This hierarchy location is already finalized.
return path
lat = feature.get("lat", False)
lon = feature.get("lon", False)
wkt = feature.get("wkt", False)
L0 = feature.get("L0", False)
if level == "L0":
if name is False or path is False or lat is False or lon is False or \
wkt is False or L0 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.name,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
limitby=(0, 1)).first()
name = feature.name
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
if path != id or L0 != name or not wkt or lat is None:
# Fix everything up
path = id
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = False,
path = path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = name,
L1 = None,
L2 = None,
L3 = None,
L4 = None,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return path
fixup_required = False
# L1
inherited = feature.get("inherited", None)
parent = feature.get("parent", False)
L1 = feature.get("L1", False)
if level == "L1":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
if parent:
_path = "%s/%s" % (parent, id)
_L0 = db(table.id == parent).select(table.name,
table.lat,
table.lon,
limitby=(0, 1)).first()
L0_name = _L0.name
L0_lat = _L0.lat
L0_lon = _L0.lon
else:
_path = id
L0_name = None
L0_lat = None
L0_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = L0_lat
lon = L0_lon
elif path != _path or L0 != L0_name or L1 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = name,
L2 = None,
L3 = None,
L4 = None,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# L2
L2 = feature.get("L2", False)
if level == "L2":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
if parent:
Lx = db(table.id == parent).select(table.name,
table.level,
table.parent,
table.lat,
table.lon,
limitby=(0, 1)).first()
if Lx.level == "L1":
L1_name = Lx.name
_parent = Lx.parent
if _parent:
_path = "%s/%s/%s" % (_parent, parent, id)
L0_name = db(table.id == _parent).select(table.name,
limitby=(0, 1),
cache=current.s3db.cache
).first().name
else:
_path = "%s/%s" % (parent, id)
L0_name = None
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
else:
current.log.error("Parent of L2 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = name,
L3 = None,
L4 = None,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# L3
L3 = feature.get("L3", False)
if level == "L3":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False or L3 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.L0,
table.L1,
limitby=(0, 1)).first()
if Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
else:
current.log.error("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != L2_name or L3 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = L2_name,
L3 = name,
L4 = None,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# L4
L4 = feature.get("L4", False)
if level == "L4":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False or L3 is False or L4 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.L0,
table.L1,
table.L2,
limitby=(0, 1)).first()
if Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
else:
current.log.error("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
L3_name = None
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != L2_name or L3 != L3_name or L4 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = L2_name,
L3 = L3_name,
L4 = name,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# L5
L5 = feature.get("L5", False)
if level == "L5":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False or L3 is False or L4 is False or L5 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
L5 = feature.L5
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.L0,
table.L1,
table.L2,
table.L3,
limitby=(0, 1)).first()
if Lx.level == "L4":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
elif Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
L4_name = None
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
L4_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
L4_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
L4_name = None
else:
current.log.error("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
L3_name = None
L4_name = None
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != L2_name or L3 != L3_name or L4 != L4_name or L5 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = L2_name,
L3 = L3_name,
L4 = L4_name,
L5 = name,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# Specific Location
# - or unspecified (which we should avoid happening as inefficient)
if inherited is None or level is False or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False or L3 is False or L4 is False or L5 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.level,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
limitby=(0, 1)).first()
inherited = feature.inherited
level = feature.level
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
L5 = feature.L5
L0_name = name if level == "L0" else None
L1_name = name if level == "L1" else None
L2_name = name if level == "L2" else None
L3_name = name if level == "L3" else None
L4_name = name if level == "L4" else None
L5_name = name if level == "L5" else None
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
limitby=(0, 1)).first()
if Lx.level == "L5":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.L4
L5_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name and L4_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.L4
elif Lx.level == "L4":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
elif Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
else:
current.log.error("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != L2_name or L3 != L3_name or L4 != L4_name or L5 != L5_name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = L2_name,
L3 = L3_name,
L4 = L4_name,
L5 = L5_name,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# -------------------------------------------------------------------------
@staticmethod
def wkt_centroid(form):
"""
OnValidation callback:
If a WKT is defined: validate the format,
calculate the LonLat of the Centroid, and set bounds
Else if a LonLat is defined: calculate the WKT for the Point.
"""
form_vars = form.vars
if form_vars.get("gis_feature_type", None) == "1":
# Point
lat = form_vars.get("lat", None)
lon = form_vars.get("lon", None)
if (lon is None and lat is None) or \
(lon == "" and lat == ""):
# No Geometry available
# Don't clobber existing records (e.g. in Prepop)
#form_vars.gis_feature_type = "0"
# Cannot create WKT, so Skip
return
elif lat is None or lat == "":
# Can't just have lon without lat
form.errors["lat"] = current.messages.lat_empty
elif lon is None or lon == "":
form.errors["lon"] = current.messages.lon_empty
else:
form_vars.wkt = "POINT(%(lon)s %(lat)s)" % form_vars
radius = form_vars.get("radius", None)
if radius:
bbox = GIS.get_bounds_from_radius(lat, lon, radius)
form_vars.lat_min = bbox["lat_min"]
form_vars.lon_min = bbox["lon_min"]
form_vars.lat_max = bbox["lat_max"]
form_vars.lon_max = bbox["lon_max"]
else:
if "lon_min" not in form_vars or form_vars.lon_min is None:
form_vars.lon_min = lon
if "lon_max" not in form_vars or form_vars.lon_max is None:
form_vars.lon_max = lon
if "lat_min" not in form_vars or form_vars.lat_min is None:
form_vars.lat_min = lat
if "lat_max" not in form_vars or form_vars.lat_max is None:
form_vars.lat_max = lat
elif form_vars.get("wkt", None):
# Parse WKT for LineString, Polygon, etc
from shapely.wkt import loads as wkt_loads
try:
shape = wkt_loads(form_vars.wkt)
except:
try:
# Perhaps this is really a LINESTRING (e.g. OSM import of an unclosed Way)
linestring = "LINESTRING%s" % form_vars.wkt[8:-1]
shape = wkt_loads(linestring)
form_vars.wkt = linestring
except:
form.errors["wkt"] = current.messages.invalid_wkt
return
gis_feature_type = shape.type
if gis_feature_type == "Point":
form_vars.gis_feature_type = 1
elif gis_feature_type == "LineString":
form_vars.gis_feature_type = 2
elif gis_feature_type == "Polygon":
form_vars.gis_feature_type = 3
elif gis_feature_type == "MultiPoint":
form_vars.gis_feature_type = 4
elif gis_feature_type == "MultiLineString":
form_vars.gis_feature_type = 5
elif gis_feature_type == "MultiPolygon":
form_vars.gis_feature_type = 6
elif gis_feature_type == "GeometryCollection":
form_vars.gis_feature_type = 7
try:
centroid_point = shape.centroid
form_vars.lon = centroid_point.x
form_vars.lat = centroid_point.y
bounds = shape.bounds
if gis_feature_type != "Point" or \
"lon_min" not in form_vars or form_vars.lon_min is None or \
form_vars.lon_min == form_vars.lon_max:
# Update bounds unless we have a 'Point' which has already got wider Bounds specified (such as a country)
form_vars.lon_min = bounds[0]
form_vars.lat_min = bounds[1]
form_vars.lon_max = bounds[2]
form_vars.lat_max = bounds[3]
except:
form.errors.gis_feature_type = current.messages.centroid_error
elif (form_vars.lon is None and form_vars.lat is None) or \
(form_vars.lon == "" and form_vars.lat == ""):
# No Geometry available
# Don't clobber existing records (e.g. in Prepop)
#form_vars.gis_feature_type = "0"
# Cannot create WKT, so Skip
return
else:
# Point
form_vars.gis_feature_type = "1"
if form_vars.lat is None or form_vars.lat == "":
form.errors["lat"] = current.messages.lat_empty
elif form_vars.lon is None or form_vars.lon == "":
form.errors["lon"] = current.messages.lon_empty
else:
form_vars.wkt = "POINT(%(lon)s %(lat)s)" % form_vars
if "lon_min" not in form_vars or form_vars.lon_min is None:
form_vars.lon_min = form_vars.lon
if "lon_max" not in form_vars or form_vars.lon_max is None:
form_vars.lon_max = form_vars.lon
if "lat_min" not in form_vars or form_vars.lat_min is None:
form_vars.lat_min = form_vars.lat
if "lat_max" not in form_vars or form_vars.lat_max is None:
form_vars.lat_max = form_vars.lat
if current.deployment_settings.get_gis_spatialdb():
# Also populate the spatial field
form_vars.the_geom = form_vars.wkt
return
# -------------------------------------------------------------------------
@staticmethod
def query_features_by_bbox(lon_min, lat_min, lon_max, lat_max):
"""
Returns a query of all Locations inside the given bounding box
"""
table = current.s3db.gis_location
query = (table.lat_min <= lat_max) & \
(table.lat_max >= lat_min) & \
(table.lon_min <= lon_max) & \
(table.lon_max >= lon_min)
return query
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_bbox(lon_min, lat_min, lon_max, lat_max):
"""
Returns Rows of Locations whose shape intersects the given bbox.
"""
query = current.gis.query_features_by_bbox(lon_min,
lat_min,
lon_max,
lat_max)
return current.db(query).select()
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_shape(shape):
"""
Returns Rows of locations which intersect the given shape.
Relies on Shapely for wkt parsing and intersection.
@ToDo: provide an option to use PostGIS/Spatialite
"""
from shapely.geos import ReadingError
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
table = current.s3db.gis_location
in_bbox = current.gis.query_features_by_bbox(*shape.bounds)
has_wkt = (table.wkt != None) & (table.wkt != "")
for loc in current.db(in_bbox & has_wkt).select():
try:
location_shape = wkt_loads(loc.wkt)
if location_shape.intersects(shape):
yield loc
except ReadingError:
current.log.error("Error reading wkt of location with id", loc.id)
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_latlon(lat, lon):
"""
Returns a generator of locations whose shape intersects the given LatLon.
Relies on Shapely.
@todo: provide an option to use PostGIS/Spatialite
"""
from shapely.geometry import point
return current.gis.get_features_by_shape(point.Point(lon, lat))
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_feature(feature):
"""
Returns all Locations whose geometry intersects the given feature.
Relies on Shapely.
@ToDo: provide an option to use PostGIS/Spatialite
"""
from shapely.wkt import loads as wkt_loads
shape = wkt_loads(feature.wkt)
return current.gis.get_features_by_shape(shape)
# -------------------------------------------------------------------------
@staticmethod
def set_all_bounds():
"""
Sets bounds for all locations without them.
If shapely is present, and a location has wkt, bounds of the geometry
are used. Otherwise, the (lat, lon) are used as bounds.
"""
try:
from shapely.wkt import loads as wkt_loads
SHAPELY = True
except:
SHAPELY = False
db = current.db
table = current.s3db.gis_location
# Query to find all locations without bounds set
no_bounds = (table.lon_min == None) & \
(table.lat_min == None) & \
(table.lon_max == None) & \
(table.lat_max == None) & \
(table.lat != None) & \
(table.lon != None)
if SHAPELY:
# Refine to those locations with a WKT field
wkt_no_bounds = no_bounds & (table.wkt != None) & (table.wkt != "")
for location in db(wkt_no_bounds).select(table.id, table.wkt): # id is needed for the update below
try :
shape = wkt_loads(location.wkt)
except:
current.log.error("Error reading WKT", location.wkt)
continue
bounds = shape.bounds
table[location.id] = dict(lon_min = bounds[0],
lat_min = bounds[1],
lon_max = bounds[2],
lat_max = bounds[3],
)
# Anything left, we assume is a Point, so set the bounds to be the same
db(no_bounds).update(lon_min=table.lon,
lat_min=table.lat,
lon_max=table.lon,
lat_max=table.lat)
# -------------------------------------------------------------------------
@staticmethod
def simplify(wkt,
tolerance=None,
preserve_topology=True,
output="wkt",
decimals=4
):
"""
Simplify a complex Polygon using the Douglas-Peucker algorithm
- NB This uses Python, better performance will be gained by doing
this direct from the database if you are using PostGIS:
ST_Simplify() is available as
db(query).select(table.the_geom.st_simplify(tolerance).st_astext().with_alias('wkt')).first().wkt
db(query).select(table.the_geom.st_simplify(tolerance).st_asgeojson().with_alias('geojson')).first().geojson
@param wkt: the WKT string to be simplified (usually coming from a gis_location record)
@param tolerance: how aggressive a simplification to perform
@param preserve_topology: whether the simplified geometry should be maintained
@param output: whether to output as WKT or GeoJSON format
@param decimals: the number of decimal places to include in the output
"""
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
try:
shape = wkt_loads(wkt)
except:
wkt = wkt[:10] if wkt else wkt # log just the start of the invalid WKT
current.log.error("Invalid Shape: %s" % wkt)
return None
if not tolerance:
tolerance = current.deployment_settings.get_gis_simplify_tolerance()
if tolerance:
shape = shape.simplify(tolerance, preserve_topology)
# Limit the number of decimal places
formatter = ".%sf" % decimals
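# e.g. decimals=4 -> formatter ".4f", so 1.23456789 is written as 1.2346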
def shrink_polygon(shape):
""" Helper Function """
points = shape.exterior.coords
coords = []
cappend = coords.append
for point in points:
x = float(format(point[0], formatter))
y = float(format(point[1], formatter))
cappend((x, y))
return Polygon(LineString(coords))
geom_type = shape.geom_type
if geom_type == "MultiPolygon":
polygons = shape.geoms
p = []
pappend = p.append
for polygon in polygons:
pappend(shrink_polygon(polygon))
shape = MultiPolygon([s for s in p])
elif geom_type == "Polygon":
shape = shrink_polygon(shape)
elif geom_type == "LineString":
points = shape.coords
coords = []
cappend = coords.append
for point in points:
x = float(format(point[0], formatter))
y = float(format(point[1], formatter))
cappend((x, y))
shape = LineString(coords)
elif geom_type == "Point":
x = float(format(shape.x, formatter))
y = float(format(shape.y, formatter))
shape = Point(x, y)
else:
current.log.info("Cannot yet shrink Geometry: %s" % geom_type)
# Output
if output == "wkt":
output = shape.to_wkt()
elif output == "geojson":
from ..geojson import dumps
# Compact Encoding
output = dumps(shape, separators=SEPARATORS)
return output
# -------------------------------------------------------------------------
def show_map(self,
id = "default_map",
height = None,
width = None,
bbox = {},
lat = None,
lon = None,
zoom = None,
projection = None,
add_feature = False,
add_feature_active = False,
add_line = False,
add_line_active = False,
add_polygon = False,
add_polygon_active = False,
features = None,
feature_queries = None,
feature_resources = None,
wms_browser = {},
catalogue_layers = False,
legend = False,
toolbar = False,
area = False,
color_picker = False,
clear_layers = None,
nav = None,
print_control = None,
print_mode = False,
save = False,
search = False,
mouse_position = None,
overview = None,
permalink = None,
scaleline = None,
zoomcontrol = None,
zoomWheelEnabled = True,
mgrs = {},
window = False,
window_hide = False,
closable = True,
maximizable = True,
collapsed = False,
callback = "DEFAULT",
plugins = None,
):
"""
Returns the HTML to display a map
Normally called in the controller as: map = gis.show_map()
In the view, put: {{=XML(map)}}
@param id: ID to uniquely identify this map if there are several on a page
@param height: Height of viewport (if not provided then the default deployment setting is used)
@param width: Width of viewport (if not provided then the default deployment setting is used)
@param bbox: default Bounding Box of viewport (if not provided then the Lat/Lon/Zoom are used) (Dict):
{"lon_min" : float,
"lat_min" : float,
"lon_max" : float,
"lat_max" : float,
}
@param lat: default Latitude of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param lon: default Longitude of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param zoom: default Zoom level of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param projection: EPSG code for the Projection to use (if not provided then the default setting from the Map Service Catalogue is used)
@param add_feature: Whether to include a DrawFeature control to allow adding a marker to the map
@param add_feature_active: Whether the marker DrawFeature control should be active by default
@param add_line: Whether to include a DrawFeature control to allow drawing a line over the map
@param add_line_active: Whether the line DrawFeature control should be active by default
@param add_polygon: Whether to include a DrawFeature control to allow drawing a polygon over the map
@param add_polygon_active: Whether the polygon DrawFeature control should be active by default
@param features: Simple Features to overlay on Map (no control over appearance & not interactive)
[wkt]
@param feature_queries: Feature Queries to overlay onto the map & their options (List of Dicts):
[{"name" : T("MyLabel"), # A string: the label for the layer
"query" : query, # A gluon.sql.Rows of gis_locations, which can be from a simple query or a Join.
# Extra fields can be added for 'popup_url', 'popup_label' & either
# 'marker' (url/height/width) or 'shape' (with optional 'colour' & 'size')
"active" : True, # Is the feed displayed upon load or needs ticking to load afterwards?
"marker" : None, # Optional: A per-Layer marker query or marker_id for the icon used to display the feature
"opacity" : 1, # Optional
"cluster_attribute", # Optional
"cluster_distance", # Optional
"cluster_threshold" # Optional
}]
@param feature_resources: REST URLs for (filtered) resources to overlay onto the map & their options (List of Dicts):
[{"name" : T("MyLabel"), # A string: the label for the layer
"id" : "search", # A string: the id for the layer (for manipulation by JavaScript)
"active" : True, # Is the feed displayed upon load or needs ticking to load afterwards?
EITHER:
"layer_id" : 1, # An integer: the layer_id to load (optional alternative to specifying URL/tablename/marker)
"filter" : "filter", # A string: an optional URL filter which *replaces* any in the layer
OR:
"tablename" : "module_resource", # A string: the tablename (used to determine whether to locate via location_id or site_id)
"url" : "/eden/module/resource.geojson?filter", # A URL to load the resource
"marker" : None, # Optional: A per-Layer marker dict for the icon used to display the feature (overrides layer_id if-set)
"opacity" : 1, # Optional (overrides layer_id if-set)
"cluster_attribute", # Optional (overrides layer_id if-set)
"cluster_distance", # Optional (overrides layer_id if-set)
"cluster_threshold", # Optional (overrides layer_id if-set)
"dir", # Optional (overrides layer_id if-set)
"style", # Optional (overrides layer_id if-set)
}]
@param wms_browser: WMS Server's GetCapabilities & options (dict)
{"name": T("MyLabel"), # Name for the Folder in LayerTree
"url": string # URL of GetCapabilities
}
@param catalogue_layers: Show all the enabled Layers from the GIS Catalogue
Defaults to False: Just show the default Base layer
@param legend: True: Show the GeoExt Legend panel, False: No Panel, "float": New floating Legend Panel
@param toolbar: Show the Icon Toolbar of Controls
@param area: Show the Area tool on the Toolbar
@param color_picker: Show the Color Picker tool on the Toolbar (used for S3LocationSelector...pick up in postprocess)
If a style is provided then this is used as the default style
@param nav: Show the Navigation controls on the Toolbar
@param save: Show the Save tool on the Toolbar
@param search: Show the Geonames search box (requires a username to be configured)
@param mouse_position: Show the current coordinates in the bottom-right of the map. 3 Options: 'normal', 'mgrs', False (defaults to checking deployment_settings, which defaults to 'normal')
@param overview: Show the Overview Map (defaults to checking deployment_settings, which defaults to True)
@param permalink: Show the Permalink control (defaults to checking deployment_settings, which defaults to True)
@param scaleline: Show the ScaleLine control (defaults to checking deployment_settings, which defaults to True)
@param zoomcontrol: Show the Zoom control (defaults to checking deployment_settings, which defaults to True)
@param mgrs: Use the MGRS Control to select PDFs
{"name": string, # Name for the Control
"url": string # URL of PDF server
}
@ToDo: Also add MGRS Search support: http://gxp.opengeo.org/master/examples/mgrs.html
@param window: Have viewport pop out of page into a resizable window
@param window_hide: Have the window hidden by default, ready to appear (e.g. on clicking a button)
@param closable: In Window mode, whether the window is closable or not
@param collapsed: Start the Tools panel (West region) collapsed
@param callback: Code to run once the Map JavaScript has loaded
@param plugins: an iterable of objects which support the following methods:
.extend_gis_map(map)
Client-side portion supports the following methods:
.addToMapWindow(items)
.setup(map)
"""
return MAP(id = id,
height = height,
width = width,
bbox = bbox,
lat = lat,
lon = lon,
zoom = zoom,
projection = projection,
add_feature = add_feature,
add_feature_active = add_feature_active,
add_line = add_line,
add_line_active = add_line_active,
add_polygon = add_polygon,
add_polygon_active = add_polygon_active,
features = features,
feature_queries = feature_queries,
feature_resources = feature_resources,
wms_browser = wms_browser,
catalogue_layers = catalogue_layers,
legend = legend,
toolbar = toolbar,
area = area,
color_picker = color_picker,
clear_layers = clear_layers,
nav = nav,
print_control = print_control,
print_mode = print_mode,
save = save,
search = search,
mouse_position = mouse_position,
overview = overview,
permalink = permalink,
scaleline = scaleline,
zoomcontrol = zoomcontrol,
zoomWheelEnabled = zoomWheelEnabled,
mgrs = mgrs,
window = window,
window_hide = window_hide,
closable = closable,
maximizable = maximizable,
collapsed = collapsed,
callback = callback,
plugins = plugins,
)
# =============================================================================
class MAP(DIV):
"""
HTML Helper to render a Map
- allows the Map to be generated only when being rendered
- used by gis.show_map()
"""
def __init__(self, **opts):
"""
:param **opts: options to pass to the Map for server-side processing
"""
# We haven't yet run _setup()
self.setup = False
self.callback = None
# Options for server-side processing
self.opts = opts
self.id = map_id = opts.get("id", "default_map")
# Options for client-side processing
self.options = {}
# Components
# Map (Embedded not Window)
components = [DIV(DIV(_class="map_loader"),
_id="%s_panel" % map_id)
]
self.components = components
for c in components:
self._setnode(c)
# Adapt CSS to size of Map
_class = "map_wrapper"
if opts.get("window"):
_class = "%s fullscreen" % _class
if opts.get("print_mode"):
_class = "%s print" % _class
self.attributes = {"_class": _class,
"_id": map_id,
}
self.parent = None
# Show Color Picker?
if opts.get("color_picker"):
# Can't be done in _setup() as usually run from xml() and hence we've already passed this part of the layout.html
s3 = current.response.s3
if s3.debug:
style = "plugins/spectrum.css"
else:
style = "plugins/spectrum.min.css"
if style not in s3.stylesheets:
s3.stylesheets.append(style)
# -------------------------------------------------------------------------
def _setup(self):
"""
Setup the Map
- not done during init() to be as Lazy as possible
- separated from xml() in order to be able to read options to put
into scripts (callback or otherwise)
"""
# Read configuration
config = GIS.get_config()
if not config:
# No prepop - Bail
current.session.error = current.T("Map cannot display without prepop data!")
redirect(URL(c="default", f="index"))
opts = self.opts
T = current.T
db = current.db
auth = current.auth
s3db = current.s3db
request = current.request
response = current.response
if not response.warning:
response.warning = ""
s3 = response.s3
ctable = db.gis_config
settings = current.deployment_settings
MAP_ADMIN = auth.s3_has_role(current.session.s3.system_roles.MAP_ADMIN)
# Support bookmarks (such as from the control)
# - these over-ride the arguments
get_vars = request.get_vars
# JS Globals
js_globals = {}
# Map Options for client-side processing
options = {}
# Strings used by all Maps
i18n = {"gis_base_layers": T("Base Layers"),
"gis_overlays": T(settings.get_gis_label_overlays()),
"gis_layers": T(settings.get_gis_layers_label()),
"gis_draft_layer": T("Draft Features"),
"gis_cluster_multiple": T("There are multiple records at this location"),
"gis_loading": T("Loading"),
"gis_requires_login": T("Requires Login"),
"gis_too_many_features": T("There are too many features, please Zoom In or Filter"),
"gis_zoomin": T("Zoom In"),
}
##########
# Viewport
##########
height = opts.get("height", None)
if height:
map_height = height
else:
map_height = settings.get_gis_map_height()
options["map_height"] = map_height
width = opts.get("width", None)
if width:
map_width = width
else:
map_width = settings.get_gis_map_width()
options["map_width"] = map_width
# Bounding Box or Center/Zoom
bbox = opts.get("bbox", None)
if (bbox
and (-90 <= bbox["lat_max"] <= 90)
and (-90 <= bbox["lat_min"] <= 90)
and (-180 <= bbox["lon_max"] <= 180)
and (-180 <= bbox["lon_min"] <= 180)
):
# We have sane Bounds provided, so we should use them
pass
else:
# No bounds or we've been passed bounds which aren't sane
bbox = None
# Use Lat/Lon/Zoom to center instead
lat = get_vars.get("lat", None)
if lat is not None:
lat = float(lat)
else:
lat = opts.get("lat", None)
if lat is None or lat == "":
lat = config.lat
lon = get_vars.get("lon", None)
if lon is not None:
lon = float(lon)
else:
lon = opts.get("lon", None)
if lon is None or lon == "":
lon = config.lon
if bbox:
# Calculate from Bounds
options["bbox"] = [bbox["lon_min"], # left
bbox["lat_min"], # bottom
bbox["lon_max"], # right
bbox["lat_max"], # top
]
else:
options["lat"] = lat
options["lon"] = lon
zoom = get_vars.get("zoom", None)
if zoom is not None:
zoom = int(zoom)
else:
zoom = opts.get("zoom", None)
if not zoom:
zoom = config.zoom
options["zoom"] = zoom or 1
options["numZoomLevels"] = config.zoom_levels
options["restrictedExtent"] = (config.lon_min,
config.lat_min,
config.lon_max,
config.lat_max,
)
############
# Projection
############
projection = opts.get("projection", None)
if not projection:
projection = config.epsg
options["projection"] = projection
if projection not in (900913, 4326):
# Test for Valid Projection file in Proj4JS library
projpath = os.path.join(
request.folder, "static", "scripts", "gis", "proj4js", \
"lib", "defs", "EPSG%s.js" % projection
)
try:
f = open(projpath, "r")
f.close()
except:
if projection:
proj4js = config.proj4js
if proj4js:
# Create it
try:
f = open(projpath, "w")
except IOError, e:
response.error = \
T("Map not available: Cannot write projection file - %s") % e
else:
f.write('''Proj4js.defs["EPSG:4326"]="%s"''' % proj4js)
f.close()
else:
response.warning = \
T("Map not available: Projection %(projection)s not supported - please add definition to %(path)s") % \
dict(projection = "'%s'" % projection,
path= "/static/scripts/gis/proj4js/lib/defs")
else:
response.error = \
T("Map not available: No Projection configured")
return None
options["maxExtent"] = config.maxExtent
options["units"] = config.units
########
# Marker
########
if config.marker_image:
options["marker_default"] = dict(i = config.marker_image,
h = config.marker_height,
w = config.marker_width,
)
# @ToDo: show_map() opts with fallback to settings
# Keep these in sync with scaleImage() in s3.gis.js
marker_max_height = settings.get_gis_marker_max_height()
if marker_max_height != 35:
options["max_h"] = marker_max_height
marker_max_width = settings.get_gis_marker_max_width()
if marker_max_width != 30:
options["max_w"] = marker_max_width
#########
# Colours
#########
# Keep these in sync with s3.gis.js
cluster_fill = settings.get_gis_cluster_fill()
if cluster_fill and cluster_fill != '8087ff':
options["cluster_fill"] = cluster_fill
cluster_stroke = settings.get_gis_cluster_stroke()
if cluster_stroke and cluster_stroke != '2b2f76':
options["cluster_stroke"] = cluster_stroke
select_fill = settings.get_gis_select_fill()
if select_fill and select_fill != 'ffdc33':
options["select_fill"] = select_fill
select_stroke = settings.get_gis_select_stroke()
if select_stroke and select_stroke != 'ff9933':
options["select_stroke"] = select_stroke
if not settings.get_gis_cluster_label():
options["cluster_label"] = False
########
# Layout
########
if not opts.get("closable", False):
options["windowNotClosable"] = True
if opts.get("window", False):
options["window"] = True
if opts.get("window_hide", False):
options["windowHide"] = True
if opts.get("maximizable", False):
options["maximizable"] = True
else:
options["maximizable"] = False
# Collapsed
if opts.get("collapsed", False):
options["west_collapsed"] = True
# LayerTree
if not settings.get_gis_layer_tree_base():
options["hide_base"] = True
if not settings.get_gis_layer_tree_overlays():
options["hide_overlays"] = True
if not settings.get_gis_layer_tree_expanded():
options["folders_closed"] = True
if settings.get_gis_layer_tree_radio():
options["folders_radio"] = True
#######
# Tools
#######
# Toolbar
if opts.get("toolbar", False):
options["toolbar"] = True
i18n["gis_length_message"] = T("The length is")
i18n["gis_length_tooltip"] = T("Measure Length: Click the points along the path & end with a double-click")
i18n["gis_zoomfull"] = T("Zoom to maximum map extent")
if settings.get_gis_geolocate_control():
# Presence of label turns feature on in s3.gis.js
# @ToDo: Provide explicit option to support multiple maps in a page with different options
i18n["gis_geoLocate"] = T("Zoom to Current Location")
# Search
if opts.get("search", False):
geonames_username = settings.get_gis_geonames_username()
if geonames_username:
# Presence of username turns feature on in s3.gis.js
options["geonames"] = geonames_username
# Presence of label adds support JS in Loader
i18n["gis_search"] = T("Search location in Geonames")
#i18n["gis_search_no_internet"] = T("Geonames.org search requires Internet connectivity!")
# Show NAV controls?
# e.g. removed within S3LocationSelector[Widget]
nav = opts.get("nav", None)
if nav is None:
nav = settings.get_gis_nav_controls()
if nav:
i18n["gis_zoominbutton"] = T("Zoom In: click in the map or use the left mouse button and drag to create a rectangle")
i18n["gis_zoomout"] = T("Zoom Out: click in the map or use the left mouse button and drag to create a rectangle")
i18n["gis_pan"] = T("Pan Map: keep the left mouse button pressed and drag the map")
i18n["gis_navPrevious"] = T("Previous View")
i18n["gis_navNext"] = T("Next View")
else:
options["nav"] = False
# Show Area control?
if opts.get("area", False):
options["area"] = True
i18n["gis_area_message"] = T("The area is")
i18n["gis_area_tooltip"] = T("Measure Area: Click the points around the polygon & end with a double-click")
# Show Color Picker?
color_picker = opts.get("color_picker", False)
if color_picker:
options["color_picker"] = True
if color_picker is not True:
options["draft_style"] = json.loads(color_picker)
#i18n["gis_color_picker_tooltip"] = T("Select Color")
i18n["gis_cancelText"] = T("cancel")
i18n["gis_chooseText"] = T("choose")
i18n["gis_togglePaletteMoreText"] = T("more")
i18n["gis_togglePaletteLessText"] = T("less")
i18n["gis_clearText"] = T("Clear Color Selection")
i18n["gis_noColorSelectedText"] = T("No Color Selected")
# Show Print control?
print_control = opts.get("print_control") is not False and settings.get_gis_print()
if print_control:
# @ToDo: Use internal Printing or External Service
# http://eden.sahanafoundation.org/wiki/BluePrint/GIS/Printing
#print_service = settings.get_gis_print_service()
#if print_service:
# print_tool = {"url": string, # URL of print service (e.g. http://localhost:8080/geoserver/pdf/)
# "mapTitle": string, # Title for the Printed Map (optional)
# "subTitle": string # subTitle for the Printed Map (optional)
# }
options["print"] = True
i18n["gis_print"] = T("Print")
i18n["gis_paper_size"] = T("Paper Size")
i18n["gis_print_tip"] = T("Take a screenshot of the map which can be printed")
# Show Save control?
# e.g. removed within S3LocationSelector[Widget]
if opts.get("save") is True and auth.s3_logged_in():
options["save"] = True
i18n["gis_save"] = T("Save: Default Lat, Lon & Zoom for the Viewport")
if MAP_ADMIN or (config.pe_id == auth.user.pe_id):
# Personal config or MapAdmin, so Save Button does Updates
options["config_id"] = config.id
# OSM Authoring
pe_id = auth.user.pe_id if auth.s3_logged_in() else None
if pe_id and s3db.auth_user_options_get_osm(pe_id):
# Presence of label turns feature on in s3.gis.js
# @ToDo: Provide explicit option to support multiple maps in a page with different options
i18n["gis_potlatch"] = T("Edit the OpenStreetMap data for this area")
i18n["gis_osm_zoom_closer"] = T("Zoom in closer to Edit OpenStreetMap layer")
# MGRS PDF Browser
mgrs = opts.get("mgrs", None)
if mgrs:
options["mgrs_name"] = mgrs["name"]
options["mgrs_url"] = mgrs["url"]
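            # For illustration: the "mgrs" opt is expected to be a dict with the keys
            # read above; the values here are hypothetical:
            #mgrs = {"name": "MGRS Atlas",  # hypothetical name shown for the PDF Browser
            #        "url": "/atlas/",      # hypothetical URL of the PDF collection
            #        }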
else:
# No toolbar
if opts.get("save") is True:
opts["save"] = "float"
# Show Save control?
# e.g. removed within S3LocationSelector[Widget]
if opts.get("save") == "float" and auth.s3_logged_in():
permit = auth.s3_has_permission
if permit("create", ctable):
options["save"] = "float"
i18n["gis_save_map"] = T("Save Map")
i18n["gis_new_map"] = T("Save as New Map?")
i18n["gis_name_map"] = T("Name of Map")
i18n["save"] = T("Save")
i18n["saved"] = T("Saved")
config_id = config.id
_config = db(ctable.id == config_id).select(ctable.uuid,
ctable.name,
limitby=(0, 1),
).first()
if MAP_ADMIN:
i18n["gis_my_maps"] = T("Saved Maps")
else:
options["pe_id"] = auth.user.pe_id
i18n["gis_my_maps"] = T("My Maps")
if permit("update", ctable, record_id=config_id):
options["config_id"] = config_id
options["config_name"] = _config.name
elif _config.uuid != "SITE_DEFAULT":
options["config_name"] = _config.name
# Legend panel
legend = opts.get("legend", False)
if legend:
i18n["gis_legend"] = T("Legend")
if legend == "float":
options["legend"] = "float"
if settings.get_gis_layer_metadata():
options["metadata"] = True
# MAP_ADMIN better for simpler deployments
#if auth.s3_has_permission("create", "cms_post_layer"):
if MAP_ADMIN:
i18n["gis_metadata_create"] = T("Create 'More Info'")
i18n["gis_metadata_edit"] = T("Edit 'More Info'")
else:
i18n["gis_metadata"] = T("More Info")
else:
options["legend"] = True
# Draw Feature Controls
if opts.get("add_feature", False):
i18n["gis_draw_feature"] = T("Add Point")
if opts.get("add_feature_active", False):
options["draw_feature"] = "active"
else:
options["draw_feature"] = "inactive"
if opts.get("add_line", False):
i18n["gis_draw_line"] = T("Add Line")
if opts.get("add_line_active", False):
options["draw_line"] = "active"
else:
options["draw_line"] = "inactive"
if opts.get("add_polygon", False):
i18n["gis_draw_polygon"] = T("Add Polygon")
if opts.get("add_polygon_active", False):
options["draw_polygon"] = "active"
else:
options["draw_polygon"] = "inactive"
# Clear Layers
clear_layers = opts.get("clear_layers") is not False and settings.get_gis_clear_layers()
if clear_layers:
options["clear_layers"] = clear_layers
i18n["gis_clearlayers"] = T("Clear all Layers")
# Layer Properties
if settings.get_gis_layer_properties():
# Presence of label turns feature on in s3.gis.js
i18n["gis_properties"] = T("Layer Properties")
# Upload Layer
if settings.get_gis_geoserver_password():
# Presence of label adds support JS in Loader and turns feature on in s3.gis.js
# @ToDo: Provide explicit option to support multiple maps in a page with different options
i18n["gis_uploadlayer"] = T("Upload Shapefile")
# WMS Browser
wms_browser = opts.get("wms_browser", None)
if wms_browser:
options["wms_browser_name"] = wms_browser["name"]
# urlencode the URL
options["wms_browser_url"] = urllib.quote(wms_browser["url"])
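        # For illustration: the "wms_browser" opt is expected to be a dict with the
        # keys read above; the values here are hypothetical:
        #wms_browser = {"name": "Web Map Service",
        #               "url": "http://localhost:8080/geoserver/wms?service=WMS&request=GetCapabilities",
        #               }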
# Mouse Position
# 'normal', 'mgrs' or 'off'
mouse_position = opts.get("mouse_position", None)
if mouse_position is None:
mouse_position = settings.get_gis_mouse_position()
if mouse_position == "mgrs":
options["mouse_position"] = "mgrs"
# Tell loader to load support scripts
js_globals["mgrs"] = True
elif mouse_position:
options["mouse_position"] = True
# Overview Map
overview = opts.get("overview", None)
if overview is None:
overview = settings.get_gis_overview()
if not overview:
options["overview"] = False
# Permalink
permalink = opts.get("permalink", None)
if permalink is None:
permalink = settings.get_gis_permalink()
if not permalink:
options["permalink"] = False
# ScaleLine
scaleline = opts.get("scaleline", None)
if scaleline is None:
scaleline = settings.get_gis_scaleline()
if not scaleline:
options["scaleline"] = False
# Zoom control
zoomcontrol = opts.get("zoomcontrol", None)
if zoomcontrol is None:
zoomcontrol = settings.get_gis_zoomcontrol()
if not zoomcontrol:
options["zoomcontrol"] = False
zoomWheelEnabled = opts.get("zoomWheelEnabled", True)
if not zoomWheelEnabled:
options["no_zoom_wheel"] = True
########
# Layers
########
# Duplicate Features to go across the dateline?
# @ToDo: Action this again (e.g. for DRRPP)
if settings.get_gis_duplicate_features():
options["duplicate_features"] = True
# Features
features = opts.get("features", None)
if features:
options["features"] = addFeatures(features)
# Feature Queries
feature_queries = opts.get("feature_queries", None)
if feature_queries:
options["feature_queries"] = addFeatureQueries(feature_queries)
# Feature Resources
feature_resources = opts.get("feature_resources", None)
if feature_resources:
options["feature_resources"] = addFeatureResources(feature_resources)
# Layers
db = current.db
ltable = db.gis_layer_config
etable = db.gis_layer_entity
query = (ltable.deleted == False)
join = [etable.on(etable.layer_id == ltable.layer_id)]
fields = [etable.instance_type,
ltable.layer_id,
ltable.enabled,
ltable.visible,
ltable.base,
ltable.dir,
]
if opts.get("catalogue_layers", False):
# Add all enabled Layers from the Catalogue
stable = db.gis_style
mtable = db.gis_marker
query &= (ltable.config_id.belongs(config.ids))
join.append(ctable.on(ctable.id == ltable.config_id))
fields.extend((stable.style,
stable.cluster_distance,
stable.cluster_threshold,
stable.opacity,
stable.popup_format,
mtable.image,
mtable.height,
mtable.width,
ctable.pe_type))
left = [stable.on((stable.layer_id == etable.layer_id) & \
(stable.record_id == None) & \
((stable.config_id == ctable.id) | \
(stable.config_id == None))),
mtable.on(mtable.id == stable.marker_id),
]
limitby = None
            # @ToDo: Need to fix this? Make the style lookup a separate call
if settings.get_database_type() == "postgres":
# None is last
orderby = [ctable.pe_type, stable.config_id]
else:
# None is 1st
orderby = [ctable.pe_type, ~stable.config_id]
if settings.get_gis_layer_metadata():
cptable = s3db.cms_post_layer
left.append(cptable.on(cptable.layer_id == etable.layer_id))
fields.append(cptable.post_id)
else:
# Add just the default Base Layer
query &= (ltable.base == True) & \
(ltable.config_id == config.id)
# Base layer doesn't need a style
left = None
limitby = (0, 1)
orderby = None
layer_types = []
lappend = layer_types.append
layers = db(query).select(join=join,
left=left,
limitby=limitby,
orderby=orderby,
*fields)
if not layers:
# Use Site Default base layer
# (Base layer doesn't need a style)
query = (etable.id == ltable.layer_id) & \
(ltable.config_id == ctable.id) & \
(ctable.uuid == "SITE_DEFAULT") & \
(ltable.base == True) & \
(ltable.enabled == True)
layers = db(query).select(*fields,
limitby=(0, 1))
if not layers:
# Just show EmptyLayer
layer_types = [LayerEmpty]
for layer in layers:
layer_type = layer["gis_layer_entity.instance_type"]
if layer_type == "gis_layer_openstreetmap":
lappend(LayerOSM)
elif layer_type == "gis_layer_google":
# NB v3 doesn't work when initially hidden
lappend(LayerGoogle)
elif layer_type == "gis_layer_arcrest":
lappend(LayerArcREST)
elif layer_type == "gis_layer_bing":
lappend(LayerBing)
elif layer_type == "gis_layer_tms":
lappend(LayerTMS)
elif layer_type == "gis_layer_wms":
lappend(LayerWMS)
elif layer_type == "gis_layer_xyz":
lappend(LayerXYZ)
elif layer_type == "gis_layer_empty":
lappend(LayerEmpty)
elif layer_type == "gis_layer_js":
lappend(LayerJS)
elif layer_type == "gis_layer_theme":
lappend(LayerTheme)
elif layer_type == "gis_layer_geojson":
lappend(LayerGeoJSON)
elif layer_type == "gis_layer_gpx":
lappend(LayerGPX)
elif layer_type == "gis_layer_coordinate":
lappend(LayerCoordinate)
elif layer_type == "gis_layer_georss":
lappend(LayerGeoRSS)
elif layer_type == "gis_layer_kml":
lappend(LayerKML)
elif layer_type == "gis_layer_openweathermap":
lappend(LayerOpenWeatherMap)
elif layer_type == "gis_layer_shapefile":
lappend(LayerShapefile)
elif layer_type == "gis_layer_wfs":
lappend(LayerWFS)
elif layer_type == "gis_layer_feature":
lappend(LayerFeature)
# Make unique
layer_types = set(layer_types)
scripts = []
scripts_append = scripts.append
for LayerType in layer_types:
try:
# Instantiate the Class
layer = LayerType(layers)
layer.as_dict(options)
for script in layer.scripts:
scripts_append(script)
except Exception, exception:
error = "%s not shown: %s" % (LayerType.__name__, exception)
current.log.error(error)
if s3.debug:
raise HTTP(500, error)
else:
response.warning += error
# WMS getFeatureInfo
# (loads conditionally based on whether queryable WMS Layers have been added)
if s3.gis.get_feature_info and settings.get_gis_getfeature_control():
# Presence of label turns feature on
# @ToDo: Provide explicit option to support multiple maps in a page
# with different options
i18n["gis_get_feature_info"] = T("Get Feature Info")
i18n["gis_feature_info"] = T("Feature Info")
# Callback can be set before _setup()
if not self.callback:
self.callback = opts.get("callback", "DEFAULT")
# These can be read/modified after _setup() & before xml()
self.options = options
self.globals = js_globals
self.i18n = i18n
self.scripts = scripts
# Set up map plugins
# - currently just used by Climate
# @ToDo: Get these working with new loader
# This, and any code it generates, is done last
# However, map plugin should not assume this.
self.plugin_callbacks = []
plugins = opts.get("plugins", None)
if plugins:
for plugin in plugins:
plugin.extend_gis_map(self)
# Flag to xml() that we've already been run
self.setup = True
return options
# -------------------------------------------------------------------------
def xml(self):
"""
Render the Map
- this is primarily done by inserting a lot of JavaScript
- CSS loaded as-standard to avoid delays in page loading
- HTML added in init() as a component
"""
if not self.setup:
self._setup()
# Add ExtJS
# @ToDo: Do this conditionally on whether Ext UI is used
s3_include_ext()
dumps = json.dumps
s3 = current.response.s3
js_global = s3.js_global
js_global_append = js_global.append
i18n_dict = self.i18n
i18n = []
i18n_append = i18n.append
for key, val in i18n_dict.items():
line = '''i18n.%s="%s"''' % (key, val)
if line not in i18n:
i18n_append(line)
i18n = '''\n'''.join(i18n)
if i18n not in js_global:
js_global_append(i18n)
globals_dict = self.globals
js_globals = []
for key, val in globals_dict.items():
line = '''S3.gis.%s=%s''' % (key, dumps(val, separators=SEPARATORS))
if line not in js_globals:
js_globals.append(line)
js_globals = '''\n'''.join(js_globals)
if js_globals not in js_global:
js_global_append(js_globals)
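        # For illustration, the strings appended to js_global above contain lines
        # such as (values hypothetical):
        #   i18n.gis_legend="Legend"
        #   S3.gis.mgrs=true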
debug = s3.debug
scripts = s3.scripts
if s3.cdn:
if debug:
script = \
"//cdnjs.cloudflare.com/ajax/libs/underscore.js/1.6.0/underscore.js"
else:
script = \
"//cdnjs.cloudflare.com/ajax/libs/underscore.js/1.6.0/underscore-min.js"
else:
if debug:
script = URL(c="static", f="scripts/underscore.js")
else:
script = URL(c="static", f="scripts/underscore-min.js")
if script not in scripts:
scripts.append(script)
if self.opts.get("color_picker", False):
if debug:
script = URL(c="static", f="scripts/spectrum.js")
else:
script = URL(c="static", f="scripts/spectrum.min.js")
if script not in scripts:
scripts.append(script)
if debug:
script = URL(c="static", f="scripts/S3/s3.gis.loader.js")
else:
script = URL(c="static", f="scripts/S3/s3.gis.loader.min.js")
if script not in scripts:
scripts.append(script)
callback = self.callback
map_id = self.id
options = self.options
projection = options["projection"]
try:
options = dumps(options, separators=SEPARATORS)
except Exception, exception:
current.log.error("Map %s failed to initialise" % map_id, exception)
plugin_callbacks = '''\n'''.join(self.plugin_callbacks)
if callback:
if callback == "DEFAULT":
if map_id == "default_map":
callback = '''S3.gis.show_map(null,%s)''' % options
else:
callback = '''S3.gis.show_map(%s,%s)''' % (map_id, options)
else:
# Store options where they can be read by a later show_map()
js_global_append('''S3.gis.options["%s"]=%s''' % (map_id,
options))
script = URL(c="static", f="scripts/yepnope.1.5.4-min.js")
if script not in scripts:
scripts.append(script)
if plugin_callbacks:
callback = '''%s\n%s''' % (callback, plugin_callbacks)
callback = '''function(){%s}''' % callback
else:
# Store options where they can be read by a later show_map()
js_global_append('''S3.gis.options["%s"]=%s''' % (map_id, options))
if plugin_callbacks:
callback = '''function(){%s}''' % plugin_callbacks
else:
callback = '''null'''
loader = \
'''s3_gis_loadjs(%(debug)s,%(projection)s,%(callback)s,%(scripts)s)''' \
% dict(debug = "true" if s3.debug else "false",
projection = projection,
callback = callback,
scripts = self.scripts
)
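        # For illustration, the queued loader call looks something like this
        # (values hypothetical):
        #   s3_gis_loadjs(false,900913,function(){S3.gis.show_map(null,{...})},['gis/...'])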
jquery_ready = s3.jquery_ready
if loader not in jquery_ready:
jquery_ready.append(loader)
# Return the HTML
return super(MAP, self).xml()
# =============================================================================
def addFeatures(features):
"""
Add Simple Features to the Draft layer
- used by S3LocationSelectorWidget
"""
simplify = GIS.simplify
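    # Illustrative sketch (assumption): "features" is expected to be an iterable of
    # geometries which GIS.simplify() can convert to GeoJSON, e.g. WKT strings:
    #features = ["POINT (-0.1 51.5)",
    #            "POINT (2.35 48.86)",
    #            ]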
_f = []
append = _f.append
for feature in features:
geojson = simplify(feature, output="geojson")
if geojson:
f = dict(type = "Feature",
geometry = json.loads(geojson))
append(f)
return _f
# =============================================================================
def addFeatureQueries(feature_queries):
"""
Add Feature Queries to the map
- These can be Rows or Storage()
NB These considerations need to be taken care of before arriving here:
Security of data
Localisation of name/popup_label
"""
db = current.db
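    # Illustrative sketch of a feature_queries entry, based on the keys read in the
    # loop below; names & values are hypothetical:
    #feature_queries = [{"name": "Offices",    # layer name
    #                    "query": rows,        # Rows with lat/lon (or joined gis_location)
    #                    "active": True,       # optional: initial visibility
    #                    "marker": marker,     # optional: marker_id or gis_marker Row
    #                    "opacity": 0.8,       # optional
    #                    },
    #                   ]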
s3db = current.s3db
cache = s3db.cache
request = current.request
controller = request.controller
function = request.function
fqtable = s3db.gis_feature_query
mtable = s3db.gis_marker
auth = current.auth
auth_user = auth.user
if auth_user:
created_by = auth_user.id
s3_make_session_owner = auth.s3_make_session_owner
else:
# Anonymous
# @ToDo: A deployment with many Anonymous Feature Queries being
# accessed will need to change this design - e.g. use session ID instead
created_by = None
layers_feature_query = []
append = layers_feature_query.append
for layer in feature_queries:
name = str(layer["name"])
_layer = dict(name=name)
name_safe = re.sub("\W", "_", name)
# Lat/Lon via Join or direct?
try:
layer["query"][0].gis_location.lat
join = True
except:
join = False
# Push the Features into a temporary table in order to have them accessible via GeoJSON
# @ToDo: Maintenance Script to clean out old entries (> 24 hours?)
cname = "%s_%s_%s" % (name_safe,
controller,
function)
# Clear old records
query = (fqtable.name == cname) & \
(fqtable.created_by == created_by)
db(query).delete()
for row in layer["query"]:
rowdict = {"name" : cname}
if join:
rowdict["lat"] = row.gis_location.lat
rowdict["lon"] = row.gis_location.lon
else:
rowdict["lat"] = row["lat"]
rowdict["lon"] = row["lon"]
if "popup_url" in row:
rowdict["popup_url"] = row["popup_url"]
if "popup_label" in row:
rowdict["popup_label"] = row["popup_label"]
if "marker" in row:
rowdict["marker_url"] = URL(c="static", f="img",
args=["markers",
row["marker"].image])
rowdict["marker_height"] = row["marker"].height
rowdict["marker_width"] = row["marker"].width
else:
if "marker_url" in row:
rowdict["marker_url"] = row["marker_url"]
if "marker_height" in row:
rowdict["marker_height"] = row["marker_height"]
if "marker_width" in row:
rowdict["marker_width"] = row["marker_width"]
if "shape" in row:
rowdict["shape"] = row["shape"]
if "size" in row:
rowdict["size"] = row["size"]
if "colour" in row:
rowdict["colour"] = row["colour"]
if "opacity" in row:
rowdict["opacity"] = row["opacity"]
record_id = fqtable.insert(**rowdict)
if not created_by:
s3_make_session_owner(fqtable, record_id)
# URL to retrieve the data
url = "%s.geojson?feature_query.name=%s&feature_query.created_by=%s" % \
(URL(c="gis", f="feature_query"),
cname,
created_by)
_layer["url"] = url
if "active" in layer and not layer["active"]:
_layer["visibility"] = False
if "marker" in layer:
# per-Layer Marker
marker = layer["marker"]
if isinstance(marker, int):
# integer (marker_id) not row
marker = db(mtable.id == marker).select(mtable.image,
mtable.height,
mtable.width,
limitby=(0, 1),
cache=cache
).first()
if marker:
# @ToDo: Single option as Marker.as_json_dict()
_layer["marker_url"] = marker["image"]
_layer["marker_height"] = marker["height"]
_layer["marker_width"] = marker["width"]
if "opacity" in layer and layer["opacity"] != 1:
_layer["opacity"] = "%.1f" % layer["opacity"]
if "cluster_attribute" in layer and \
layer["cluster_attribute"] != CLUSTER_ATTRIBUTE:
_layer["cluster_attribute"] = layer["cluster_attribute"]
if "cluster_distance" in layer and \
layer["cluster_distance"] != CLUSTER_DISTANCE:
_layer["cluster_distance"] = layer["cluster_distance"]
if "cluster_threshold" in layer and \
layer["cluster_threshold"] != CLUSTER_THRESHOLD:
_layer["cluster_threshold"] = layer["cluster_threshold"]
append(_layer)
return layers_feature_query
# =============================================================================
def addFeatureResources(feature_resources):
"""
Add Feature Resources to the map
- REST URLs to back-end resources
"""
T = current.T
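    # Illustrative sketch of feature_resources entries, based on the keys read
    # below; names & values are hypothetical:
    #feature_resources = [
    #    # Catalogue Layer:
    #    {"name": "Hospitals", "id": "hospitals", "layer_id": 5},
    #    # Simple REST URL:
    #    {"name": "Offices", "id": "offices",
    #     "url": "/eden/org/office.geojson", "tablename": "org_office"},
    #    ]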
db = current.db
s3db = current.s3db
ftable = s3db.gis_layer_feature
ltable = s3db.gis_layer_config
# Better to do a separate query
#mtable = s3db.gis_marker
stable = db.gis_style
config = GIS.get_config()
config_id = config.id
postgres = current.deployment_settings.get_database_type() == "postgres"
layers_feature_resource = []
append = layers_feature_resource.append
for layer in feature_resources:
name = str(layer["name"])
_layer = dict(name=name)
_id = str(layer["id"])
_id = re.sub("\W", "_", _id)
_layer["id"] = _id
# Are we loading a Catalogue Layer or a simple URL?
layer_id = layer.get("layer_id", None)
if layer_id:
query = (ftable.layer_id == layer_id)
left = [ltable.on((ltable.layer_id == layer_id) & \
(ltable.config_id == config_id)),
stable.on((stable.layer_id == layer_id) & \
((stable.config_id == config_id) | \
(stable.config_id == None)) & \
(stable.record_id == None) & \
(stable.aggregate == False)),
# Better to do a separate query
#mtable.on(mtable.id == stable.marker_id),
]
            # @ToDo: Need to fix this? Make the style lookup a separate call
if postgres:
# None is last
orderby = stable.config_id
else:
# None is 1st
orderby = ~stable.config_id
row = db(query).select(ftable.layer_id,
ftable.controller,
ftable.function,
ftable.filter,
ftable.aggregate,
ftable.trackable,
ftable.use_site,
# @ToDo: Deprecate Legacy
ftable.popup_fields,
# @ToDo: Deprecate Legacy
ftable.popup_label,
ftable.cluster_attribute,
ltable.dir,
# Better to do a separate query
#mtable.image,
#mtable.height,
#mtable.width,
stable.marker_id,
stable.opacity,
stable.popup_format,
# @ToDo: If-required
#stable.url_format,
stable.cluster_distance,
stable.cluster_threshold,
stable.style,
left=left,
limitby=(0, 1),
orderby=orderby,
).first()
_dir = layer.get("dir", row["gis_layer_config.dir"])
# Better to do a separate query
#_marker = row["gis_marker"]
_style = row["gis_style"]
row = row["gis_layer_feature"]
if row.use_site:
maxdepth = 1
else:
maxdepth = 0
opacity = layer.get("opacity", _style.opacity) or 1
cluster_attribute = layer.get("cluster_attribute",
row.cluster_attribute) or \
CLUSTER_ATTRIBUTE
cluster_distance = layer.get("cluster_distance",
_style.cluster_distance) or \
CLUSTER_DISTANCE
cluster_threshold = layer.get("cluster_threshold",
_style.cluster_threshold)
if cluster_threshold is None:
cluster_threshold = CLUSTER_THRESHOLD
style = layer.get("style", None)
if style:
try:
# JSON Object?
style = json.loads(style)
except:
current.log.error("Invalid Style: %s" % style)
style = None
else:
style = _style.style
#url_format = _style.url_format
aggregate = layer.get("aggregate", row.aggregate)
if aggregate:
url = "%s.geojson?layer=%i&show_ids=true" % \
(URL(c=row.controller, f=row.function, args="report"),
row.layer_id)
#if not url_format:
# Use gis/location controller in all reports
url_format = "%s/{id}.plain" % URL(c="gis", f="location")
else:
_url = URL(c=row.controller, f=row.function)
url = "%s.geojson?layer=%i&components=None&show_ids=true&maxdepth=%s" % \
(_url,
row.layer_id,
maxdepth)
#if not url_format:
url_format = "%s/{id}.plain" % _url
# Use specified filter or fallback to the one in the layer
_filter = layer.get("filter", row.filter)
if _filter:
url = "%s&%s" % (url, _filter)
if row.trackable:
url = "%s&track=1" % url
if not style:
marker = layer.get("marker")
if marker:
marker = Marker(marker).as_json_dict()
elif _style.marker_id:
marker = Marker(marker_id=_style.marker_id).as_json_dict()
popup_format = _style.popup_format
if not popup_format:
# Old-style
popup_fields = row["popup_fields"]
if popup_fields:
popup_label = row["popup_label"]
if popup_label:
popup_format = "{%s} (%s)" % (popup_fields[0],
current.T(popup_label))
else:
popup_format = "%s" % popup_fields[0]
for f in popup_fields[1:]:
popup_format = "%s<br />{%s}" % (popup_format, f)
else:
# URL to retrieve the data
url = layer["url"]
tablename = layer["tablename"]
table = s3db[tablename]
# Optimise the query
if "location_id" in table.fields:
maxdepth = 0
elif "site_id" in table.fields:
maxdepth = 1
elif tablename == "gis_location":
maxdepth = 0
else:
# Not much we can do!
# @ToDo: Use Context
continue
options = "components=None&maxdepth=%s&show_ids=true" % maxdepth
if "?" in url:
url = "%s&%s" % (url, options)
else:
url = "%s?%s" % (url, options)
opacity = layer.get("opacity", 1)
cluster_attribute = layer.get("cluster_attribute",
CLUSTER_ATTRIBUTE)
cluster_distance = layer.get("cluster_distance",
CLUSTER_DISTANCE)
cluster_threshold = layer.get("cluster_threshold",
CLUSTER_THRESHOLD)
_dir = layer.get("dir", None)
style = layer.get("style", None)
if style:
try:
# JSON Object?
style = json.loads(style)
except:
current.log.error("Invalid Style: %s" % style)
style = None
if not style:
marker = layer.get("marker", None)
if marker:
marker = Marker(marker).as_json_dict()
popup_format = layer.get("popup_format")
url_format = layer.get("url_format")
if "active" in layer and not layer["active"]:
_layer["visibility"] = False
if opacity != 1:
_layer["opacity"] = "%.1f" % opacity
if popup_format:
if "T(" in popup_format:
# i18n
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
_layer["popup_format"] = popup_format
if url_format:
_layer["url_format"] = url_format
if cluster_attribute != CLUSTER_ATTRIBUTE:
_layer["cluster_attribute"] = cluster_attribute
if cluster_distance != CLUSTER_DISTANCE:
_layer["cluster_distance"] = cluster_distance
if cluster_threshold != CLUSTER_THRESHOLD:
_layer["cluster_threshold"] = cluster_threshold
if _dir:
_layer["dir"] = _dir
if style:
_layer["style"] = style
elif marker:
# Per-layer Marker
_layer["marker"] = marker
else:
# Request the server to provide per-feature Markers
url = "%s&markers=1" % url
_layer["url"] = url
append(_layer)
return layers_feature_resource
# =============================================================================
class Layer(object):
"""
Abstract base class for Layers from Catalogue
"""
def __init__(self, all_layers):
sublayers = []
append = sublayers.append
# List of Scripts to load async with the Map JavaScript
self.scripts = []
s3_has_role = current.auth.s3_has_role
tablename = self.tablename
table = current.s3db[tablename]
fields = table.fields
metafields = s3_all_meta_field_names()
fields = [table[f] for f in fields if f not in metafields]
layer_ids = [row["gis_layer_config.layer_id"] for row in all_layers if \
row["gis_layer_entity.instance_type"] == tablename]
query = (table.layer_id.belongs(set(layer_ids)))
rows = current.db(query).select(*fields)
SubLayer = self.SubLayer
# Flag to show whether we've set the default baselayer
# (otherwise a config higher in the hierarchy can overrule one lower down)
base = True
# Layers requested to be visible via URL (e.g. embedded map)
visible = current.request.get_vars.get("layers", None)
if visible:
visible = visible.split(".")
else:
visible = []
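        # For illustration (IDs hypothetical): a request such as ?layers=27.5.13
        # asks for the layers with layer_id 27, 5 and 13 to be initially visible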
metadata = current.deployment_settings.get_gis_layer_metadata()
styled = self.style
for record in rows:
layer_id = record.layer_id
# Find the 1st row in all_layers which matches this
for row in all_layers:
if row["gis_layer_config.layer_id"] == layer_id:
layer_config = row["gis_layer_config"]
break
# Check if layer is enabled
if layer_config.enabled is False:
continue
# Check user is allowed to access the layer
role_required = record.role_required
if role_required and not s3_has_role(role_required):
continue
# All OK - add SubLayer
record["visible"] = layer_config.visible or str(layer_id) in visible
if base and layer_config.base:
# var name can't conflict with OSM/WMS/ArcREST layers
record["_base"] = True
base = False
else:
record["_base"] = False
record["dir"] = layer_config.dir
if styled:
style = row.get("gis_style", None)
if style:
style_dict = style.style
if isinstance(style_dict, basestring):
# Matryoshka?
try:
style_dict = json.loads(style_dict)
except ValueError:
pass
if style_dict:
record["style"] = style_dict
else:
record["style"] = None
marker = row.get("gis_marker", None)
if marker:
record["marker"] = Marker(marker)
#if style.marker_id:
# record["marker"] = Marker(marker_id=style.marker_id)
else:
# Default Marker?
record["marker"] = Marker(tablename=tablename)
record["opacity"] = style.opacity or 1
record["popup_format"] = style.popup_format
record["cluster_distance"] = style.cluster_distance or CLUSTER_DISTANCE
if style.cluster_threshold != None:
record["cluster_threshold"] = style.cluster_threshold
else:
record["cluster_threshold"] = CLUSTER_THRESHOLD
else:
record["style"] = None
record["opacity"] = 1
record["popup_format"] = None
record["cluster_distance"] = CLUSTER_DISTANCE
record["cluster_threshold"] = CLUSTER_THRESHOLD
# Default Marker?
record["marker"] = Marker(tablename=tablename)
if metadata:
post_id = row.get("cms_post_layer.post_id", None)
record["post_id"] = post_id
if tablename in ("gis_layer_bing", "gis_layer_google"):
# SubLayers handled differently
append(record)
else:
append(SubLayer(record))
# Alphasort layers
# - client will only sort within their type: s3.gis.layers.js
self.sublayers = sorted(sublayers, key=lambda row: row.name)
# -------------------------------------------------------------------------
def as_dict(self, options=None):
"""
Output the Layers as a Python dict
"""
sublayer_dicts = []
append = sublayer_dicts.append
sublayers = self.sublayers
for sublayer in sublayers:
# Read the output dict for this sublayer
sublayer_dict = sublayer.as_dict()
if sublayer_dict:
# Add this layer to the list of layers for this layer type
append(sublayer_dict)
if sublayer_dicts:
if options:
# Used by Map._setup()
options[self.dictname] = sublayer_dicts
else:
# Used by as_json() and hence as_javascript()
return sublayer_dicts
# -------------------------------------------------------------------------
def as_json(self):
"""
Output the Layers as JSON
"""
result = self.as_dict()
if result:
#return json.dumps(result, indent=4, separators=(",", ": "), sort_keys=True)
return json.dumps(result, separators=SEPARATORS)
# -------------------------------------------------------------------------
def as_javascript(self):
"""
Output the Layers as global Javascript
- suitable for inclusion in the HTML page
"""
result = self.as_json()
if result:
return '''S3.gis.%s=%s\n''' % (self.dictname, result)
# -------------------------------------------------------------------------
class SubLayer(object):
def __init__(self, record):
# Ensure all attributes available (even if Null)
self.__dict__.update(record)
del record
if current.deployment_settings.get_L10n_translate_gis_layer():
self.safe_name = re.sub('[\\"]', "", s3_unicode(current.T(self.name)))
else:
self.safe_name = re.sub('[\\"]', "", self.name)
if hasattr(self, "projection_id"):
self.projection = Projection(self.projection_id)
def setup_clustering(self, output):
if hasattr(self, "cluster_attribute"):
cluster_attribute = self.cluster_attribute
else:
cluster_attribute = None
cluster_distance = self.cluster_distance
cluster_threshold = self.cluster_threshold
if cluster_attribute and \
cluster_attribute != CLUSTER_ATTRIBUTE:
output["cluster_attribute"] = cluster_attribute
if cluster_distance != CLUSTER_DISTANCE:
output["cluster_distance"] = cluster_distance
if cluster_threshold != CLUSTER_THRESHOLD:
output["cluster_threshold"] = cluster_threshold
def setup_folder(self, output):
if self.dir:
output["dir"] = s3_unicode(current.T(self.dir))
def setup_folder_and_visibility(self, output):
if not self.visible:
output["visibility"] = False
if self.dir:
output["dir"] = s3_unicode(current.T(self.dir))
def setup_folder_visibility_and_opacity(self, output):
if not self.visible:
output["visibility"] = False
if self.opacity != 1:
output["opacity"] = "%.1f" % self.opacity
if self.dir:
output["dir"] = s3_unicode(current.T(self.dir))
# ---------------------------------------------------------------------
@staticmethod
def add_attributes_if_not_default(output, **values_and_defaults):
# could also write values in debug mode, to check if defaults ignored.
# could also check values are not being overwritten.
for key, (value, defaults) in values_and_defaults.iteritems():
if value not in defaults:
output[key] = value
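        # For illustration (mirroring the call in LayerArcREST.SubLayer.as_dict below):
        #   self.add_attributes_if_not_default(output,
        #                                      transparent = (self.transparent, (True,)))
        # only writes output["transparent"] when self.transparent is not one of the
        # listed defaults (here: not True)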
# -----------------------------------------------------------------------------
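# Minimal sketch (illustrative only; "example" is not a real layer type) of the
# contract followed by the concrete Layer subclasses below:
#
#class LayerExample(Layer):
#    tablename = "gis_layer_example"  # hypothetical source table
#    dictname = "layers_example"      # key used in the Map options / S3.gis namespace
#    style = False                    # whether gis_style rows are looked up
#    class SubLayer(Layer.SubLayer):
#        def as_dict(self):
#            return {"id": self.layer_id,
#                    "name": self.safe_name,
#                    "url": self.url,
#                    }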
class LayerArcREST(Layer):
"""
ArcGIS REST Layers from Catalogue
"""
tablename = "gis_layer_arcrest"
dictname = "layers_arcrest"
style = False
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
# Mandatory attributes
output = {"id": self.layer_id,
"type": "arcrest",
"name": self.safe_name,
"url": self.url,
}
# Attributes which are defaulted client-side if not set
self.setup_folder_and_visibility(output)
self.add_attributes_if_not_default(
output,
layers = (self.layers, ([0],)),
transparent = (self.transparent, (True,)),
base = (self.base, (False,)),
_base = (self._base, (False,)),
)
return output
# -----------------------------------------------------------------------------
class LayerBing(Layer):
"""
Bing Layers from Catalogue
"""
tablename = "gis_layer_bing"
dictname = "Bing"
style = False
# -------------------------------------------------------------------------
def as_dict(self, options=None):
sublayers = self.sublayers
if sublayers:
if Projection().epsg != 900913:
raise Exception("Cannot display Bing layers unless we're using the Spherical Mercator Projection\n")
apikey = current.deployment_settings.get_gis_api_bing()
if not apikey:
raise Exception("Cannot display Bing layers unless we have an API key\n")
# Mandatory attributes
ldict = {"ApiKey": apikey
}
for sublayer in sublayers:
# Attributes which are defaulted client-side if not set
if sublayer._base:
# Set default Base layer
ldict["Base"] = sublayer.type
if sublayer.type == "aerial":
ldict["Aerial"] = {"name": sublayer.name or "Bing Satellite",
"id": sublayer.layer_id}
elif sublayer.type == "road":
ldict["Road"] = {"name": sublayer.name or "Bing Roads",
"id": sublayer.layer_id}
elif sublayer.type == "hybrid":
ldict["Hybrid"] = {"name": sublayer.name or "Bing Hybrid",
"id": sublayer.layer_id}
if options:
# Used by Map._setup()
options[self.dictname] = ldict
else:
# Used by as_json() and hence as_javascript()
return ldict
# -----------------------------------------------------------------------------
class LayerCoordinate(Layer):
"""
Coordinate Layer from Catalogue
- there should only be one of these
"""
tablename = "gis_layer_coordinate"
dictname = "CoordinateGrid"
style = False
# -------------------------------------------------------------------------
def as_dict(self, options=None):
sublayers = self.sublayers
if sublayers:
sublayer = sublayers[0]
name_safe = re.sub("'", "", sublayer.name)
ldict = dict(name = name_safe,
visibility = sublayer.visible,
id = sublayer.layer_id)
if options:
# Used by Map._setup()
options[self.dictname] = ldict
else:
# Used by as_json() and hence as_javascript()
return ldict
# -----------------------------------------------------------------------------
class LayerEmpty(Layer):
"""
Empty Layer from Catalogue
- there should only be one of these
"""
tablename = "gis_layer_empty"
dictname = "EmptyLayer"
style = False
# -------------------------------------------------------------------------
def as_dict(self, options=None):
sublayers = self.sublayers
if sublayers:
sublayer = sublayers[0]
name = s3_unicode(current.T(sublayer.name))
name_safe = re.sub("'", "", name)
ldict = dict(name = name_safe,
id = sublayer.layer_id)
if sublayer._base:
ldict["base"] = True
if options:
# Used by Map._setup()
options[self.dictname] = ldict
else:
# Used by as_json() and hence as_javascript()
return ldict
# -----------------------------------------------------------------------------
class LayerFeature(Layer):
"""
Feature Layers from Catalogue
"""
tablename = "gis_layer_feature"
dictname = "layers_feature"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def __init__(self, record):
controller = record.controller
self.skip = False
if controller is not None:
if controller not in current.deployment_settings.modules:
# Module is disabled
self.skip = True
if not current.auth.permission.has_permission("read",
c=controller,
f=record.function):
# User has no permission to this resource (in ACL)
self.skip = True
else:
error = "Feature Layer Record '%s' has no controller" % \
record.name
raise Exception(error)
super(LayerFeature.SubLayer, self).__init__(record)
def as_dict(self):
if self.skip:
# Skip layer
return
if self.use_site:
maxdepth = 1
else:
maxdepth = 0
if self.aggregate:
# id is used for url_format
url = "%s.geojson?layer=%i&show_ids=true" % \
(URL(c=self.controller, f=self.function, args="report"),
self.layer_id)
# Use gis/location controller in all reports
url_format = "%s/{id}.plain" % URL(c="gis", f="location")
else:
_url = URL(self.controller, self.function)
# id is used for url_format
url = "%s.geojson?layer=%i&components=None&maxdepth=%s&show_ids=true" % \
(_url,
self.layer_id,
maxdepth)
url_format = "%s/{id}.plain" % _url
if self.filter:
url = "%s&%s" % (url, self.filter)
if self.trackable:
url = "%s&track=1" % url
# Mandatory attributes
output = {"id": self.layer_id,
# Defaults client-side if not-provided
#"type": "feature",
"name": self.safe_name,
"url_format": url_format,
"url": url,
}
popup_format = self.popup_format
if popup_format:
# New-style
if "T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
output["popup_format"] = popup_format
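                # For illustration (assuming the T("...") token convention matched by
                # regex_translate): a stored popup_format such as
                #   '{name} T("Population"): {population}'
                # reaches the client with the token replaced by the localised string,
                # e.g. '{name} Population: {population}'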
else:
# @ToDo: Deprecate
popup_fields = self.popup_fields
if popup_fields:
# Old-style
popup_label = self.popup_label
if popup_label:
popup_format = "{%s} (%s)" % (popup_fields[0],
current.T(popup_label))
else:
popup_format = "%s" % popup_fields[0]
for f in popup_fields[1:]:
popup_format = "%s<br/>{%s}" % (popup_format, f)
output["popup_format"] = popup_format or ""
# Attributes which are defaulted client-side if not set
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
if self.aggregate:
# Enable the Cluster Strategy, so that it can be enabled/disabled
# depending on the zoom level & hence Points or Polygons
output["cluster"] = 1
if not popup_format:
# Need this to differentiate from e.g. FeatureQueries
output["no_popups"] = 1
if self.style:
output["style"] = self.style
else:
self.marker.add_attributes_to_output(output)
return output
# -----------------------------------------------------------------------------
class LayerGeoJSON(Layer):
"""
GeoJSON Layers from Catalogue
"""
tablename = "gis_layer_geojson"
dictname = "layers_geojson"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
# Mandatory attributes
output = {"id": self.layer_id,
"type": "geojson",
"name": self.safe_name,
"url": self.url,
}
# Attributes which are defaulted client-side if not set
projection = self.projection
if projection.epsg != 4326:
output["projection"] = projection.epsg
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
if self.style:
output["style"] = self.style
else:
self.marker.add_attributes_to_output(output)
popup_format = self.popup_format
if popup_format:
if "T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
output["popup_format"] = popup_format
return output
# -----------------------------------------------------------------------------
class LayerGeoRSS(Layer):
"""
GeoRSS Layers from Catalogue
"""
tablename = "gis_layer_georss"
dictname = "layers_georss"
style = True
def __init__(self, all_layers):
super(LayerGeoRSS, self).__init__(all_layers)
LayerGeoRSS.SubLayer.cachetable = current.s3db.gis_cache
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
db = current.db
request = current.request
response = current.response
cachetable = self.cachetable
url = self.url
# Check to see if we should Download layer to the cache
download = True
query = (cachetable.source == url)
existing_cached_copy = db(query).select(cachetable.modified_on,
limitby=(0, 1)).first()
            refresh = self.refresh or 900 # Default to 15 minutes if refresh is unset (legacy DB)
if existing_cached_copy:
modified_on = existing_cached_copy.modified_on
cutoff = modified_on + datetime.timedelta(seconds=refresh)
if request.utcnow < cutoff:
download = False
if download:
# Download layer to the Cache
from gluon.tools import fetch
# @ToDo: Call directly without going via HTTP
# @ToDo: Make this async by using S3Task (also use this for the refresh time)
fields = ""
if self.data:
fields = "&data_field=%s" % self.data
if self.image:
fields = "%s&image_field=%s" % (fields, self.image)
_url = "%s%s/update.georss?fetchurl=%s%s" % (current.deployment_settings.get_base_public_url(),
URL(c="gis", f="cache_feed"),
url,
fields)
# Keep Session for local URLs
import Cookie
cookie = Cookie.SimpleCookie()
cookie[response.session_id_name] = response.session_id
current.session._unlock(response)
try:
# @ToDo: Need to commit to not have DB locked with SQLite?
fetch(_url, cookie=cookie)
if existing_cached_copy:
                        # Clear old cache entries which are no longer current
query = (cachetable.source == url) & \
(cachetable.modified_on < cutoff)
db(query).delete()
except Exception, exception:
current.log.error("GeoRSS %s download error" % url, exception)
# Feed down
if existing_cached_copy:
# Use cached copy
# Should we Update timestamp to prevent every
# subsequent request attempting the download?
#query = (cachetable.source == url)
#db(query).update(modified_on=request.utcnow)
pass
else:
response.warning += "%s down & no cached copy available" % url
name_safe = self.safe_name
# Pass the GeoJSON URL to the client
# Filter to the source of this feed
url = "%s.geojson?cache.source=%s" % (URL(c="gis", f="cache_feed"),
url)
# Mandatory attributes
output = {"id": self.layer_id,
"type": "georss",
"name": name_safe,
"url": url,
}
self.marker.add_attributes_to_output(output)
# Attributes which are defaulted client-side if not set
if self.refresh != 900:
output["refresh"] = self.refresh
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
return output
# -----------------------------------------------------------------------------
class LayerGoogle(Layer):
"""
Google Layers/Tools from Catalogue
"""
tablename = "gis_layer_google"
dictname = "Google"
style = False
# -------------------------------------------------------------------------
def as_dict(self, options=None):
sublayers = self.sublayers
if sublayers:
T = current.T
epsg = (Projection().epsg == 900913)
settings = current.deployment_settings
apikey = settings.get_gis_api_google()
s3 = current.response.s3
debug = s3.debug
# Google scripts use document.write so cannot be loaded async via yepnope.js
s3_scripts = s3.scripts
ldict = {}
for sublayer in sublayers:
# Attributes which are defaulted client-side if not set
if sublayer.type == "earth":
ldict["Earth"] = str(T("Switch to 3D"))
#{"modules":[{"name":"earth","version":"1"}]}
script = "//www.google.com/jsapi?key=" + apikey + "&autoload=%7B%22modules%22%3A%5B%7B%22name%22%3A%22earth%22%2C%22version%22%3A%221%22%7D%5D%7D"
if script not in s3_scripts:
s3_scripts.append(script)
# Dynamic Loading not supported: https://developers.google.com/loader/#Dynamic
#s3.jquery_ready.append('''try{google.load('earth','1')catch(e){}''')
if debug:
self.scripts.append("gis/gxp/widgets/GoogleEarthPanel.js")
else:
self.scripts.append("gis/gxp/widgets/GoogleEarthPanel.min.js")
s3.js_global.append('''S3.public_url="%s"''' % settings.get_base_public_url())
elif epsg:
# Earth is the only layer which can run in non-Spherical Mercator
# @ToDo: Warning?
if sublayer._base:
# Set default Base layer
ldict["Base"] = sublayer.type
if sublayer.type == "satellite":
ldict["Satellite"] = {"name": sublayer.name or "Google Satellite",
"id": sublayer.layer_id}
elif sublayer.type == "maps":
ldict["Maps"] = {"name": sublayer.name or "Google Maps",
"id": sublayer.layer_id}
elif sublayer.type == "hybrid":
ldict["Hybrid"] = {"name": sublayer.name or "Google Hybrid",
"id": sublayer.layer_id}
elif sublayer.type == "streetview":
ldict["StreetviewButton"] = "Click where you want to open Streetview"
elif sublayer.type == "terrain":
ldict["Terrain"] = {"name": sublayer.name or "Google Terrain",
"id": sublayer.layer_id}
elif sublayer.type == "mapmaker":
ldict["MapMaker"] = {"name": sublayer.name or "Google MapMaker",
"id": sublayer.layer_id}
elif sublayer.type == "mapmakerhybrid":
ldict["MapMakerHybrid"] = {"name": sublayer.name or "Google MapMaker Hybrid",
"id": sublayer.layer_id}
if "MapMaker" in ldict or "MapMakerHybrid" in ldict:
# Need to use v2 API
# This should be able to be fixed in OpenLayers now since Google have fixed in v3 API:
# http://code.google.com/p/gmaps-api-issues/issues/detail?id=2349#c47
script = "//maps.google.com/maps?file=api&v=2&key=%s" % apikey
if script not in s3_scripts:
s3_scripts.append(script)
else:
# v3 API (3.16 is frozen, 3.17 release & 3.18 is nightly)
script = "//maps.google.com/maps/api/js?v=3.17&sensor=false"
if script not in s3_scripts:
s3_scripts.append(script)
if "StreetviewButton" in ldict:
# Streetview doesn't work with v2 API
ldict["StreetviewButton"] = str(T("Click where you want to open Streetview"))
ldict["StreetviewTitle"] = str(T("Street View"))
if debug:
self.scripts.append("gis/gxp/widgets/GoogleStreetViewPanel.js")
else:
self.scripts.append("gis/gxp/widgets/GoogleStreetViewPanel.min.js")
if options:
# Used by Map._setup()
options[self.dictname] = ldict
else:
# Used by as_json() and hence as_javascript()
return ldict
# -----------------------------------------------------------------------------
class LayerGPX(Layer):
"""
GPX Layers from Catalogue
"""
tablename = "gis_layer_gpx"
dictname = "layers_gpx"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
url = URL(c="default", f="download",
args=self.track)
# Mandatory attributes
output = {"id": self.layer_id,
"name": self.safe_name,
"url": url,
}
# Attributes which are defaulted client-side if not set
self.marker.add_attributes_to_output(output)
self.add_attributes_if_not_default(
output,
waypoints = (self.waypoints, (True,)),
tracks = (self.tracks, (True,)),
routes = (self.routes, (True,)),
)
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
return output
# -----------------------------------------------------------------------------
class LayerJS(Layer):
"""
JS Layers from Catalogue
- these are raw Javascript layers for use by expert OpenLayers people
to quickly add/configure new data sources without needing support
from back-end Sahana programmers
"""
tablename = "gis_layer_js"
dictname = "layers_js"
style = False
# -------------------------------------------------------------------------
def as_dict(self, options=None):
sublayers = self.sublayers
if sublayers:
sublayer_dicts = []
append = sublayer_dicts.append
for sublayer in sublayers:
append(sublayer.code)
if options:
# Used by Map._setup()
options[self.dictname] = sublayer_dicts
else:
# Used by as_json() and hence as_javascript()
return sublayer_dicts
# -----------------------------------------------------------------------------
class LayerKML(Layer):
"""
KML Layers from Catalogue
"""
tablename = "gis_layer_kml"
dictname = "layers_kml"
style = True
# -------------------------------------------------------------------------
def __init__(self, all_layers, init=True):
"Set up the KML cache, should be done once per request"
super(LayerKML, self).__init__(all_layers)
# Can we cache downloaded KML feeds?
# Needed for unzipping & filtering as well
# @ToDo: Should we move this folder to static to speed up access to cached content?
# Do we need to secure it?
request = current.request
cachepath = os.path.join(request.folder,
"uploads",
"gis_cache")
if os.path.exists(cachepath):
cacheable = os.access(cachepath, os.W_OK)
else:
try:
os.mkdir(cachepath)
except OSError, os_error:
current.log.error("GIS: KML layers cannot be cached: %s %s" % \
(cachepath, os_error))
cacheable = False
else:
cacheable = True
# @ToDo: Migrate to gis_cache
LayerKML.cachetable = current.s3db.gis_cache2
LayerKML.cacheable = cacheable
LayerKML.cachepath = cachepath
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
db = current.db
request = current.request
cachetable = LayerKML.cachetable
cacheable = LayerKML.cacheable
#cachepath = LayerKML.cachepath
name = self.name
if cacheable:
_name = urllib2.quote(name)
_name = _name.replace("%", "_")
filename = "%s.file.%s.kml" % (cachetable._tablename,
_name)
# Should we download a fresh copy of the source file?
download = True
query = (cachetable.name == name)
cached = db(query).select(cachetable.modified_on,
limitby=(0, 1)).first()
                refresh = self.refresh or 900 # Default to 15 minutes if refresh is unset (legacy DB)
if cached:
modified_on = cached.modified_on
cutoff = modified_on + datetime.timedelta(seconds=refresh)
if request.utcnow < cutoff:
download = False
if download:
# Download file (async, if workers alive)
response = current.response
session_id_name = response.session_id_name
session_id = response.session_id
current.s3task.async("gis_download_kml",
args=[self.id, filename, session_id_name, session_id])
if cached:
db(query).update(modified_on=request.utcnow)
else:
cachetable.insert(name=name, file=filename)
url = URL(c="default", f="download",
args=[filename])
else:
# No caching possible (e.g. GAE), display file direct from remote (using Proxy)
# (Requires OpenLayers.Layer.KML to be available)
url = self.url
# Mandatory attributes
output = dict(id = self.layer_id,
name = self.safe_name,
url = url,
)
# Attributes which are defaulted client-side if not set
self.add_attributes_if_not_default(
output,
title = (self.title, ("name", None, "")),
body = (self.body, ("description", None)),
refresh = (self.refresh, (900,)),
)
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
if self.style:
output["style"] = self.style
else:
self.marker.add_attributes_to_output(output)
return output
# -----------------------------------------------------------------------------
class LayerOSM(Layer):
"""
OpenStreetMap Layers from Catalogue
@ToDo: Provide a catalogue of standard layers which are fully-defined
in static & can just have name over-ridden, as well as
fully-custom layers.
"""
tablename = "gis_layer_openstreetmap"
dictname = "layers_osm"
style = False
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
if Projection().epsg != 900913:
# Cannot display OpenStreetMap layers unless we're using the Spherical Mercator Projection
return {}
# Mandatory attributes
output = {"id": self.layer_id,
"name": self.safe_name,
"url1": self.url1,
}
# Attributes which are defaulted client-side if not set
self.add_attributes_if_not_default(
output,
base = (self.base, (True,)),
_base = (self._base, (False,)),
url2 = (self.url2, ("",)),
url3 = (self.url3, ("",)),
zoomLevels = (self.zoom_levels, (9,)),
attribution = (self.attribution, (None,)),
)
self.setup_folder_and_visibility(output)
return output
# -----------------------------------------------------------------------------
class LayerOpenWeatherMap(Layer):
"""
OpenWeatherMap Layers from Catalogue
"""
tablename = "gis_layer_openweathermap"
dictname = "OWM"
style = False
# -------------------------------------------------------------------------
def as_dict(self, options=None):
sublayers = self.sublayers
if sublayers:
if current.response.s3.debug:
self.scripts.append("gis/OWM.OpenLayers.js")
else:
self.scripts.append("gis/OWM.OpenLayers.min.js")
ldict = {}
for sublayer in sublayers:
if sublayer.type == "station":
ldict["station"] = {"name": sublayer.name or "Weather Stations",
"id": sublayer.layer_id,
"dir": sublayer.dir,
"visibility": sublayer.visible
}
elif sublayer.type == "city":
ldict["city"] = {"name": sublayer.name or "Current Weather",
"id": sublayer.layer_id,
"dir": sublayer.dir,
"visibility": sublayer.visible
}
if options:
# Used by Map._setup()
options[self.dictname] = ldict
else:
# Used by as_json() and hence as_javascript()
return ldict
# -----------------------------------------------------------------------------
class LayerShapefile(Layer):
"""
Shapefile Layers from Catalogue
"""
tablename = "gis_layer_shapefile"
dictname = "layers_shapefile"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
url = "%s/%s/data.geojson" % \
(URL(c="gis", f="layer_shapefile"), self.id)
if self.filter:
url = "%s?layer_shapefile_%s.%s" % (url, self.id, self.filter)
# Mandatory attributes
output = {"id": self.layer_id,
"type": "shapefile",
"name": self.safe_name,
"url": url,
# Shapefile layers don't alter their contents, so don't refresh
"refresh": 0,
}
# Attributes which are defaulted client-side if not set
self.add_attributes_if_not_default(
output,
desc = (self.description, (None, "")),
src = (self.source_name, (None, "")),
src_url = (self.source_url, (None, "")),
)
# We convert on-upload to have BBOX handling work properly
#projection = self.projection
#if projection.epsg != 4326:
# output["projection"] = projection.epsg
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
if self.style:
output["style"] = self.style
else:
self.marker.add_attributes_to_output(output)
return output
# -----------------------------------------------------------------------------
class LayerTheme(Layer):
"""
Theme Layers from Catalogue
"""
tablename = "gis_layer_theme"
dictname = "layers_theme"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
url = "%s.geojson?theme_data.layer_theme_id=%i&polygons=1&maxdepth=0" % \
(URL(c="gis", f="theme_data"), self.id)
# Mandatory attributes
output = {"id": self.layer_id,
"type": "theme",
"name": self.safe_name,
"url": url,
}
# Attributes which are defaulted client-side if not set
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
style = self.style
if style:
output["style"] = style
return output
# -----------------------------------------------------------------------------
class LayerTMS(Layer):
"""
TMS Layers from Catalogue
"""
tablename = "gis_layer_tms"
dictname = "layers_tms"
style = False
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
# Mandatory attributes
output = {"id": self.layer_id,
"type": "tms",
"name": self.safe_name,
"url": self.url,
"layername": self.layername
}
# Attributes which are defaulted client-side if not set
self.add_attributes_if_not_default(
output,
_base = (self._base, (False,)),
url2 = (self.url2, (None,)),
url3 = (self.url3, (None,)),
format = (self.img_format, ("png", None)),
zoomLevels = (self.zoom_levels, (19,)),
attribution = (self.attribution, (None,)),
)
self.setup_folder(output)
return output
# -----------------------------------------------------------------------------
class LayerWFS(Layer):
"""
WFS Layers from Catalogue
"""
tablename = "gis_layer_wfs"
dictname = "layers_wfs"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
# Mandatory attributes
output = dict(id = self.layer_id,
name = self.safe_name,
url = self.url,
title = self.title,
featureType = self.featureType,
)
# Attributes which are defaulted client-side if not set
self.add_attributes_if_not_default(
output,
version = (self.version, ("1.1.0",)),
featureNS = (self.featureNS, (None, "")),
geometryName = (self.geometryName, ("the_geom",)),
schema = (self.wfs_schema, (None, "")),
username = (self.username, (None, "")),
password = (self.password, (None, "")),
projection = (self.projection.epsg, (4326,)),
desc = (self.description, (None, "")),
src = (self.source_name, (None, "")),
src_url = (self.source_url, (None, "")),
refresh = (self.refresh, (0,)),
#editable
)
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
if self.style:
output["style"] = self.style
else:
self.marker.add_attributes_to_output(output)
return output
# -----------------------------------------------------------------------------
class LayerWMS(Layer):
"""
WMS Layers from Catalogue
"""
tablename = "gis_layer_wms"
dictname = "layers_wms"
style = False
# -------------------------------------------------------------------------
def __init__(self, all_layers):
super(LayerWMS, self).__init__(all_layers)
if self.sublayers:
if current.response.s3.debug:
self.scripts.append("gis/gxp/plugins/WMSGetFeatureInfo.js")
else:
self.scripts.append("gis/gxp/plugins/WMSGetFeatureInfo.min.js")
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
if self.queryable:
current.response.s3.gis.get_feature_info = True
# Mandatory attributes
output = dict(id = self.layer_id,
name = self.safe_name,
url = self.url,
layers = self.layers
)
# Attributes which are defaulted client-side if not set
legend_url = self.legend_url
if legend_url and not legend_url.startswith("http"):
legend_url = "%s/%s%s" % \
(current.deployment_settings.get_base_public_url(),
current.request.application,
legend_url)
attr = dict(transparent = (self.transparent, (True,)),
version = (self.version, ("1.1.1",)),
format = (self.img_format, ("image/png",)),
map = (self.map, (None, "")),
username = (self.username, (None, "")),
password = (self.password, (None, "")),
buffer = (self.buffer, (0,)),
base = (self.base, (False,)),
_base = (self._base, (False,)),
style = (self.style, (None, "")),
bgcolor = (self.bgcolor, (None, "")),
tiled = (self.tiled, (False,)),
legendURL = (legend_url, (None, "")),
queryable = (self.queryable, (False,)),
desc = (self.description, (None, "")),
)
if current.deployment_settings.get_gis_layer_metadata():
# Use CMS to add info about sources
attr["post_id"] = (self.post_id, (None, ""))
else:
# Link direct to sources
attr.update(src = (self.source_name, (None, "")),
src_url = (self.source_url, (None, "")),
)
self.add_attributes_if_not_default(output, **attr)
self.setup_folder_visibility_and_opacity(output)
return output
# -----------------------------------------------------------------------------
class LayerXYZ(Layer):
"""
XYZ Layers from Catalogue
"""
tablename = "gis_layer_xyz"
dictname = "layers_xyz"
style = False
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
# Mandatory attributes
output = {"id": self.layer_id,
"name": self.safe_name,
"url": self.url
}
# Attributes which are defaulted client-side if not set
self.add_attributes_if_not_default(
output,
_base = (self._base, (False,)),
url2 = (self.url2, (None,)),
url3 = (self.url3, (None,)),
format = (self.img_format, ("png", None)),
zoomLevels = (self.zoom_levels, (19,)),
attribution = (self.attribution, (None,)),
)
self.setup_folder(output)
return output
# =============================================================================
class Marker(object):
"""
Represents a Map Marker
@ToDo: Support Markers in Themes
"""
def __init__(self,
marker=None,
marker_id=None,
layer_id=None,
tablename=None):
"""
@param marker: Storage object with image/height/width (looked-up in bulk)
@param marker_id: id of record in gis_marker
@param layer_id: layer_id to lookup marker in gis_style (unused)
@param tablename: used to identify whether to provide a default marker as fallback
"""
no_default = False
if not marker:
db = current.db
s3db = current.s3db
mtable = s3db.gis_marker
config = None
if marker_id:
                # Look up the Marker details from its ID
marker = db(mtable.id == marker_id).select(mtable.image,
mtable.height,
mtable.width,
limitby=(0, 1),
cache=s3db.cache
).first()
elif layer_id:
# Check if we have a Marker defined for this Layer
config = GIS.get_config()
stable = s3db.gis_style
query = (stable.layer_id == layer_id) & \
((stable.config_id == config.id) | \
(stable.config_id == None)) & \
(stable.marker_id == mtable.id) & \
(stable.record_id == None)
marker = db(query).select(mtable.image,
mtable.height,
mtable.width,
limitby=(0, 1)).first()
if not marker:
# Check to see if we're a Polygon/LineString
# (& hence shouldn't use a default marker)
if tablename == "gis_layer_shapefile":
table = db.gis_layer_shapefile
query = (table.layer_id == layer_id)
layer = db(query).select(table.gis_feature_type,
limitby=(0, 1)).first()
if layer and layer.gis_feature_type != 1:
no_default = True
#elif tablename == "gis_layer_feature":
# table = db.gis_layer_feature
# query = (table.layer_id == layer_id)
# layer = db(query).select(table.polygons,
# limitby=(0, 1)).first()
# if layer and layer.polygons:
# no_default = True
if marker:
self.image = marker["image"]
self.height = marker["height"]
self.width = marker["width"]
elif no_default:
self.image = None
else:
# Default Marker
if not config:
config = GIS.get_config()
self.image = config.marker_image
self.height = config.marker_height
self.width = config.marker_width
# -------------------------------------------------------------------------
def add_attributes_to_output(self, output):
"""
Called by Layer.as_dict()
"""
if self.image:
output["marker"] = self.as_json_dict()
# -------------------------------------------------------------------------
def as_dict(self):
"""
Called by gis.get_marker(), feature_resources & s3profile
"""
if self.image:
marker = Storage(image = self.image,
height = self.height,
width = self.width,
)
else:
marker = None
return marker
# -------------------------------------------------------------------------
#def as_json(self):
# """
# Called by nothing
# """
# output = dict(i = self.image,
# h = self.height,
# w = self.width,
# )
# return json.dumps(output, separators=SEPARATORS)
# -------------------------------------------------------------------------
def as_json_dict(self):
"""
Called by Style.as_dict() and add_attributes_to_output()
"""
if self.image:
marker = dict(i = self.image,
h = self.height,
w = self.width,
)
else:
marker = None
return marker
# =============================================================================
class Projection(object):
"""
Represents a Map Projection
"""
def __init__(self, projection_id=None):
if projection_id:
s3db = current.s3db
table = s3db.gis_projection
query = (table.id == projection_id)
projection = current.db(query).select(table.epsg,
limitby=(0, 1),
cache=s3db.cache).first()
else:
# Default projection
config = GIS.get_config()
projection = Storage(epsg = config.epsg)
self.epsg = projection.epsg
# =============================================================================
class Style(object):
"""
Represents a Map Style
"""
def __init__(self,
style_id=None,
layer_id=None,
aggregate=None):
db = current.db
s3db = current.s3db
table = s3db.gis_style
fields = [table.marker_id,
table.opacity,
table.popup_format,
# @ToDo: if-required
#table.url_format,
table.cluster_distance,
table.cluster_threshold,
table.style,
]
if style_id:
query = (table.id == style_id)
limitby = (0, 1)
elif layer_id:
config = GIS.get_config()
# @ToDo: if record_id:
query = (table.layer_id == layer_id) & \
(table.record_id == None) & \
((table.config_id == config.id) | \
(table.config_id == None))
if aggregate is not None:
query &= (table.aggregate == aggregate)
fields.append(table.config_id)
limitby = (0, 2)
else:
# Default style for this config
# - falling back to Default config
config = GIS.get_config()
ctable = db.gis_config
query = (table.config_id == ctable.id) & \
((ctable.id == config.id) | \
(ctable.uuid == "SITE_DEFAULT")) & \
(table.layer_id == None)
fields.append(ctable.uuid)
limitby = (0, 2)
styles = db(query).select(*fields,
limitby=limitby)
if len(styles) > 1:
if layer_id:
# Remove the general one
_filter = lambda row: row.config_id == None
else:
# Remove the Site Default
_filter = lambda row: row["gis_config.uuid"] == "SITE_DEFAULT"
styles.exclude(_filter)
if styles:
style = styles.first()
if not layer_id and "gis_style" in style:
style = style["gis_style"]
else:
current.log.error("Style not found!")
style = None
if style:
if style.marker_id:
style.marker = Marker(marker_id=style.marker_id)
if aggregate is True:
# Use gis/location controller in all reports
style.url_format = "%s/{id}.plain" % URL(c="gis", f="location")
elif layer_id:
# Build from controller/function
ftable = s3db.gis_layer_feature
layer = db(ftable.layer_id == layer_id).select(ftable.controller,
ftable.function,
limitby=(0, 1)
).first()
if layer:
style.url_format = "%s/{id}.plain" % \
URL(c=layer.controller, f=layer.function)
self.style = style
# -------------------------------------------------------------------------
def as_dict(self):
"""
"""
# Not JSON-serializable
#return self.style
style = self.style
output = Storage()
if not style:
return output
if hasattr(style, "marker"):
output.marker = style.marker.as_json_dict()
opacity = style.opacity
if opacity and opacity not in (1, 1.0):
output.opacity = style.opacity
if style.popup_format:
output.popup_format = style.popup_format
if style.url_format:
output.url_format = style.url_format
cluster_distance = style.cluster_distance
if cluster_distance is not None and \
cluster_distance != CLUSTER_DISTANCE:
output.cluster_distance = cluster_distance
cluster_threshold = style.cluster_threshold
if cluster_threshold is not None and \
cluster_threshold != CLUSTER_THRESHOLD:
output.cluster_threshold = cluster_threshold
if style.style:
if isinstance(style.style, basestring):
# Native JSON
try:
style.style = json.loads(style.style)
except:
current.log.error("Unable to decode Style: %s" % style.style)
style.style = None
output.style = style.style
return output
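    # Editor's note (illustrative, not part of the original module): a minimal
    # sketch of how the class above is typically consumed, assuming a valid
    # layer_id. Only attributes that differ from the defaults end up in the
    # result, e.g.:
    #   style_dict = Style(layer_id=layer_id).as_dict()
    #   # -> a Storage such as {"marker": {"i": image, "h": height, "w": width},
    #   #                       "opacity": 0.7, "cluster_distance": 5,
    #   #                       "style": [...]}  (values here are hypothetical)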
# =============================================================================
class S3Map(S3Method):
"""
Class to generate a Map linked to Search filters
"""
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point to apply map method to S3Requests
- produces a full page with S3FilterWidgets above a Map
@param r: the S3Request instance
@param attr: controller attributes for the request
@return: output object to send to the view
"""
if r.http == "GET":
representation = r.representation
if representation == "html":
return self.page(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
def page(self, r, **attr):
"""
Map page
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
if r.representation in ("html", "iframe"):
response = current.response
resource = self.resource
get_config = resource.get_config
tablename = resource.tablename
widget_id = "default_map"
output = {}
title = response.s3.crud_strings[tablename].get("title_map",
current.T("Map"))
output["title"] = title
# Filter widgets
filter_widgets = get_config("filter_widgets", None)
if filter_widgets and not self.hide_filter:
advanced = False
for widget in filter_widgets:
if "hidden" in widget.opts and widget.opts.hidden:
advanced = resource.get_config("map_advanced", True)
break
request = self.request
from s3filter import S3FilterForm
# Apply filter defaults (before rendering the data!)
S3FilterForm.apply_filter_defaults(r, resource)
filter_formstyle = get_config("filter_formstyle", None)
submit = resource.get_config("map_submit", True)
filter_form = S3FilterForm(filter_widgets,
formstyle=filter_formstyle,
advanced=advanced,
submit=submit,
ajax=True,
# URL to update the Filter Widget Status
ajaxurl=r.url(method="filter",
vars={},
representation="options"),
_class="filter-form",
_id="%s-filter-form" % widget_id,
)
get_vars = request.get_vars
filter_form = filter_form.html(resource, get_vars=get_vars, target=widget_id)
else:
# Render as empty string to avoid the exception in the view
filter_form = ""
output["form"] = filter_form
# Map
output["map"] = self.widget(r, widget_id=widget_id,
callback='''S3.search.s3map()''', **attr)
# View
response.view = self._view(r, "map.html")
return output
else:
r.error(501, current.ERROR.BAD_FORMAT)
# -------------------------------------------------------------------------
def widget(self,
r,
method="map",
widget_id=None,
visible=True,
callback=None,
**attr):
"""
Render a Map widget suitable for use in an S3Filter-based page
such as S3Summary
@param r: the S3Request
@param method: the widget method
@param widget_id: the widget ID
@param callback: None by default in case DIV is hidden
@param visible: whether the widget is initially visible
@param attr: controller attributes
"""
if not widget_id:
widget_id = "default_map"
gis = current.gis
tablename = self.tablename
ftable = current.s3db.gis_layer_feature
def lookup_layer(prefix, name):
query = (ftable.controller == prefix) & \
(ftable.function == name)
layers = current.db(query).select(ftable.layer_id,
ftable.style_default,
)
if len(layers) > 1:
layers.exclude(lambda row: row.style_default == False)
if len(layers) == 1:
layer_id = layers.first().layer_id
else:
# We can't distinguish
layer_id = None
return layer_id
prefix = r.controller
name = r.function
layer_id = lookup_layer(prefix, name)
if not layer_id:
# Try the tablename
prefix, name = tablename.split("_", 1)
layer_id = lookup_layer(prefix, name)
url = URL(extension="geojson", args=None)
# @ToDo: Support maps with multiple layers (Dashboards)
#_id = "search_results_%s" % widget_id
_id = "search_results"
feature_resources = [{"name" : current.T("Search Results"),
"id" : _id,
"layer_id" : layer_id,
"tablename" : tablename,
"url" : url,
# We activate in callback after ensuring URL is updated for current filter status
"active" : False,
}]
settings = current.deployment_settings
catalogue_layers = settings.get_gis_widget_catalogue_layers()
legend = settings.get_gis_legend()
search = settings.get_gis_search_geonames()
toolbar = settings.get_gis_toolbar()
wms_browser = settings.get_gis_widget_wms_browser()
if wms_browser:
config = gis.get_config()
if config.wmsbrowser_url:
                wms_browser = {"name": config.wmsbrowser_name,
                               "url": config.wmsbrowser_url,
                               }
else:
wms_browser = None
map = gis.show_map(id = widget_id,
feature_resources = feature_resources,
catalogue_layers = catalogue_layers,
collapsed = True,
legend = legend,
toolbar = toolbar,
save = False,
search = search,
wms_browser = wms_browser,
callback = callback,
)
return map
# =============================================================================
class S3ExportPOI(S3Method):
""" Export point-of-interest resources for a location """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
output = dict()
if r.http == "GET":
output = self.export(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def export(self, r, **attr):
"""
Export POI resources.
URL options:
- "resources" list of tablenames to export records from
- "msince" datetime in ISO format, "auto" to use the
feed's last update
- "update_feed" 0 to skip the update of the feed's last
update datetime, useful for trial exports
Supported formats:
.xml S3XML
.osm OSM XML Format
.kml Google KML
(other formats can be requested, but may give unexpected results)
@param r: the S3Request
@param attr: controller options for this request
"""
import time
tfmt = current.xml.ISOFORMAT
# Determine request Lx
current_lx = r.record
if not current_lx: # or not current_lx.level:
# Must have a location
r.error(400, current.ERROR.BAD_REQUEST)
else:
self.lx = current_lx.id
tables = []
# Parse the ?resources= parameter
if "resources" in r.get_vars:
resources = r.get_vars["resources"]
else:
# Fallback to deployment_setting
resources = current.deployment_settings.get_gis_poi_export_resources()
if not isinstance(resources, list):
resources = [resources]
        for t in resources:
            tables.extend(t.split(","))
# Parse the ?update_feed= parameter
update_feed = True
if "update_feed" in r.get_vars:
_update_feed = r.get_vars["update_feed"]
if _update_feed == "0":
update_feed = False
# Parse the ?msince= parameter
msince = None
if "msince" in r.get_vars:
msince = r.get_vars["msince"]
if msince.lower() == "auto":
msince = "auto"
else:
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = \
time.strptime(msince, tfmt)
msince = datetime.datetime(y, m, d, hh, mm, ss)
except ValueError:
msince = None
# Export a combined tree
tree = self.export_combined_tree(tables,
msince=msince,
update_feed=update_feed)
xml = current.xml
# Set response headers
response = current.response
s3 = response.s3
headers = response.headers
representation = r.representation
if r.representation in s3.json_formats:
as_json = True
default = "application/json"
else:
as_json = False
default = "text/xml"
headers["Content-Type"] = s3.content_type.get(representation,
default)
# Find XSLT stylesheet and transform
stylesheet = r.stylesheet()
if tree and stylesheet is not None:
args = Storage(domain=xml.domain,
base_url=s3.base_url,
utcnow=datetime.datetime.utcnow().strftime(tfmt))
tree = xml.transform(tree, stylesheet, **args)
if tree:
if as_json:
output = xml.tree2json(tree, pretty_print=True)
else:
output = xml.tostring(tree, pretty_print=True)
return output
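        # Editor's note (illustrative, not part of the module): the URL options
        # documented above arrive as query variables, e.g. a hypothetical request
        #   gis/location/42/export_poi.osm?resources=org_office,cms_post&msince=auto
        # would export both resources for location 42, using each feed's last
        # update as the modification cut-off and updating the feed afterwards.
        # (The "export_poi" method name is an assumption about how this class is
        # registered; only the query variables are defined by the code above.)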
# -------------------------------------------------------------------------
def export_combined_tree(self, tables, msince=None, update_feed=True):
"""
Export a combined tree of all records in tables, which
are in Lx, and have been updated since msince.
@param tables: list of table names
@param msince: minimum modified_on datetime, "auto" for
automatic from feed data, None to turn it off
@param update_feed: update the last_update datetime in the feed
"""
db = current.db
s3db = current.s3db
ftable = s3db.gis_poi_feed
lx = self.lx
elements = []
for tablename in tables:
# Define the resource
try:
resource = s3db.resource(tablename, components=[])
except AttributeError:
# Table not defined (module deactivated?)
continue
# Check
if "location_id" not in resource.fields:
# Hardly a POI resource without location_id
continue
# Add Lx filter
self._add_lx_filter(resource, lx)
# Get the feed data
query = (ftable.tablename == tablename) & \
(ftable.location_id == lx)
feed = db(query).select(limitby=(0, 1)).first()
if msince == "auto":
if feed is None:
_msince = None
else:
_msince = feed.last_update
else:
_msince = msince
# Export the tree and append its element to the element list
tree = resource.export_tree(msince=_msince,
references=["location_id"])
# Update the feed data
if update_feed:
muntil = resource.muntil
if feed is None:
ftable.insert(location_id = lx,
tablename = tablename,
last_update = muntil)
else:
feed.update_record(last_update = muntil)
elements.extend([c for c in tree.getroot()])
# Combine all elements in one tree and return it
tree = current.xml.tree(elements, results=len(elements))
return tree
# -------------------------------------------------------------------------
@staticmethod
def _add_lx_filter(resource, lx):
"""
            Add an Lx filter for the current location to this
resource.
@param resource: the resource
"""
from s3query import FS
query = (FS("location_id$path").contains("/%s/" % lx)) | \
(FS("location_id$path").like("%s/%%" % lx))
resource.add_filter(query)
# =============================================================================
class S3ImportPOI(S3Method):
"""
Import point-of-interest resources for a location
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
s3db = current.s3db
request = current.request
response = current.response
settings = current.deployment_settings
s3 = current.response.s3
title = T("Import from OpenStreetMap")
resources_list = settings.get_gis_poi_export_resources()
uploadpath = os.path.join(request.folder,"uploads/")
from s3utils import s3_yes_no_represent
fields = [Field("text1", # Dummy Field to add text inside the Form
label = "",
default = T("Can read PoIs either from an OpenStreetMap file (.osm) or mirror."),
writable = False),
Field("file", "upload",
uploadfolder = uploadpath,
label = T("File")),
Field("text2", # Dummy Field to add text inside the Form
label = "",
default = "Or",
writable = False),
Field("host",
default = "localhost",
label = T("Host")),
Field("database",
default = "osm",
label = T("Database")),
Field("user",
default = "osm",
label = T("User")),
Field("password", "string",
default = "planet",
label = T("Password")),
Field("ignore_errors", "boolean",
label = T("Ignore Errors?"),
represent = s3_yes_no_represent),
Field("resources",
label = T("Select resources to import"),
requires = IS_IN_SET(resources_list, multiple=True),
default = resources_list,
widget = SQLFORM.widgets.checkboxes.widget)
]
if not r.id:
from s3validators import IS_LOCATION
from s3widgets import S3LocationAutocompleteWidget
# dummy field
field = s3db.org_office.location_id
field.requires = IS_EMPTY_OR(IS_LOCATION())
field.widget = S3LocationAutocompleteWidget()
fields.insert(3, field)
from s3utils import s3_mark_required
labels, required = s3_mark_required(fields, ["file", "location_id"])
s3.has_required = True
form = SQLFORM.factory(*fields,
formstyle = settings.get_ui_formstyle(),
submit_button = T("Import"),
labels = labels,
separator = "",
table_name = "import_poi" # Dummy table name
)
response.view = "create.html"
output = dict(title=title,
form=form)
if form.accepts(request.vars, current.session):
form_vars = form.vars
if form_vars.file != "":
File = open(uploadpath + form_vars.file, "r")
else:
# Create .poly file
if r.record:
record = r.record
elif not form_vars.location_id:
form.errors["location_id"] = T("Location is Required!")
return output
else:
gtable = s3db.gis_location
record = current.db(gtable.id == form_vars.location_id).select(gtable.name,
gtable.wkt,
limitby=(0, 1)
).first()
if record.wkt is None:
form.errors["location_id"] = T("Location needs to have WKT!")
return output
error = GIS.create_poly(record)
if error:
current.session.error = error
redirect(URL(args=r.id))
# Use Osmosis to extract an .osm file using this .poly
name = record.name
if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp
TEMP = os.path.join(os.getcwd(), "temp")
else:
import tempfile
TEMP = tempfile.gettempdir()
filename = os.path.join(TEMP, "%s.osm" % name)
cmd = ["/home/osm/osmosis/bin/osmosis", # @ToDo: deployment_setting
"--read-pgsql",
"host=%s" % form_vars.host,
"database=%s" % form_vars.database,
"user=%s" % form_vars.user,
"password=%s" % form_vars.password,
"--dataset-dump",
"--bounding-polygon",
"file=%s" % os.path.join(TEMP, "%s.poly" % name),
"--write-xml",
"file=%s" % filename,
]
import subprocess
try:
#result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError, e:
current.session.error = T("OSM file generation failed: %s") % e.output
redirect(URL(args=r.id))
except AttributeError:
# Python < 2.7
error = subprocess.call(cmd, shell=True)
if error:
current.log.debug(cmd)
current.session.error = T("OSM file generation failed!")
redirect(URL(args=r.id))
try:
File = open(filename, "r")
except:
current.session.error = T("Cannot open created OSM file!")
redirect(URL(args=r.id))
stylesheet = os.path.join(request.folder, "static", "formats",
"osm", "import.xsl")
ignore_errors = form_vars.get("ignore_errors", None)
xml = current.xml
tree = xml.parse(File)
define_resource = s3db.resource
response.error = ""
import_count = 0
import_res = list(set(form_vars["resources"]) & \
set(resources_list))
for tablename in import_res:
try:
s3db[tablename]
except:
# Module disabled
continue
resource = define_resource(tablename)
s3xml = xml.transform(tree, stylesheet_path=stylesheet,
name=resource.name)
try:
resource.import_xml(s3xml,
ignore_errors=ignore_errors)
import_count += resource.import_count
except:
response.error += str(sys.exc_info()[1])
if import_count:
response.confirmation = "%s %s" % \
(import_count,
T("PoIs successfully imported."))
else:
response.information = T("No PoIs available.")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
|
flavour/Turkey
|
modules/s3/s3gis.py
|
Python
|
mit
| 392,898
|
[
"Amber"
] |
904f1608143a42e0a104e990548f9cac12a5e8e5d2f96c2846cb598e89c8d67d
|
# -*- coding: UTF-8 -*-
# Created by mcxiaoke on 15/7/4 16:54.
__author__ = 'mcxiaoke'
'''
Examples of tzinfo usage and time zone handling, taken from the Python documentation.
'''
from datetime import tzinfo, timedelta, datetime
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
# A UTC class.
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
# A class capturing the platform's idea of local time.
import time as _time
STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
Local = LocalTimezone()
# A complete implementation of current DST rules for major US time zones.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
# US DST Rules
#
# This is a simplified (i.e., wrong for a few cases) set of rules for US
# DST start and end times. For a complete and up-to-date set of DST rules
# and timezone definitions, visit the Olson Database (or try pytz):
# http://www.twinsun.com/tz/tz-link.htm
# http://sourceforge.net/projects/pytz/ (might not be up-to-date)
#
# In the US, since 2007, DST starts at 2am (standard time) on the second
# Sunday in March, which is the first Sunday on or after Mar 8.
DSTSTART_2007 = datetime(1, 3, 8, 2)
# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
DSTEND_2007 = datetime(1, 11, 1, 1)
# From 1987 to 2006, DST used to start at 2am (standard time) on the first
# Sunday in April and to end at 2am (DST time; 1am standard time) on the last
# Sunday of October, which is the first Sunday on or after Oct 25.
DSTSTART_1987_2006 = datetime(1, 4, 1, 2)
DSTEND_1987_2006 = datetime(1, 10, 25, 1)
# From 1967 to 1986, DST used to start at 2am (standard time) on the last
# Sunday in April (the one on or after April 24) and to end at 2am (DST time;
# 1am standard time) on the last Sunday of October, which is the first Sunday
# on or after Oct 25.
DSTSTART_1967_1986 = datetime(1, 4, 24, 2)
DSTEND_1967_1986 = DSTEND_1987_2006
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception may be sensible here, in one or both cases.
# It depends on how you want to treat them. The default
# fromutc() implementation (called by the default astimezone()
# implementation) passes a datetime with dt.tzinfo is self.
return ZERO
assert dt.tzinfo is self
# Find start and end times for US DST. For years before 1967, return
# ZERO for no DST.
if 2006 < dt.year:
dststart, dstend = DSTSTART_2007, DSTEND_2007
elif 1986 < dt.year < 2007:
dststart, dstend = DSTSTART_1987_2006, DSTEND_1987_2006
elif 1966 < dt.year < 1987:
dststart, dstend = DSTSTART_1967_1986, DSTEND_1967_1986
else:
return ZERO
start = first_sunday_on_or_after(dststart.replace(year=dt.year))
end = first_sunday_on_or_after(dstend.replace(year=dt.year))
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
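# Editor's illustrative sketch (not part of the original example): a minimal
# demonstration of the classes defined above, using only names from this module.
if __name__ == '__main__':
    # An aware "now" in the platform's local zone, converted to UTC and US Eastern.
    now_local = datetime.now(Local)
    print(now_local.isoformat())
    print(now_local.astimezone(utc).isoformat())
    print(now_local.astimezone(Eastern).isoformat())
    # FixedOffset(0, "UTC") builds an equivalent of the UTC class above;
    # a UTC+8 offset is shown here as a further example.
    beijing = FixedOffset(8 * 60, "UTC+8")
    print(datetime(2015, 7, 4, 16, 54, tzinfo=beijing).isoformat())
    # Eastern.dst() distinguishes summer and winter dates under the 2007 rules.
    print(Eastern.dst(datetime(2015, 7, 4, 12, 0, tzinfo=Eastern)))  # 1:00:00
    print(Eastern.dst(datetime(2015, 1, 4, 12, 0, tzinfo=Eastern)))  # 0:00:00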
|
mcxiaoke/python-labs
|
archives/modules/date_time_tz.py
|
Python
|
apache-2.0
| 5,204
|
[
"VisIt"
] |
a5dab281f232a661a887f0cb6beb4125f05844b0dd01566b3c71004bcf36e79c
|
# Copyright Yair Benita Y.Benita@pharm.uu.nl
# Biopython (http://biopython.org) license applies
# Sharp E. coli index for the codon adaptation index,
# from Sharp & Li, Nucleic Acids Res. 1987
SharpEcoliIndex = {
'GCA':0.586, 'GCC':0.122, 'GCG':0.424, 'GCT':1, 'AGA':0.004, 'AGG':0.002, 'CGA':0.004,
'CGC':0.356, 'CGG':0.004, 'CGT':1, 'AAC':1, 'AAT':0.051, 'GAC':1, 'GAT':0.434, 'TGC':1,
'TGT':0.5, 'CAA':0.124, 'CAG':1, 'GAA':1, 'GAG':0.259, 'GGA':0.01, 'GGC':0.724, 'GGG':0.019,
'GGT':1, 'CAC':1, 'CAT':0.291, 'ATA':0.003, 'ATC':1, 'ATT':0.185, 'CTA':0.007, 'CTC':0.037,
'CTG':1, 'CTT':0.042, 'TTA':0.02, 'TTG':0.02, 'AAA':1, 'AAG':0.253, 'ATG':1, 'TTC':1, 'TTT':0.296,
'CCA':0.135, 'CCC':0.012, 'CCG':1, 'CCT':0.07, 'AGC':0.41, 'AGT':0.085, 'TCA':0.077, 'TCC':0.744,
'TCG':0.017, 'TCT':1, 'ACA':0.076, 'ACC':1,'ACG':0.099, 'ACT':0.965, 'TGG':1, 'TAC':1, 'TAT':0.239,
'GTA':0.495, 'GTC':0.066,'GTG':0.221, 'GTT':1}
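# Editor's illustrative sketch (not part of the original module): this table is
# normally plugged into Biopython's CodonAdaptationIndex. The method names below
# reflect the Bio.SeqUtils.CodonUsage API as the editor understands it and should
# be checked against the installed Biopython version.
if __name__ == '__main__':
    from Bio.SeqUtils.CodonUsage import CodonAdaptationIndex
    cai = CodonAdaptationIndex()
    cai.set_cai_index(SharpEcoliIndex)
    # CAI of a short, made-up coding sequence (length must be a multiple of 3).
    print(cai.cai_for_gene("ATGGCTCGTAAAGGT"))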
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/SeqUtils/CodonUsageIndices.py
|
Python
|
apache-2.0
| 910
|
[
"Biopython"
] |
5453fa76b82bd679d8879de413d88b71ded3ed7c7af167c6b5207a79bddcad86
|
"""
Estimates the CPU time required for a phosim simulation of a 30-s visit. The
inputs are filter, moonalt, and moonphase, or obsHistID (an Opsim ID from
a specified (hard coded) Opsim sqlite database.
The random forest is generated (and saved as a pickle file) by
run1_cpu_generate_rf.py, using only the filter, moonalt, and moonphase
features. This script needs to be run only once.
"""
from __future__ import print_function, absolute_import
import os
import pickle
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab
from .sqlite_tools import SqliteDataFrameFactory
__all__ = ['CpuPred']
class CpuPred(object):
"""
Returns predicted fell-class CPU time in seconds for user-supplied
filter (0-5 for ugrizy), moon altitude (deg), and moon phase (0-100)
-or- ObsHistID from the kraken_1042 Opsim run. By default the code
looks for the sqlite database for kraken_1042 in a specific location on
SLAC Linux. Also by default it looks only for obsHistID values that are
in the Twinkles Run 1 field (by selecting on the corresponding fieldID).
RF_pickle.p is written by run1_cpu_generate_rf.py
"""
def __init__(self, rf_pickle_file='RF_pickle.p',
opsim_db_file='/nfs/farm/g/lsst/u1/DESC/Twinkles/kraken_1042_sqlite.db',
opsim_df = None,
fieldID=1427):
self.RFbest = pickle.load(open(rf_pickle_file, 'rb'))
if opsim_df is None:
factory = SqliteDataFrameFactory(opsim_db_file)
self.obs_conditions = factory.create('obsHistID filter moonAlt moonPhase'.split(), 'Summary',
condition='where fieldID=%d'%fieldID)
else:
self.obs_conditions = opsim_df['obsHistID filter moonAlt moonPhase'.split()]
def __call__(self, obsid):
"""
Return the predicted CPU time given an obsHistID. The obsHistID
must be in the Opsim database file and fieldID specified on
initialization of the instance.
"""
filter_index, moonalt, moonphase = self.conditions(obsid)
return self.cputime(filter_index, moonalt, moonphase)
def conditions(self, obsid):
"""
Return the relevant observing conditions (i.e., those that are
inputs to the Random Forest predictor) for the given obsHistID
"""
rec = self.obs_conditions[self.obs_conditions['obsHistID'] == obsid]
if rec.size != 0:
# Translate the filter string into an index 0-5
filter_index = 'ugrizy'.find(rec['filter'].values[0])
moonalt = math.degrees(rec['moonAlt'].values[0])
moonphase = rec['moonPhase'].values[0]
else:
            raise RuntimeError('%d is not a Run 1 obsHistID in the selected field' % obsid)
return filter_index, moonalt, moonphase
def cputime(self, filter_index, moonalt, moonphase):
return 10.**self.RFbest.predict(np.array([[filter_index, moonalt,
moonphase]]))
if __name__ == '__main__':
# Here are some dumb examples
pred = CpuPred()
print(pred(210))
print(pred.cputime(3.,10.,50.))
# This one won't work
#pred(-999)
# Extract the Run 1 metadata and evaluate the predicted CPU times
run1meta = pd.read_csv(os.path.join(os.environ['TWINKLES_DIR'], 'data',
'run1_metadata_v6.csv'),
usecols=['filter', 'moonalt','moonphase','cputime_fell'])
filter = np.array(run1meta['filter'])
moonalt = np.array(run1meta['moonalt'])
moonphase = np.array(run1meta['moonphase'])
actual = np.array(run1meta['cputime_fell'])
predicted = np.zeros(filter.size,dtype=float)
for i in range(filter.size):
predicted[i] = pred.cputime(filter[i],moonalt[i],moonphase[i])
plt.scatter(np.log10(actual), np.log10(predicted))
plt.plot([4,6.5],[4,6.5])
pylab.ylim([4,6.5])
pylab.xlim([4,6.5])
plt.xlabel('log10(Actual Fell CPU time, s)')
plt.ylabel('log10(Predicted Fell CPU time, s)')
plt.title('Run 1 CPU Times Predicted vs. Actual')
pylab.savefig('predicted_vs_actual.png',bbox_inches='tight')
plt.show()
|
DarkEnergyScienceCollaboration/Twinkles
|
python/desc/twinkles/phosim_cpu_pred.py
|
Python
|
mit
| 4,141
|
[
"VisIt"
] |
b98dc3e1cc0bffdd17434762ed71931ac92bc26c765c09af78e48af78d63c76e
|
# ===================================
# COMPARE Tapas, Telfit, Molecfit
# plotting the transmission spectra
#
# Solene 14.06.2016
# ===================================
#
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from PyAstronomy import pyasl
from scipy.interpolate import interp1d
from sklearn.metrics import mean_squared_error
from math import sqrt
from numpy import linalg as LA
#
#
# TAPAS
# wl and flux are stored in decreasing order, so reverse the arrays: array[::-1]
file_tapas = '/home/solene/atmos/tapas/crires1203/tapas_000001.ipac'
rawwl_tapas, rawtrans_tapas = np.loadtxt(file_tapas, skiprows=38, unpack=True)
wl_tapas = rawwl_tapas[::-1]
trans_tapas = rawtrans_tapas[::-1]
# MOLECFIT
#
file_molecfit = '/home/solene/atmos/For_Solene/1203nm/output/molecfit_crires_solene_tac.fits'
hdu_molecfit = fits.open(file_molecfit)
data_molecfit = hdu_molecfit[1].data
cols_molecfit = hdu_molecfit[1].columns
# cols_molecfit.info()
rawwl_molecfit = data_molecfit.field('mlambda')
wl_molecfit = rawwl_molecfit*10e2
trans_molecfit = data_molecfit.field('mtrans')
cflux_molecfit = data_molecfit.field('cflux')
# TELFIT
#
file_telfit = '/home/solene/atmos/trans_telfit.txt'
wl_telfit, trans_telfit, wl_datatelfit, flux_datatelfit = np.loadtxt(
file_telfit, unpack=True)
# Cross-correlation
# from PyAstronomy example
#
# TAPAS is the "template" shifted to match Molecfit
rv, cc = pyasl.crosscorrRV(
wl_molecfit, trans_molecfit, wl_tapas, trans_tapas,
rvmin=-60., rvmax=60.0, drv=0.1, mode='doppler', skipedge=50)
maxind = np.argmax(cc)
print("Cross-correlation function is maximized at dRV = ", rv[maxind], " km/s")
# Doppler shift TAPAS
#
wlcorr_tapas = wl_tapas * (1. + rv[maxind]/299792.)
# transcorr_tapas, wlcorr_tapas = pyasl.dopplerShift(
# wl_tapas[::-1], trans_tapas[::-1], rv[maxind],
# edgeHandling=None, fillValue=None) # Fancy way
# RMS between two spectra TAPAS, MOLECFIT
# do the same with the data and try to better fit the continuum with molecfit
# Selecting 2nd detector only
# USELESS
wlstart = wl_datatelfit[0]
wlend = wl_datatelfit[-1]
ind_molecfit = np.where((wl_molecfit > wlstart) & (wl_molecfit < wlend))
wl_molecfit2 = wl_molecfit[ind_molecfit]
trans_molecfit2 = trans_molecfit[ind_molecfit]
ind_tapas = np.where((wl_tapas > wlstart) & (wl_tapas < wlend))
wl_tapas2 = wl_tapas[ind_tapas]
trans_tapas2 = trans_tapas[ind_tapas]
# Interpolation
# f_molecfit = interp1d(wl_molecfit, trans_molecfit, kind='cubic') # takes forever...
# wlcorr_tapasnew = wlcorr_tapas[500:-500] # raw adjustment of the wl limits
# plt.plot(wl_molecfit, trans_molecfit, 'o', wlcorr_tapasnew, f_molecfit(wlcorr_tapasnew), '.')
f_molecfit = interp1d(wl_molecfit, trans_molecfit)# , kind='cubic') # takes forever...
f_tapas = interp1d(wlcorr_tapas, trans_tapas)
# Euclidean distance at each point
stack_molecfit = np.stack((flux_datatelfit, f_molecfit(wl_datatelfit)), axis=-1)
stack_tapas = np.stack((flux_datatelfit, f_tapas(wl_datatelfit)), axis=-1)
norm_molecfit = LA.norm(stack_molecfit, axis=1)
norm_tapas = LA.norm(stack_tapas, axis=1)
# trans_stack = np.stack((trans_tapas[500:-500], f_molecfit(wlcorr_tapasnew)), axis=-1)
# norm_trans = LA.norm(trans_stack, axis=1)
plt.plot(wl_datatelfit, norm_tapas, 'r.') # see that the continuum is offset 1.4
plt.plot(wl_datatelfit, norm_molecfit, 'k.')
# RMS
err_molec = flux_datatelfit - f_molecfit(wl_datatelfit)
err_tapas = flux_datatelfit - f_tapas(wl_datatelfit)
rms_molec = sqrt(mean_squared_error(flux_datatelfit, f_molecfit(wl_datatelfit)))
rms_tapas = sqrt(mean_squared_error(flux_datatelfit, f_tapas(wl_datatelfit)))
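# Editor's illustrative addition (not in the original script): report the two RMS
# values computed above so the Molecfit / TAPAS comparison is visible in the log.
print("RMS Molecfit: %.4f" % rms_molec)
print("RMS TAPAS:    %.4f" % rms_tapas)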
# Plotting
#
plt.figure(1)
plt.subplot(211)
plt.plot(wl_datatelfit, flux_datatelfit, 'g.-', label='Data 2nd detector')
plt.plot(wl_molecfit, trans_molecfit, 'r-', label='Molecfit')
plt.plot(wl_tapas, trans_tapas, 'b-', label='Tapas')
plt.title('Comparison atmospheric transmission \n CRIRES data')
plt.xlabel('Wavelength (nm)')
plt.ylabel('Transmission')
plt.legend(loc=3.)
plt.subplot(212)
# plt.plot(wl_tapas, trans_tapas, 'b-', label='Tapas')
plt.plot(wl_datatelfit, flux_datatelfit, 'g.-', label='Data 2nd detector')
plt.plot(wl_molecfit, trans_molecfit, 'r-', label='Molecfit')
plt.plot(wlcorr_tapas, trans_tapas, 'b--', label='Tapas corrected')
# plot 2nd detector only with WL from the data
plt.plot(wl_datatelfit, flux_datatelfit, 'g.-', label='Data 2nd detector')
plt.plot(wl_datatelfit, f_molecfit(wl_datatelfit), 'r-', label='Molecfit')
plt.plot(wl_datatelfit, f_tapas(wl_datatelfit), 'b--', label='Tapas corrected')
# plt.plot(wl_telfit, trans_telfit, 'r-', label='Telfit')
plt.plot(wl_datatelfit, (flux_datatelfit - f_tapas(wl_datatelfit)), 'b.', label='Tapas residuals')
plt.plot(wl_datatelfit, (flux_datatelfit - f_molecfit(wl_datatelfit)), 'r.', label='Molecfit residuals')
plt.plot(wl_datatelfit, flux_datatelfit, 'g.-', label='Data 2nd detector')
plt.plot(wl_molecfit, trans_molecfit, 'r-', label='Molecfit')
plt.plot(wl_molecfit, cflux_molecfit, 'b-', label='Corrected data - Molecfit')
plt.xlabel('Wavelength (nm)')
plt.ylabel('Transmission')
# plt.plot(model.x, model.y, 'k-', label='Gaussian fit')
# $\mu=%.2f, \sigma=%.2f$' %(wavestart, waveend)
plt.legend(loc=3.)
plt.show()
|
soleneulmer/atmos
|
compareModels.py
|
Python
|
mit
| 5,232
|
[
"Gaussian"
] |
09526f4d92e27e53ec5593a4f769d643256b0cc0c54c9652dc737d542ee7e66d
|
#! /usr/bin/env python
"""
Get Pilots Logging for specific Pilot UUID or Job ID.
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC import S_OK
from DIRAC.Core.Base import Script
from DIRAC.WorkloadManagementSystem.Client.PilotsLoggingClient import PilotsLoggingClient
from DIRAC.WorkloadManagementSystem.Client.ServerUtils import pilotAgentsDB
from DIRAC.Core.Utilities.PrettyPrint import printTable
uuid = None
jobid = None
def setUUID(optVal):
"""
Set UUID from arguments
"""
global uuid
uuid = optVal
return S_OK()
def setJobID(optVal):
"""
Set JobID from arguments
"""
global jobid
jobid = optVal
return S_OK()
Script.registerSwitch('u:', 'uuid=', 'get PilotsLogging for given Pilot UUID', setUUID)
Script.registerSwitch('j:', 'jobid=', 'get PilotsLogging for given Job ID', setJobID)
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s option value ' % Script.scriptName,
'Only one option (either uuid or jobid) should be used.']))
Script.parseCommandLine()
def printPilotsLogging(logs):
"""
Print results using printTable from PrettyPrint
"""
content = []
labels = ['pilotUUID', 'timestamp', 'source', 'phase', 'status', 'messageContent']
for log in logs:
content.append([log[label] for label in labels])
printTable(labels, content, numbering=False, columnSeparator=' | ')
if uuid:
pilotsLogging = PilotsLoggingClient()
result = pilotsLogging.getPilotsLogging(uuid)
if not result['OK']:
print('ERROR: %s' % result['Message'])
DIRAC.exit(1)
printPilotsLogging(result['Value'])
DIRAC.exit(0)
else:
pilotDB = pilotAgentsDB()
pilotsLogging = PilotsLoggingClient()
pilots = pilotDB.getPilotsForJobID(jobid)
  if not pilots['OK']:
    print(pilots['Message'])
    DIRAC.exit(1)
  for pilotID in pilots['Value']:
    info = pilotDB.getPilotInfo(pilotID=pilotID)
    if not info['OK']:
      print(info['Message'])
      continue
    for pilot in info['Value'].values():
      logging = pilotsLogging.getPilotsLogging(pilot['PilotJobReference'])
      if not logging['OK']:
        print(logging['Message'])
        continue
      printPilotsLogging(logging['Value'])
DIRAC.exit(0)
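# Editor's illustrative usage sketch (hypothetical values, not part of the script):
#   dirac-admin-pilot-logging-info -u 6e28c0a1-7a8e-4f3c-9b2d-0c1f2e3a4b5c
#   dirac-admin-pilot-logging-info -j 1234567
# Exactly one of the two switches should be given, as noted in the usage message.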
|
petricm/DIRAC
|
WorkloadManagementSystem/scripts/dirac-admin-pilot-logging-info.py
|
Python
|
gpl-3.0
| 2,238
|
[
"DIRAC"
] |
f32701f7eec6b52a4f278feb5e419f9d4e1c384df2fd7946fe600484230ea69a
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Find and deal with motifs in biological sequence data.
Representing DNA (or RNA or proteins) in a neural network can be difficult
since input sequences can have different lengths. One way to get around
this problem is to deal with sequences by finding common motifs, and counting
the number of times those motifs occur in a sequence. This information can
then be used for creating the neural networks, with occurrences of motifs
going into the network instead of raw sequence data.
"""
# biopython
from Bio.Alphabet import _verify_alphabet
from Bio.Seq import Seq
# local modules
from .Pattern import PatternRepository
class MotifFinder(object):
"""Find motifs in a set of Sequence Records.
"""
def __init__(self, alphabet_strict=1):
"""Initialize a finder to get motifs.
Arguments:
o alphabet_strict - Whether or not motifs should be
        restricted to having all of their elements within the alphabet
of the sequences. This requires that the Sequences have a real
alphabet, and that all sequences have the same alphabet.
"""
self.alphabet_strict = alphabet_strict
def find(self, seq_records, motif_size):
"""Find all motifs of the given size in the passed SeqRecords.
Arguments:
o seq_records - A list of SeqRecord objects which the motifs
will be found from.
o motif_size - The size of the motifs we want to look for.
Returns:
A PatternRepository object that contains all of the motifs (and their
        counts) found in the training sequences.
"""
motif_info = self._get_motif_dict(seq_records, motif_size)
return PatternRepository(motif_info)
def _get_motif_dict(self, seq_records, motif_size):
"""Return a dictionary with information on motifs.
This internal function essentially does all of the hard work for
finding motifs, and returns a dictionary containing the found motifs
and their counts. This is internal so it can be reused by
find_motif_differences.
"""
if self.alphabet_strict:
alphabet = seq_records[0].seq.alphabet
else:
alphabet = None
# loop through all records to find the motifs in the sequences
all_motifs = {}
for seq_record in seq_records:
# if we are working with alphabets, make sure we are consistent
if alphabet is not None:
assert seq_record.seq.alphabet == alphabet, \
"Working with alphabet %s and got %s" % \
(alphabet, seq_record.seq.alphabet)
# now start finding motifs in the sequence
for start in range(len(seq_record.seq) - (motif_size - 1)):
motif = str(seq_record.seq[start:start + motif_size])
# if we are being alphabet strict, make sure the motif
# falls within the specified alphabet
if alphabet is not None:
motif_seq = Seq(motif, alphabet)
if _verify_alphabet(motif_seq):
all_motifs = self._add_motif(all_motifs, motif)
# if we are not being strict, just add the motif
else:
all_motifs = self._add_motif(all_motifs, motif)
return all_motifs
def find_differences(self, first_records, second_records, motif_size):
"""Find motifs in two sets of records and return the differences.
This is used for finding motifs, but instead of just counting up all
of the motifs in a set of records, this returns the differences
between two listings of seq_records.
o first_records, second_records - Two listings of SeqRecord objects
to have their motifs compared.
o motif_size - The size of the motifs we are looking for.
Returns:
A PatternRepository object that has motifs, but instead of their
raw counts, this has the counts in the first set of records
subtracted from the counts in the second set.
"""
first_motifs = self._get_motif_dict(first_records, motif_size)
second_motifs = self._get_motif_dict(second_records, motif_size)
motif_diffs = {}
# first deal with all of the keys from the first motif
for cur_key in first_motifs:
if cur_key in second_motifs:
motif_diffs[cur_key] = first_motifs[cur_key] - \
second_motifs[cur_key]
else:
motif_diffs[cur_key] = first_motifs[cur_key]
# now see if there are any keys from the second motif
# that we haven't got yet.
missing_motifs = list(second_motifs)
# remove all of the motifs we've already added
for added_motif in motif_diffs:
if added_motif in missing_motifs:
missing_motifs.remove(added_motif)
# now put in all of the motifs we didn't get
for cur_key in missing_motifs:
motif_diffs[cur_key] = 0 - second_motifs[cur_key]
return PatternRepository(motif_diffs)
def _add_motif(self, motif_dict, motif_to_add):
"""Add a motif to the given dictionary.
"""
        # increment the count of the motif if it is already present
if motif_to_add in motif_dict:
motif_dict[motif_to_add] += 1
# otherwise add it to the dictionary
else:
motif_dict[motif_to_add] = 1
return motif_dict
class MotifCoder(object):
"""Convert motifs and a sequence into neural network representations.
This is designed to convert a sequence into a representation that
can be fed as an input into a neural network. It does this by
    representing a sequence based on the motifs present.
"""
def __init__(self, motifs):
"""Initialize an input producer with motifs to look for.
Arguments:
o motifs - A complete list of motifs, in order, that are to be
searched for in a sequence.
"""
self._motifs = motifs
# check to be sure the motifs make sense (all the same size)
self._motif_size = len(self._motifs[0])
for motif in self._motifs:
if len(motif) != self._motif_size:
raise ValueError("Motif %s given, expected motif size %s"
% (motif, self._motif_size))
def representation(self, sequence):
"""Represent a sequence as a set of motifs.
Arguments:
o sequence - A Bio.Seq object to represent as a motif.
This converts a sequence into a representation based on the motifs.
The representation is returned as a list of the relative amount of
each motif (number of times a motif occurred divided by the total
number of motifs in the sequence). The values in the list correspond
to the input order of the motifs specified in the initializer.
"""
# initialize a dictionary to hold the motifs in this sequence
seq_motifs = {}
for motif in self._motifs:
seq_motifs[motif] = 0
# count all of the motifs we are looking for in the sequence
for start in range(len(sequence) - (self._motif_size - 1)):
motif = str(sequence[start:start + self._motif_size])
if motif in seq_motifs:
seq_motifs[motif] += 1
# normalize the motifs to go between zero and one
min_count = min(seq_motifs.values())
max_count = max(seq_motifs.values())
# as long as we have some motifs present, normalize them
# otherwise we'll just return 0 for everything
if max_count > 0:
for motif in seq_motifs:
seq_motifs[motif] = (float(seq_motifs[motif] - min_count) /
float(max_count))
# return the relative motif counts in the specified order
motif_amounts = []
for motif in self._motifs:
motif_amounts.append(seq_motifs[motif])
return motif_amounts
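# Editor's illustrative sketch (not part of the original module): combining the
# two classes above. The SeqRecord construction and the PatternRepository.get_top()
# call are assumptions about the surrounding Biopython API and may need adjusting.
if __name__ == '__main__':
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    records = [SeqRecord(Seq("ACGTACGTAC")), SeqRecord(Seq("ACGTTTACGT"))]
    finder = MotifFinder(alphabet_strict=0)
    repository = finder.find(records, motif_size=3)
    # Encode the first sequence as relative counts of the five most common motifs.
    coder = MotifCoder(repository.get_top(5))
    print(coder.representation(records[0].seq))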
|
zjuchenyuan/BioWeb
|
Lib/Bio/NeuralNetwork/Gene/Motif.py
|
Python
|
mit
| 8,336
|
[
"Biopython"
] |
3afe3e5bf265651d72f45c7ae13290b07958d9566f94f255e12ae6cc55e32f94
|
# -*- coding: utf8 -*-
from __future__ import division, with_statement
import sys
import warnings
import numpy as np
import pyfits as fits
from ..extern.six import u, b, iterkeys, itervalues, iteritems, StringIO, PY3
from ..extern.six.moves import zip, range
from ..card import _pad
from ..util import encode_ascii, _pad_length, BLOCK_SIZE
from . import PyfitsTestCase
from .util import catch_warnings, ignore_warnings, CaptureStdio
from nose.tools import assert_raises
class TestOldApiHeaderFunctions(PyfitsTestCase):
"""
Tests that specifically use attributes and methods from the old
Header/CardList API from PyFITS 3.0 and prior.
This tests backward compatibility support for those interfaces.
"""
def setup(self):
super(TestOldApiHeaderFunctions, self).setup()
fits.ignore_deprecation_warnings()
def teardown(self):
warnings.resetwarnings()
super(TestOldApiHeaderFunctions, self).teardown()
def test_ascardimage_verifies_the_comment_string_to_be_ascii_text(self):
# the ascardimage() verifies the comment string to be ASCII text
c = fits.Card.fromstring('ABC = + 2.1 e + 12 / abcde\0')
assert_raises(Exception, c.ascardimage)
def test_rename_key(self):
"""Test backwards compatibility support for Header.rename_key()"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
header.rename_key('A', 'B')
assert 'A' not in header
assert 'B' in header
assert header[0] == 'B'
assert header['B'] == 'B'
assert header.comments['B'] == 'C'
def test_add_commentary(self):
header = fits.Header([('A', 'B', 'C'), ('HISTORY', 1),
('HISTORY', 2), ('HISTORY', 3), ('', '', ''),
('', '', '')])
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header['HISTORY'] == [1, 2, 3, 4]
header.add_history(0, after='A')
assert len(header) == 6
assert header.cards[1].value == 0
assert header['HISTORY'] == [0, 1, 2, 3, 4]
header = fits.Header([('A', 'B', 'C'), ('', 1), ('', 2), ('', 3),
('', '', ''), ('', '', '')])
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
        # that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[''] == [1, 2, 3, '', '', 4]
header.add_blank(0, after='A')
assert len(header) == 8
assert header.cards[1].value == 0
assert header[''] == [0, 1, 2, 3, '', '', 4]
def test_totxtfile(self):
hdul = fits.open(self.data('test0.fits'))
hdul[0].header.toTxtFile(self.temp('header.txt'))
hdu = fits.ImageHDU()
hdu.header.update('MYKEY', 'FOO', 'BAR')
hdu.header.fromTxtFile(self.temp('header.txt'), replace=True)
assert len(hdul[0].header.ascard) == len(hdu.header.ascard)
assert 'MYKEY' not in hdu.header
assert 'EXTENSION' not in hdu.header
assert 'SIMPLE' in hdu.header
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp('test.fits'), output_verify='ignore')
assert isinstance(fits.open(self.temp('test.fits'))[0],
fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update('MYKEY', 'FOO', 'BAR')
hdu.header.fromTxtFile(self.temp('header.txt'))
# hdu.header should have MYKEY keyword, and also adds PCOUNT and
# GCOUNT, giving it 3 more keywords in total than the original
assert len(hdul[0].header.ascard) == len(hdu.header.ascard) - 3
assert 'MYKEY' in hdu.header
assert 'EXTENSION' not in hdu.header
assert 'SIMPLE' in hdu.header
with ignore_warnings():
hdu.writeto(self.temp('test.fits'), output_verify='ignore',
clobber=True)
hdul2 = fits.open(self.temp('test.fits'))
assert len(hdul2) == 2
assert 'MYKEY' in hdul2[1].header
def test_update_comment(self):
hdul = fits.open(self.data('arange.fits'))
hdul[0].header.update('FOO', 'BAR', 'BAZ')
assert hdul[0].header['FOO'] == 'BAR'
assert hdul[0].header.ascard['FOO'].comment == 'BAZ'
hdul.writeto(self.temp('test.fits'))
hdul = fits.open(self.temp('test.fits'), mode='update')
hdul[0].header.ascard['FOO'].comment = 'QUX'
hdul.close()
hdul = fits.open(self.temp('test.fits'))
assert hdul[0].header.ascard['FOO'].comment == 'QUX'
def test_long_commentary_card(self):
# Another version of this test using new API methods is found in
# TestHeaderFunctions
header = fits.Header()
header.update('FOO', 'BAR')
header.update('BAZ', 'QUX')
longval = 'ABC' * 30
header.add_history(longval)
header.update('FRED', 'BARNEY')
header.add_history(longval)
assert len(header.ascard) == 7
assert header.ascard[2].key == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.add_history(longval, after='FOO')
assert len(header.ascard) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header()
header.update('ABC', 0)
header.update('DEF', 1)
header.update('ABD', 2)
cards = header.ascard['AB*']
assert len(cards) == 2
assert cards[0].value == 0
assert cards[1].value == 2
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad('FOO = T')
barimg = _pad('BAR = F')
h = fits.Header()
h.update('FOO', True)
h.update('BAR', False)
assert h['FOO'] == True
assert h['BAR'] == False
assert h.ascard['FOO'].cardimage == fooimg
assert h.ascard['BAR'].cardimage == barimg
h = fits.Header()
h.update('FOO', np.bool_(True))
h.update('BAR', np.bool_(False))
assert h['FOO'] == True
assert h['BAR'] == False
assert h.ascard['FOO'].cardimage == fooimg
assert h.ascard['BAR'].cardimage == barimg
h = fits.Header()
h.ascard.append(fits.Card.fromstring(fooimg))
h.ascard.append(fits.Card.fromstring(barimg))
assert h['FOO'] == True
assert h['BAR'] == False
assert h.ascard['FOO'].cardimage == fooimg
assert h.ascard['BAR'].cardimage == barimg
def test_cardlist_list_methods(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/190"""
header = fits.Header()
header.update('A', 'B', 'C')
header.update('D', 'E', 'F')
# The old header.update method won't let you append a duplicate keyword
header.append(('D', 'G', 'H'))
assert header.ascard.index(header.cards['A']) == 0
assert header.ascard.index(header.cards['D']) == 1
assert header.ascard.index(header.cards[('D', 1)]) == 2
# Since the original CardList class really only works on card objects
# the count method is mostly useless since cards didn't used to compare
# equal sensibly
assert header.ascard.count(header.cards['A']) == 1
assert header.ascard.count(header.cards['D']) == 1
assert header.ascard.count(header.cards[('D', 1)]) == 1
assert header.ascard.count(fits.Card('A', 'B', 'C')) == 0
class TestHeaderFunctions(PyfitsTestCase):
"""Test PyFITS Header and Card objects."""
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
with ignore_warnings():
assert '' == c.key
assert '' == c.keyword
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card('abc', '<8 ch')
assert str(c) == "ABC = '<8 ch ' "
c = fits.Card('nullstr', '')
assert str(c) == "NULLSTR = '' "
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == "ABC = T "
c = fits.Card.fromstring('ABC = F')
assert c.value == False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card('long_int', -467374636747637647347374734737437)
assert str(c) == "LONG_INT= -467374636747637647347374734737437 "
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card('floatnum', -467374636747637647347374734737437.)
if (str(c) != "FLOATNUM= -4.6737463674763E+32 " and
str(c) != "FLOATNUM= -4.6737463674763E+032 "):
assert str(c) == "FLOATNUM= -4.6737463674763E+32 "
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card('abc',
1.2345377437887837487e88 + 6324767364763746367e-33j)
if (str(c) != "ABC = (1.23453774378878E+88, 6.32476736476374E-15) " and
str(c) != "ABC = (1.2345377437887E+088, 6.3247673647637E-015) "):
assert str(c) == "ABC = (1.23453774378878E+88, 6.32476736476374E-15) "
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
with ignore_warnings():
c = fits.Card('abc', 9, 'abcde' * 20)
assert str(c) == "ABC = 9 / abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab"
c = fits.Card('abc', 'a' * 68, 'abcdefg')
assert str(c) == "ABC = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'"
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
assert_raises(ValueError, fits.Card, ('abc',), {'value': (2, 3)})
assert_raises(ValueError, fits.Card, 'key', [], 'comment')
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
with catch_warnings():
warnings.simplefilter('error')
assert_raises(UserWarning, fits.Card, 'abcdefghi', 'long')
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with catch_warnings(record=True) as w:
c = fits.Card('abc+', 9)
assert len(w) == 1
assert c.image == _pad('HIERARCH abc+ = 9')
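# The HIERARCH convention is the escape hatch for keywords that violate the
# basic FITS rules (more than 8 characters, lowercase letters, or characters
# outside A-Z, 0-9, '-' and '_'); such keywords are written after a literal
# 'HIERARCH ' prefix, as exercised above and in the hierarch tests below.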
def test_commentary_cards(self):
# commentary cards
c = fits.Card("HISTORY",
"A commentary card's value has no quotes around it.")
assert str(c) == "HISTORY A commentary card's value has no quotes around it. "
c = fits.Card("comment",
"A commentary card has no comment.", "comment")
assert str(c) == "COMMENT A commentary card has no comment. "
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. / text after slash is still part of the value.")
assert c.value == 'card has no comments. / text after slash is still part of the value.'
assert c.comment == ''
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == "HISTORY (1, 2) "
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == "HISTORY = (1, 2) "
def test_blank_keyword(self):
c = fits.Card('', ' / EXPOSURE INFORMATION')
assert str(c) == ' / EXPOSURE INFORMATION '
c = fits.Card.fromstring(str(c))
assert c.keyword == ''
assert c.value == ' / EXPOSURE INFORMATION'
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring('ABC = (8, 9)')
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring('abc = + 2.1 e + 12')
assert c.value == 2100000000000.0
with CaptureStdio():
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
# fixable non-FSC: if the card is not parsable, its value will be assumed
# to be a string and everything after the first slash will be the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes / let's also try the comment")
with CaptureStdio():
assert str(c) == "NO_QUOTE= 'this card''s value has no quotes' / let's also try the comment "
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring('ABC = ')
assert str(c) == _pad("ABC =")
def test_mislocated_equal_sign(self):
# test mislocated "=" sign
c = fits.Card.fromstring('XYZ= 100')
assert c.keyword == 'XYZ'
assert c.value == 100
with CaptureStdio():
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self):
# the test of "=" location is only up to column 10
# This test used to check if PyFITS rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109 if the format is
# completely wrong we don't make any assumptions and the card should be
# left alone
with CaptureStdio():
c = fits.Card.fromstring("HISTO = (1, 2)")
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring('ABC= a6')
with catch_warnings(record=True) as w:
with CaptureStdio():
c.verify()
err_text1 = ("Card 'ABC' is not FITS standard (equal sign not at "
"column 8)")
err_text2 = ("Card 'ABC' is not FITS standard (invalid value "
"string: 'a6'")
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
c = fits.Card.fromstring('ABC= a6')
with catch_warnings(record=True) as w:
with CaptureStdio():
c.verify('fix')
fix_text = "Fixed 'ABC' card to meet the FITS standard."
assert len(w) == 4
assert fix_text in str(w[1].message)
assert str(c) == "ABC = 'a6 ' "
def test_long_string_value(self):
# test long string value
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '&' / long comment ")
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1['TEST'] = 'abcdefg' * 30
h2 = fits.Header()
with catch_warnings(record=True) as w:
h2['TEST'] = u('abcdefg') * 30
assert len(w) == 0
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header['TEST1'] = ('Regular value', 'Regular comment')
header['TEST2'] = ('long string value ' * 10, 'long comment ' * 10)
header['TEST3'] = ('Regular value', 'Regular comment')
assert repr(header).splitlines() == [
str(fits.Card('TEST1', 'Regular value', 'Regular comment')),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '&' / long comment ",
str(fits.Card('TEST3', 'Regular value', 'Regular comment'))]
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = 'long string value ' * 10
header = fits.Header()
header[''] = value
assert len(header) == 3
assert ' '.join(header['']) == value.rstrip()
# Ensure that this works like other commentary keywords
header['COMMENT'] = value
header['HISTORY'] = value
assert header['COMMENT'] == header['HISTORY']
assert header['COMMENT'] == header['']
def test_long_string_from_file(self):
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
c = hdul[0].header.cards['abc']
hdul.close()
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '&' / long comment ")
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card('abc', 'longstringvalue' * 10, 'longcomment' * 10)
assert (str(c) ==
"ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '&' / ntlongcommentlongcommentlongcommentlongcomment ")
def test_long_string_value_via_fromstring(self):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' / comments in line 1") +
_pad("continue 'continue with long string but without the ampersand at the end' /") +
_pad("continue 'continue must have string value (with quotes)' / comments with ''. "))
with CaptureStdio():
assert (str(c) ==
"ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '&' / comments in line 1 comments with ''. ")
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad("EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'") +
_pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'") +
_pad("CONTINUE '&' / pysyn expression"))
assert c.keyword == 'EXPR'
assert (c.value ==
'/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits '
'* 5.87359e-12 * MWAvg(Av=0.12)')
assert c.comment == 'pysyn expression'
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with catch_warnings(record=True) as w:
c = fits.Card('ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert len(w) == 1
assert 'HIERARCH card will be created' in str(w[0].message)
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
# Test manual creation of hierarch card
c = fits.Card('hierarch abcdefghi', 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card('HIERARCH ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring(
"HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
# Test also with creation via the Card constructor
c = fits.Card('HIERARCH key.META_4', 'calFileVersion')
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Assures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card('HIERARCH WeirdCard.~!@#_^$%&', 'The value',
'a comment')
# This should not raise any exceptions
c.verify('exception')
assert c.keyword == 'WeirdCard.~!@#_^$%&'
assert c.value == 'The value'
assert c.comment == 'a comment'
# Test also the specific case from the original bug report
header = fits.Header([
('simple', True),
('BITPIX', 8),
('NAXIS', 0),
('EXTEND', True, 'May contain datasets'),
('HIERARCH key.META_0', 'detRow')
])
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
header2 = hdul[0].header
assert str(header.cards[header.index('key.META_0')]) == str(header2.cards[header2.index('key.META_0')])
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
assert_raises(KeyError, lambda k: header[k], 'NAXIS')
# Test the exception message
try:
header['NAXIS']
except KeyError:
exc = sys.exc_info()[1]
assert exc.args[0] == "Keyword 'NAXIS' not found."
def test_hierarch_card_lookup(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
assert 'abcdefghi' in header
assert header['abcdefghi'] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert 'ABCDEFGHI' in header
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with catch_warnings(record=True) as w:
header.update('HIERARCH BLAH BLAH', 'TESTA')
assert len(w) == 0
assert 'BLAH BLAH' in header
assert header['BLAH BLAH'] == 'TESTA'
header.update('HIERARCH BLAH BLAH', 'TESTB')
assert len(w) == 0
assert header['BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update('BLAH BLAH', 'TESTC')
assert len(w) == 0
assert len(header) == 1
assert header['BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update('HIERARCH blah blah', 'TESTD')
assert len(w) == 0
assert len(header) == 1
assert header['blah blah'] == 'TESTD'
header.update('blah blah', 'TESTE')
assert len(w) == 0
assert len(header) == 1
assert header['blah blah'] == 'TESTE'
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update('BLAH BLAH BLAH', 'TESTA')
assert len(w) == 1
assert msg in str(w[0].message)
header.update('HIERARCH BLAH BLAH BLAH', 'TESTB')
assert len(w) == 1
assert header['BLAH BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update('BLAH BLAH BLAH', 'TESTC')
assert len(w) == 1
assert header['BLAH BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update('HIERARCH blah blah blah', 'TESTD')
assert len(w) == 1
assert header['blah blah blah'] == 'TESTD'
header.update('blah blah blah', 'TESTE')
assert len(w) == 1
assert header['blah blah blah'] == 'TESTE'
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with catch_warnings(record=True) as w:
header.update('HIERARCH BLA BLA', 'TESTA')
assert len(w) == 0
assert 'BLA BLA' in header
assert header['BLA BLA'] == 'TESTA'
header.update('HIERARCH BLA BLA', 'TESTB')
assert len(w) == 0
assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update('BLA BLA', 'TESTC')
assert len(w) == 0
assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update('HIERARCH bla bla', 'TESTD')
assert len(w) == 0
assert len(header) == 1
assert header['bla bla'] == 'TESTD'
header.update('bla bla', 'TESTE')
assert len(w) == 0
assert len(header) == 1
assert header['bla bla'] == 'TESTE'
header = fits.Header()
with catch_warnings(record=True) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update('BLA BLA', 'TESTA')
assert len(w) == 1
assert msg in str(w[0].message)
header.update('HIERARCH BLA BLA', 'TESTB')
assert len(w) == 1
assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update('BLA BLA', 'TESTC')
assert len(w) == 1
assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update('HIERARCH bla bla', 'TESTD')
assert len(w) == 1
assert len(header) == 1
assert header['bla bla'] == 'TESTD'
header.update('bla bla', 'TESTE')
assert len(w) == 1
assert len(header) == 1
assert header['bla bla'] == 'TESTE'
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header['FOO'] = ('bar', 'baz', 'qux')
assert_raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header['FOO'] = ('BAR',)
assert header['FOO'] == 'BAR'
assert header[0] == 'BAR'
assert header.comments[0] == ''
assert header.comments['FOO'] == ''
def test_header_setitem_2tuple(self):
header = fits.Header()
header['FOO'] = ('BAR', 'BAZ')
assert header['FOO'] == 'BAR'
assert header[0] == 'BAR'
assert header.comments[0] == 'BAZ'
assert header.comments['FOO'] == 'BAZ'
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card a
blank value.
"""
header = fits.Header()
header['FOO'] = 'BAR'
assert header['FOO'] == 'BAR'
header['FOO'] = None
assert header['FOO'] == ''
def test_set_comment_only(self):
header = fits.Header([('A', 'B', 'C')])
header.set('A', comment='D')
assert header['A'] == 'B'
assert header.comments['A'] == 'D'
def test_header_iter(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header) == ['A', 'C']
def test_header_slice(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
newheader = header[1:]
assert len(newheader) == 2
assert 'A' not in newheader
assert 'C' in newheader
assert 'E' in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == 'F'
assert newheader[1] == 'D'
assert newheader[2] == 'B'
newheader = header[::2]
assert len(newheader) == 2
assert 'A' in newheader
assert 'C' not in newheader
assert 'E' in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = 'GH'
assert header[1] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header[1:] = ['H', 'I']
assert header[1] == 'H'
assert header[2] == 'I'
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
del header[1:]
assert len(header) == 1
assert header[0] == 'B'
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
newheader = header['AB*']
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for an issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([('DATE', 1), ('DATE-OBS', 2), ('DATE-FOO', 3)])
assert len(header['DATE*']) == 3
assert len(header['DATE?*']) == 2
assert len(header['DATE-*']) == 2
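# Wildcard keyword lookups, as used above, treat '*' as matching any run of
# characters (including none) and '?' as matching a single character, so
# 'DATE?*' matches 'DATE-OBS' and 'DATE-FOO' but not the bare 'DATE' card.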
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header['AB*'] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header['AB*'] = 'GH'
assert header[0] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header['AB*'] = ['H', 'I']
assert header[0] == 'H'
assert header[2] == 'I'
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
del header['AB*']
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header([('ABC', 0), ('HISTORY', 1), ('HISTORY', 2),
('DEF', 3), ('HISTORY', 4), ('HISTORY', 5)])
assert header['HISTORY'] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
header.clear()
assert 'A' not in header
assert 'C' not in header
assert len(header) == 0
def test_header_fromkeys(self):
header = fits.Header.fromkeys(['A', 'B'])
assert 'A' in header
assert header['A'] == ''
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] == ''
assert header.comments['B'] == ''
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(['A', 'B'], 'C')
assert 'A' in header
assert header['A'] == 'C'
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] == 'C'
assert header.comments['B'] == ''
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(['A'], ('B', 'C'))
assert 'A' in header
assert header['A'] == 'B'
assert header.comments['A'] == 'C'
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(['A', 'B', 'A'], 'C')
assert 'A' in header
assert ('A', 0) in header
assert ('A', 1) in header
assert ('A', 2) not in header
assert header[0] == 'C'
assert header['A'] == 'C'
assert header[('A', 0)] == 'C'
assert header[2] == 'C'
assert header[('A', 1)] == 'C'
def test_header_items(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header.items()) == list(iteritems(header))
def test_header_iterkeys(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(iterkeys(header), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(itervalues(header), ['B', 'D']):
assert a == b
def test_header_keys(self):
hdul = fits.open(self.data('arange.fits'))
assert (list(hdul[0].header.keys()) ==
['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3',
'EXTEND'])
def test_header_list_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
last = header.pop()
assert last == 'H'
assert len(header) == 3
assert list(header.keys()) == ['A', 'C', 'E']
mid = header.pop(1)
assert mid == 'D'
assert len(header) == 2
assert list(header.keys()) == ['A', 'E']
first = header.pop(0)
assert first == 'B'
assert len(header) == 1
assert list(header.keys()) == ['E']
assert_raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
assert_raises(TypeError, header.pop, 'A', 'B', 'C')
last = header.pop('G')
assert last == 'H'
assert len(header) == 3
assert list(header.keys()) == ['A', 'C', 'E']
mid = header.pop('C')
assert mid == 'D'
assert len(header) == 2
assert list(header.keys()) == ['A', 'E']
first = header.pop('A')
assert first == 'B'
assert len(header) == 1
assert list(header.keys()) == ['E']
default = header.pop('X', 'Y')
assert default == 'Y'
assert len(header) == 1
assert_raises(KeyError, header.pop, 'X')
def test_popitem(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
assert_raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.setdefault('A') == 'B'
assert header.setdefault('C') == 'D'
assert header.setdefault('E') == 'F'
assert len(header) == 3
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
assert 'G' in header
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update({'A': 'E', 'F': 'G'})
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update(A='E', F='G')
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update([('A', 'E'), fits.Card('F', 'G')])
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header['MYKEY'] = ('some val', 'some comment')
hdu.header += hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header.keys())[5] == 'XTENSION'
assert hdu.header[-1] == 'some val'
assert ('MYKEY', 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == 'some val'
assert hdu.header[-1] == 'some other val'
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu2.header['HISTORY'] = 'history 1'
hdu2.header['HISTORY'] = 'history 2'
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) in hdu.header
assert hdu.header[('MYKEY', 1)] == 'some other val'
assert len(hdu.header['HISTORY']) == 3
assert hdu.header[-1] == 'history 2'
hdu = fits.PrimaryHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) not in hdu.header
assert hdu.header['MYKEY'] == 'some other val'
assert len(hdu.header['HISTORY']) == 2
assert hdu.header[-1] == 'history 2'
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data('test0.fits'))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.count('A') == 1
assert header.count('C') == 1
assert header.count('E') == 1
header['HISTORY'] = 'a'
header['HISTORY'] = 'b'
assert header.count('HISTORY') == 2
assert_raises(KeyError, header.count, 'G')
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ''
assert header[-2] == ''
# New card should fill the first blank by default
header.append(('E', 'F'))
assert len(header) == 4
assert header[-2] == 'F'
assert header[-1] == ''
# This card should not use up a blank spot
header.append(('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[-1] == ''
assert header[-2] == 'H'
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.append('E')
assert len(header) == 3
assert list(header.keys())[-1] == 'E'
assert header[-1] == ''
assert header.comments['E'] == ''
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append('')
assert len(header) == 4
assert list(header.keys())[-1] == ''
assert header[''] == ''
assert header.comments[''] == ''
def test_header_insert_use_blanks(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ('E', 'F'))
assert len(header) == 4
assert header[1] == 'F'
assert header[-1] == ''
assert header[-2] == 'D'
# Insert a new card without using blanks
header.insert(1, ('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[1] == 'H'
assert header[-1] == ''
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header([
('NAXIS1', 10), ('COMMENT', 'Comment 1'),
('COMMENT', 'Comment 3')])
header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
assert list(header.keys())[0] == 'NAXIS'
assert header[0] == 2
assert header.comments[0] == 'Number of axes'
header.insert('NAXIS1', ('NAXIS2', 20), after=True)
assert list(header.keys())[1] == 'NAXIS1'
assert list(header.keys())[2] == 'NAXIS2'
assert header[2] == 20
header.insert(('COMMENT', 1), ('COMMENT', 'Comment 2'))
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3']
header.insert(('COMMENT', 2), ('COMMENT', 'Comment 4'), after=True)
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3',
'Comment 4']
header.insert(-1, ('TEST1', True))
assert list(header.keys())[-2] == 'TEST1'
header.insert(-1, ('TEST2', True), after=True)
assert list(header.keys())[-1] == 'TEST2'
assert list(header.keys())[-3] == 'TEST1'
def test_remove(self):
# TODO: Test the Header.remove() method; add support for ignore_missing
pass
def test_header_comments(self):
header = fits.Header([('A', 'B', 'C'), ('DEF', 'G', 'H')])
assert repr(header.comments) == ' A C\n DEF H'
def test_comment_slices_and_filters(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
s = header.comments[1:]
assert list(s) == ['H', 'K']
s = header.comments[::-1]
assert list(s) == ['K', 'H', 'D']
s = header.comments['A*']
assert list(s) == ['D', 'K']
def test_comment_slice_filter_assign(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
header.comments[1:] = 'L'
assert list(header.comments) == ['D', 'L', 'L']
assert header.cards[header.index('AB')].comment == 'D'
assert header.cards[header.index('EF')].comment == 'L'
assert header.cards[header.index('AI')].comment == 'L'
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ['L', 'L', 'D']
header.comments['A*'] = ['M', 'N']
assert list(header.comments) == ['M', 'L', 'N']
def test_update_comment(self):
hdul = fits.open(self.data('arange.fits'))
hdul[0].header['FOO'] = ('BAR', 'BAZ')
hdul.writeto(self.temp('test.fits'))
hdul = fits.open(self.temp('test.fits'), mode='update')
hdul[0].header.comments['FOO'] = 'QUX'
hdul.close()
hdul = fits.open(self.temp('test.fits'))
assert hdul[0].header.comments['FOO'] == 'QUX'
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header['HISTORY'] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header['HISTORY'][1:] == indices[1:]
assert header['HISTORY'][:3] == indices[:3]
assert header['HISTORY'][:6] == indices[:6]
assert header['HISTORY'][:-2] == indices[:-2]
assert header['HISTORY'][::-1] == indices[::-1]
assert header['HISTORY'][1::-1] == indices[1::-1]
assert header['HISTORY'][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ('A', 'B', 'C'))
header.append(('D', 'E', 'F'), end=True)
assert list(header['HISTORY'][1:]) == indices[1:]
assert list(header['HISTORY'][:3]) == indices[:3]
assert list(header['HISTORY'][:6]) == indices[:6]
assert list(header['HISTORY'][:-2]) == indices[:-2]
assert list(header['HISTORY'][::-1]) == indices[::-1]
assert list(header['HISTORY'][1::-1]) == indices[1::-1]
assert list(header['HISTORY'][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['HISTORY'] = 'ABC'
header['FRED'] = 'BARNEY'
header['HISTORY'] = 'DEF'
header['HISTORY'] = 'GHI'
assert header['HISTORY'] == ['ABC', 'DEF', 'GHI']
# Single value update
header['HISTORY'][0] = 'FOO'
assert header['HISTORY'] == ['FOO', 'DEF', 'GHI']
# Single value partial slice update
header['HISTORY'][1:] = 'BAR'
assert header['HISTORY'] == ['FOO', 'BAR', 'BAR']
# Multi-value update
header['HISTORY'][:] = ['BAZ', 'QUX']
assert header['HISTORY'] == ['BAZ', 'QUX', 'BAR']
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
https://github.com/astropy/astropy/issues/2363, where the list of
values for a commentary keyword did not always compare correctly
with other iterables.
"""
header = fits.Header()
header['HISTORY'] = 'hello world'
header['HISTORY'] = 'hello world'
header['COMMENT'] = 'hello world'
assert header['HISTORY'] != header['COMMENT']
header['COMMENT'] = 'hello world'
assert header['HISTORY'] == header['COMMENT']
def test_long_commentary_card(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['BAZ'] = 'QUX'
longval = 'ABC' * 30
header['HISTORY'] = longval
header['FRED'] = 'BARNEY'
header['HISTORY'] = longval
assert len(header) == 7
assert list(header.keys())[2] == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.set('HISTORY', longval, after='FOO')
assert len(header) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header['A'] = ('B', 'C')
header['B'] = ('C', 'D')
header['C'] = ('D', 'E')
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file the END card
is ignored.
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
f.write('\nEND')
new_header = fits.Header.fromtextfile(self.temp('test.hdr'))
assert 'END' not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
def setitem(k, v):
header[k] = v
assert_raises(ValueError, setitem, 'END', '')
assert_raises(ValueError, header.append, 'END')
assert_raises(ValueError, header.append, 'END', end=True)
assert_raises(ValueError, header.insert, len(header), 'END')
assert_raises(ValueError, header.set, 'END')
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep='', endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += ' ' * _pad_length(len(s))
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header('END =', True)
with catch_warnings(record=True) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
assert str(w[0].message).startswith(
"Unexpected bytes trailing END keyword: ' ='")
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header('END = ', True)
with catch_warnings(record=True) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
assert str(w[0].message).startswith(
"Unexpected bytes trailing END keyword: ' ='")
# END card with trailing gibberish
s = invalid_header('END$%&%^*%*', True)
with catch_warnings(record=True) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
assert str(w[0].message).startswith(
"Unexpected bytes trailing END keyword: '$%&%^*%*'")
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header('END', False)
with catch_warnings(record=True) as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
assert str(w[0].message).startswith(
"Missing padding to end of the FITS block")
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([('A', 'B'), ('B', 'C'), ('C', 'D')])
header.set('B', before=2)
assert list(header.keys()) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after=0)
assert list(header.keys()) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before='C')
assert list(header.keys()) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after='A')
assert list(header.keys()) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before=2)
assert list(header.keys()) == ['A', 'B', 'C']
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set('C', before=123)
assert list(header.keys()) == ['A', 'B', 'C']
assert not header._modified
header.set('C', after=123)
assert list(header.keys()) == ['A', 'B', 'C']
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep='\n')
# First the case that *does* work prior to fixing this issue
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with CaptureStdio():
assert str(h.cards['FOCALLEN']) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards['FOCALLEN']._modified
assert str(h.cards['APERTURE']) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards['APERTURE']._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep='\n')
with CaptureStdio():
assert str(h.cards['FOCALLEN']) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards['FOCALLEN']._modified
assert str(h.cards['APERTURE']) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards['APERTURE']._modified
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h['FOCALLEN'] = 155.0
h['APERTURE'] = 0.0
assert str(h.cards['FOCALLEN']) == _pad("FOCALLEN= +1.550000000000E+002")
assert str(h.cards['APERTURE']) == _pad("APERTURE= +0.000000000000E+000")
def test_invalid_float_cards2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through PyFITS--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header['TEST'] = 5.0022221e-07
hdu.writeto(self.temp('test.fits'))
# Here we manually make the file invalid
with open(self.temp('test.fits'), 'rb+') as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii('e'))
hdul = fits.open(self.temp('test.fits'))
with catch_warnings(record=True) as w:
with CaptureStdio():
hdul.writeto(self.temp('temp.fits'), output_verify='warn')
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
python (for now PyFITS will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad('FOO = T')
barimg = _pad('BAR = F')
h = fits.Header()
h['FOO'] = True
h['BAR'] = False
assert h['FOO'] == True
assert h['BAR'] == False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h['FOO'] = np.bool_(True)
h['BAR'] = np.bool_(False)
assert h['FOO'] == True
assert h['BAR'] == False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h['FOO'] == True
assert h['BAR'] == False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
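# FITS logical values are stored as the single characters T and F in the value
# field, which is why the expected card images above are built from the strings
# 'FOO = T' and 'BAR = F' padded out to full cards.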
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([('abC', 1), ('Def', 2), ('GeH', 3)])
assert list(h.keys()) == ['ABC', 'DEF', 'GEH']
assert 'abc' in h
assert 'dEf' in h
assert h['geh'] == 3
# Case insensitivity of wildcards
assert len(h['g*']) == 1
h['aBc'] = 2
assert h['abc'] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h['gEh']
assert list(h.keys()) == ['ABC', 'DEF']
assert len(h) == 2
assert h.get('def') == 2
h.set('Abc', 3)
assert h['ABC'] == 3
h.set('gEh', 3, before='Abc')
assert list(h.keys()) == ['GEH', 'ABC', 'DEF']
assert h.pop('abC') == 3
assert len(h) == 2
assert h.setdefault('def', 3) == 2
assert len(h) == 2
assert h.setdefault('aBc', 1) == 1
assert len(h) == 3
assert list(h.keys()) == ['GEH', 'DEF', 'ABC']
h.update({'GeH': 1, 'iJk': 4})
assert len(h) == 4
assert list(h.keys()) == ['GEH', 'DEF', 'ABC', 'IJK']
assert h['GEH'] == 1
assert h.count('ijk') == 1
assert h.index('ijk') == 3
h.remove('Def')
assert len(h) == 3
assert list(h.keys()) == ['GEH', 'ABC', 'IJK']
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape((10, 10))
hdu = fits.PrimaryHDU(data=data)
hdu.header['TESTKW'] = ('Test val', 'This is the END')
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
with ignore_warnings():
hdu.writeto(self.temp('test.fits'), clobber=True)
with fits.open(self.temp('test.fits')) as hdul:
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp('test.hdr'))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert hdu.header == header2
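# A header block is 2880 bytes, i.e. 36 cards of 80 characters, so padding the
# header with blank cards until it holds 36 cards pushes the END card into a
# second block and re-checks the END-in-comment handling across the block
# boundary.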
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = u('\u30a8\u30ea\u30c3\u30af')
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h[u('FOO')] = 'BAR'
assert 'FOO' in h
assert h['FOO'] == 'BAR'
assert h[u('FOO')] == 'BAR'
assert repr(h) == _pad("FOO = 'BAR '")
assert_raises(ValueError, assign, erikku, 'BAR')
h['FOO'] = u('BAZ')
assert h[u('FOO')] == 'BAZ'
assert h[u('FOO')] == u('BAZ')
assert repr(h) == _pad("FOO = 'BAZ '")
assert_raises(ValueError, assign, 'FOO', erikku)
h['FOO'] = ('BAR', u('BAZ'))
assert h['FOO'] == 'BAR'
assert h['FOO'] == u('BAR')
assert h.comments['FOO'] == 'BAZ'
assert h.comments['FOO'] == u('BAZ')
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
h['FOO'] = (u('BAR'), u('BAZ'))
assert h['FOO'] == 'BAR'
assert h['FOO'] == u('BAR')
assert h.comments['FOO'] == 'BAZ'
assert h.comments['FOO'] == u('BAZ')
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
assert_raises(ValueError, assign, 'FOO', ('BAR', erikku))
assert_raises(ValueError, assign, 'FOO', (erikku, 'BAZ'))
assert_raises(ValueError, assign, 'FOO', (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
Although test_assign_unicode ensures that Python 2 `unicode` objects
and Python 3 `str` objects containing non-ASCII characters cannot be
assigned to headers, there is a bug that allows Python 2 `str` objects
of arbitrary encoding containing non-ASCII characters to be passed
through.
On Python 3 it should not be possible to assign bytes to a header at
all.
"""
h = fits.Header()
if PY3:
assert_raises(ValueError, h.set, 'TEST', b('Hello'))
else:
assert_raises(ValueError, h.set, 'TEST', str('ñ'))
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for the solution that is optional stripping of whitespace from the end
of a header value.
By default extra whitespace is stripped off, but if
pyfits.STRIP_HEADER_WHITESPACE = False it should not be stripped.
"""
h = fits.Header()
h['FOO'] = 'Bar '
assert h['FOO'] == 'Bar'
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
fits.STRIP_HEADER_WHITESPACE = False
try:
assert h['FOO'] == 'Bar '
assert h['QUX'] == 'Bar '
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
finally:
fits.STRIP_HEADER_WHITESPACE = True
assert h['FOO'] == 'Bar'
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
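# Per the FITS standard, trailing blanks in a string value are not significant,
# so they are stripped from returned values by default; the
# STRIP_HEADER_WHITESPACE switch toggled above controls only whether that
# stripping happens, while the underlying card image keeps its original
# trailing blanks, as the image checks show.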
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = ['CCD parameters table ...',
' reference table oref$n951041ko_ccd.fits',
' INFLIGHT 12/07/2001 25/02/2002',
' all bias frames'] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header['HISTORY'] = item
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header['HISTORY'] == hdu.header['HISTORY']
new_hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring('CLFIND2D: contour = 0.30')
c2 = fits.Card.fromstring('Just some random text.')
c3 = fits.Card.fromstring('A' * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with catch_warnings(record=True) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp('test.fits'))
with catch_warnings(record=True) as w:
with fits.open(self.temp('test.fits')) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert 'CLFIND2D' in header
assert 'Just som' in header
assert 'AAAAAAAA' in header
assert header['CLFIND2D'] == ': contour = 0.30'
assert header['Just som'] == 'e random text.'
assert header['AAAAAAAA'] == 'A' * 72
# It should not be possible to assign to the invalid keywords
assert_raises(ValueError, header.set, 'CLFIND2D', 'foo')
assert_raises(ValueError, header.set, 'Just som', 'foo')
assert_raises(ValueError, header.set, 'AAAAAAAA', 'foo')
def test_fix_hierarch_with_invalid_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring('HIERARCH ESO DET CHIP PXSPACE = 5e6')
with CaptureStdio():
c.verify('fix')
assert str(c) == _pad('HIERARCH ESO DET CHIP PXSPACE = 5E6')
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
# There is an obscure cross-platform issue that prevents things like
# float('nan') on Windows on older versions of Python; hence it is
# unlikely to come up in practice
if not (sys.platform.startswith('win32') and
sys.version_info[:2] < (2, 6)):
assert_raises(ValueError, h.set, 'TEST', float('nan'))
assert_raises(ValueError, h.set, 'TEST', float('inf'))
assert_raises(ValueError, h.set, 'TEST', np.nan)
assert_raises(ValueError, h.set, 'TEST', np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
cannot be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([('TEST', True)])
h['TEST'] = 1
assert h['TEST'] is not True
assert isinstance(h['TEST'], int)
assert h['TEST'] == 1
h['TEST'] = np.bool_(True)
assert h['TEST'] is True
h['TEST'] = False
assert h['TEST'] is False
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
h['TEST'] = 0
assert h['TEST'] is not False
assert isinstance(h['TEST'], int)
assert h['TEST'] == 0
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h['TEST'] = 1
# int -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# int -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# Now the same tests but with zeros
h['TEST'] = 0
# int -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
# int -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
assert_raises(ValueError, h.set, 'HISTORY', '\n')
assert_raises(ValueError, h.set, 'HISTORY', '\nabc')
assert_raises(ValueError, h.set, 'HISTORY', 'abc\n')
assert_raises(ValueError, h.set, 'HISTORY', 'abc\ndef')
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 "
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 "
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' "
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv "
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1"
"HISTORY 1-04T16:59:14 "
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if '\n' in card_image:
assert_raises(fits.VerifyError, c.verify, 'exception')
else:
c.verify('exception')
class TestRecordValuedKeywordCards(PyfitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the FITS WCS
Paper IV proposal.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
which this feature was first introduced).
"""
def setup(self):
super(TestRecordValuedKeywordCards, self).setup()
self._test_header = fits.Header()
self._test_header.set('DP1', 'NAXIS: 2')
self._test_header.set('DP1', 'AXIS.1: 1')
self._test_header.set('DP1', 'AXIS.2: 2')
self._test_header.set('DP1', 'NAUX: 2')
self._test_header.set('DP1', 'AUX.1.COEFF.0: 0')
self._test_header.set('DP1', 'AUX.1.POWER.0: 1')
self._test_header.set('DP1', 'AUX.1.COEFF.1: 0.00048828125')
self._test_header.set('DP1', 'AUX.1.POWER.1: 1')
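# For reference (hedged, derived from the assertions in the tests below): each
# set() call above produces a record-valued keyword card whose raw image looks like
#   DP1     = 'NAXIS: 2'
# i.e. the keyword is plain 'DP1' and the field-specifier is encoded in the string
# value, which is why lookups use keys such as 'DP1.NAXIS'.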
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
assert c.comment == 'A comment'
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.1
assert c.field_specifier == 'NAXIS'
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1', 'NAXIS: 2')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: 2.0')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: a')
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1.NAXIS', 2)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
with ignore_warnings():
c = fits.Card('DP1.NAXIS', 'a')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 'a'
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.comment == 'A comment'
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
c.field_specifier = 'NAXIS1'
assert c.field_specifier == 'NAXIS1'
assert c.keyword == 'DP1.NAXIS1'
assert c.value == 2.0
assert c.comment == 'A comment'
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
def test_field_specifier_case_senstivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set('abc.def', 1)
header.set('abc.DEF', 2)
assert header['abc.def'] == 1
assert header['ABC.def'] == 1
assert header['aBc.def'] == 1
assert header['ABC.DEF'] == 2
assert 'ABC.dEf' not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184; previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header['DP1'] == 'NAXIS: 2'
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header['DP1.NAXIS'] == 2.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
assert self._test_header['DP1.AUX.1.COEFF.1'] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
assert_raises(IndexError, lambda x: self._test_header[x], 8)
assert_raises(KeyError, lambda k: self._test_header[k], 'DP1.AXIS.3')
# Test the exception message
try:
self._test_header['DP1.AXIS.3']
except KeyError:
exc = sys.exc_info()[1]
assert exc.args[0] == "Keyword 'DP1.AXIS.3' not found."
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header['DP1.NAXIS'] == 3.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
self._test_header['DP1.AXIS.1'] = 1.1
assert self._test_header['DP1.AXIS.1'] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h['D2IM1.EXTVER'] = 1
assert h['D2IM1.EXTVER'] == 1.0
h['D2IM1.EXTVER'] = 2
assert h['D2IM1.EXTVER'] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2'
c = fits.Card('DP1.NAXIS', 2)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set('DP1', 'AXIS.3: 1', 'a comment',
after='DP1.AXIS.2')
assert self._test_header[3] == 1
assert self._test_header['DP1.AXIS.3'] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
keyword/field-specifier combination.
"""
del self._test_header['DP1.AXIS.1']
assert len(self._test_header) == 7
assert list(self._test_header.keys())[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header.keys())[1] == 'DP1.AXIS.2'
assert self._test_header[1] == 2
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header['DP1.AXIS.2']
assert len(self._test_header) == 6
assert list(self._test_header.keys())[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header.keys())[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header['DP1.AXIS.*']
assert isinstance(cl, fits.Header)
assert (
[str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
cl = self._test_header['DP1.N*']
assert (
[str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'"])
cl = self._test_header['DP1.AUX...']
assert (
[str(c).strip() for c in cl.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl = self._test_header['DP?.NAXIS']
assert (
[str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'"])
cl = self._test_header['DP1.A*S.*']
assert (
[str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header['DP1.A*...']
assert len(self._test_header) == 2
assert list(self._test_header.keys())[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header.keys())[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header['DP1.A*...']
assert (
[str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl2 = cl['*.*AUX...']
assert (
[str(c).strip() for c in cl2.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl.keys()) == ['DP1.AXIS.1', 'DP1.AXIS.2']
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header['DP1.AXIS.*']
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h['HISTORY'] = 'AXIS.1: 2'
h['HISTORY'] = 'AXIS.2: 2'
assert 'HISTORY.AXIS' not in h
assert 'HISTORY.AXIS.1' not in h
assert 'HISTORY.AXIS.2' not in h
assert h['HISTORY'] == ['AXIS.1: 2', 'AXIS.2: 2']
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h['HISTORY'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'HISTORY.Date' not in h
assert str(h.cards[0]) == _pad('HISTORY Date: 2012-09-19T13:58:53.756061')
c = fits.Card.fromstring(
" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ''
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h['FOO'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'FOO.Date' not in h
assert str(h.cards[0]) == _pad("FOO = 'Date: 2012-09-19T13:58:53.756061'")
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h['FOO'] == 'AXIS.1: 2'
assert h[('FOO', 1)] == 'AXIS.2: 4'
assert h['FOO.AXIS.1'] == 2.0
assert h['FOO.AXIS.2'] == 4.0
assert 'FOO.AXIS' not in h
assert 'FOO.AXIS.' not in h
assert 'FOO.' not in h
assert_raises(KeyError, lambda: h['FOO.AXIS'])
assert_raises(KeyError, lambda: h['FOO.AXIS.'])
assert_raises(KeyError, lambda: h['FOO.'])
|
embray/PyFITS
|
lib/pyfits/tests/test_header.py
|
Python
|
bsd-3-clause
| 98,445
|
[
"BLAST"
] |
0026bb6d2ee7639efae43c6781270bb607b970571ac8d12d4d84239d7c35416b
|
#
# cpair
#
# // overview
# Enumeration for colourpairs in the platform. By having a centralised
# enumeration, we can try to make the user experiences fairly consistent
# across different types of console (pygame, ncurses, etc).
#
# // license
# Copyright 2016, Free Software Foundation.
#
# This file is part of Solent.
#
# Solent is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Solent is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Solent. If not, see <http://www.gnu.org/licenses/>.
import enum
class e_cpair(enum.Enum):
"""Colour pairs. The numbering system has been selected to try and make
things easy for limited-colour consoles that want to round down to a
best fit for their available colours. This way we should be able to add
colours later without causing hard breaks for older curses tweaks.
To understand where I started, see the colour diagram at
https://en.wikipedia.org/wiki/File:Color_star-en_(tertiary_names).svg
Seems to be the RYB colour model.
"""
# 50-100 is shades of grey, ending in white
grey = 66
# 100-200 is colour shades on a black background
white = 100
red = 120
vermilion = 125 # FF4500
orange = 130 # FFA500
amber = 135
yellow = 140
chartreuse = 145 # 7FFF00
green = 150
teal = 155
blue = 160
violet = 165
purple = 170
magenta = 175
# 200-300 is shades of grey on a very-light-but-unspecified background
black_info = 200
grey_info = 250
# 300-400 is shades of colour on an unspecified-colour background
green_info = 350
# 400 is an alarm colour pair
alarm = 400
def solent_cpair_pairs():
"""Returns (cpair_value, cpair_name), sorted."""
lst = []
for name in dir(e_cpair):
if name.startswith('__'):
continue
print('name %s'%name)
v = getattr(e_cpair, name).value
lst.append( (v, name) )
lst.sort()
return lst
def solent_name_cpair(value):
# e_cpair[...] indexes by member name; to map a numeric value back to its
# name, look the member up by value instead.
return e_cpair(value).name
def solent_cpair(name):
'''
Pass in the name of a member of e_cpair. This uses reflection to look it
up from the enumeration itself, and returns the value of the selected
item.
'''
l = dir(e_cpair)
if name not in l:
raise Exception("e_cpair.%s does not exist"%(name))
return getattr(e_cpair, name).value
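# Illustrative usage of solent_cpair (values read from e_cpair above):
#   solent_cpair('red')       -> 120
#   solent_cpair('grey_info') -> 250
#   solent_cpair('nonsense')  -> raises Exception("e_cpair.nonsense does not exist")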
class e_reduced_cpair(enum.Enum):
red_t = 0
green_t = 1
yellow_t = 2
blue_t = 3
purple_t = 4
cyan_t = 5
white_t = 6
t_red = 7
t_green = 8
t_yellow = 9
white_blue = 10
white_purple = 11
black_cyan = 12
t_white = 13
def solent_reduce_cpair_to_basic_code(value):
"""In situations where you have a basic colour palette environment (e.g.
curses) you want a simple way to reduce from the fancy colour to
something that works for you."""
|
solent-eng/solent
|
solent/cpair.py
|
Python
|
lgpl-3.0
| 3,576
|
[
"Amber"
] |
0ea0b13cbbdc1954b59bd34ed11a31f61318a16c79b5b698724067f4fcf41022
|
# mako/pyparser.py
# Copyright 2006-2021 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via the _ast module.
"""
import operator
import _ast
from mako import _ast_util
from mako import compat
from mako import exceptions
from mako import util
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = {"True", "False", "None", "print"}
# the "id" attribute on a function node
arg_id = operator.attrgetter("arg")
util.restore__ast(_ast)
def parse(code, mode="exec", **exception_kwargs):
"""Parse an expression into AST"""
try:
return _ast_util.parse(code, "<unknown>", mode)
except Exception as e:
raise exceptions.SyntaxException(
"(%s) %s (%r)"
% (
compat.exception_as().__class__.__name__,
compat.exception_as(),
code[0:50],
),
**exception_kwargs,
) from e
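# Illustrative (hedged) usage of parse(); the wrapped exception text is an
# assumption about typical output, not an exact quote:
#   parse("x +")  -> raises exceptions.SyntaxException, naming the underlying
#                    SyntaxError and echoing the first 50 characters of the code.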
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.in_assign_targets = False
self.local_ident_stack = set()
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
else:
self.local_ident_stack.add(name)
def visit_ClassDef(self, node):
self._add_declared(node.name)
def visit_Assign(self, node):
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
self.visit(node.value)
in_a = self.in_assign_targets
self.in_assign_targets = True
for n in node.targets:
self.visit(n)
self.in_assign_targets = in_a
def visit_ExceptHandler(self, node):
if node.name is not None:
self._add_declared(node.name)
if node.type is not None:
self.visit(node.type)
for statement in node.body:
self.visit(statement)
def visit_Lambda(self, node, *args):
self._visit_function(node, True)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
self._visit_function(node, False)
def _expand_tuples(self, args):
for arg in args:
if isinstance(arg, _ast.Tuple):
yield from arg.elts
else:
yield arg
def _visit_function(self, node, islambda):
# push function state onto the stack. don't log any more
# identifiers as "declared" until we are outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they aren't
# counted as "undeclared"
inf = self.in_function
self.in_function = True
local_ident_stack = self.local_ident_stack
self.local_ident_stack = local_ident_stack.union(
[arg_id(arg) for arg in self._expand_tuples(node.args.args)]
)
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
self.local_ident_stack = local_ident_stack
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
# this is equivalent to visit_AssName in
# compiler
self._add_declared(node.id)
elif (
node.id not in reserved
and node.id not in self.listener.declared_identifiers
and node.id not in self.local_ident_stack
):
self.listener.undeclared_identifiers.add(node.id)
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split(".")[0])
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
elif name.name == "*":
raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.",
**self.exception_kwargs,
)
else:
self._add_declared(name.name)
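# Hedged illustration of what FindIdentifiers reports. 'listener' is assumed to
# be any object exposing declared_identifiers / undeclared_identifiers sets, as
# the code above implies:
#   source "y = x + 5"      -> declared: {'y'}, undeclared: {'x'}
#   source "import os.path" -> declared: {'os'}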
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
ldi = self.listener.declared_identifiers
self.listener.declared_identifiers = ldi.union(
p.declared_identifiers
)
lui = self.listener.undeclared_identifiers
self.listener.undeclared_identifiers = lui.union(
p.undeclared_identifiers
)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(node.args.vararg.arg)
kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs]
if node.args.kwarg:
kwargnames.append(node.args.kwarg.arg)
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.kwargnames = kwargnames
self.listener.kwdefaults = node.args.kw_defaults
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator:
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(" " * 4)
self.generator.visit(astnode)
def value(self):
return "".join(self.generator.result)
|
sqlalchemy/mako
|
mako/pyparser.py
|
Python
|
mit
| 7,032
|
[
"VisIt"
] |
eb264bf2fdca92a6a230e17eda40275091c41c92f7265807fd27346f0e385dee
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: dpkg_selections
short_description: Dpkg package selections
description:
- Change dpkg package selection state via --get-selections and --set-selections.
version_added: "2.0"
author:
- Brian Brazil (@brian-brazil) <brian.brazil@boxever.com>
options:
name:
description:
- Name of the package.
required: true
type: str
selection:
description:
- The selection state to set the package to.
choices: [ 'install', 'hold', 'deinstall', 'purge' ]
required: true
type: str
extends_documentation_fragment:
- action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
support: full
platforms: debian
notes:
- This module won't cause any packages to be installed/removed/purged; use the C(apt) module for that.
'''
EXAMPLES = '''
- name: Prevent python from being upgraded
dpkg_selections:
name: python
selection: hold
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
selection=dict(choices=['install', 'hold', 'deinstall', 'purge'], required=True)
),
supports_check_mode=True,
)
dpkg = module.get_bin_path('dpkg', True)
name = module.params['name']
selection = module.params['selection']
# Get current settings.
rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
if not out:
current = 'not present'
else:
current = out.split()[1]
changed = current != selection
if module.check_mode or not changed:
module.exit_json(changed=changed, before=current, after=selection)
module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
module.exit_json(changed=changed, before=current, after=selection)
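# Illustrative note (not part of the module logic): `dpkg --get-selections <name>`
# prints lines of the form "<package><whitespace><selection>", e.g. "python  hold",
# which is why the current state is taken from out.split()[1] above.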
if __name__ == '__main__':
main()
|
srvg/ansible
|
lib/ansible/modules/dpkg_selections.py
|
Python
|
gpl-3.0
| 2,298
|
[
"Brian"
] |
3792dc7de93aef52e2fc37b6f77844ff3736051f710163b59c36618b9b6bf1a5
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Write ESPResSo trajectories in the H5MD format. See :ref:`Writing H5MD-files`.
"""
import espressomd
from espressomd.io.writer import h5md # pylint: disable=import-error
from espressomd import polymer
from espressomd import interactions
system = espressomd.System(box_l=[100.0, 100.0, 100.0])
system.time_step = 0.01
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
system.cell_system.skin = 0.4
fene = interactions.FeneBond(k=10, d_r_max=2)
system.bonded_inter.add(fene)
positions = polymer.linear_polymer_positions(n_polymers=5,
beads_per_chain=50,
bond_length=1.0,
seed=1234)
for chain in positions:  # renamed from 'polymer' to avoid shadowing the imported module
for i, pos in enumerate(chain):
id = len(system.part)
system.part.add(id=id, pos=pos)
if i > 0:
system.part[id].add_bond((fene, id - 1))
h5_units = h5md.UnitSystem(time='ps', mass='u', length='nm', charge='e')
h5_file = h5md.H5md(file_path="sample.h5", unit_system=h5_units)
for i in range(2):
h5_file.write()
system.integrator.run(steps=10)
h5_file.flush()
h5_file.close()
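# Hedged sketch of reading the trajectory back, assuming h5py is installed and
# that positions are stored under the usual H5MD path
# 'particles/atoms/position/value' (the group name is an assumption, not taken
# from the ESPResSo documentation):
#   import h5py
#   with h5py.File("sample.h5", "r") as f:
#       pos = f["particles/atoms/position/value"][:]  # shape (frames, n_part, 3)
#       print(pos.shape)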
|
KaiSzuttor/espresso
|
samples/h5md.py
|
Python
|
gpl-3.0
| 1,897
|
[
"ESPResSo"
] |
cce4e302f5e12122e4f4e8a558a43a4e87b2c817f0ebc0224ae399ec4851afaf
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .proc_table import procedures, hooks, energy_only_methods
from .proc import scf_helper, scf_wavefunction_factory
from .empirical_dispersion import EmpericalDispersion
from . import dft_functional
|
kannon92/psi4
|
psi4/driver/procedures/__init__.py
|
Python
|
gpl-2.0
| 1,150
|
[
"Psi4"
] |
975005324002faee0dedcbf83d1004d40aa2db841d794fa7a4ff9e2912225abc
|
''' CSHelpers
Module containing functions interacting with the CS and useful for the RSS
modules.
'''
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getGOCSiteName
from DIRAC.ResourceStatusSystem.Utilities import Utils
from DIRAC.Resources.Storage.StorageElement import StorageElement
__RCSID__ = '$Id: $'
def warmUp():
'''
gConfig has its own dark side, it needs some warm up phase.
'''
from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
gRefresher.refreshConfigurationIfNeeded()
## Main functions ##############################################################
def getSites():
'''
Gets all sites from /Resources/Sites
'''
_basePath = 'Resources/Sites'
sites = []
domainNames = gConfig.getSections( _basePath )
if not domainNames[ 'OK' ]:
return domainNames
domainNames = domainNames[ 'Value' ]
for domainName in domainNames:
domainSites = gConfig.getSections( '%s/%s' % ( _basePath, domainName ) )
if not domainSites[ 'OK' ]:
return domainSites
domainSites = domainSites[ 'Value' ]
sites.extend( domainSites )
# Remove duplicates (just in case)
sites = list( set ( sites ) )
return S_OK( sites )
def getGOCSites( diracSites = None ):
if diracSites is None:
diracSites = getSites()
if not diracSites[ 'OK' ]:
return diracSites
diracSites = diracSites[ 'Value' ]
gocSites = []
for diracSite in diracSites:
gocSite = getGOCSiteName( diracSite )
if not gocSite[ 'OK' ]:
continue
gocSites.append( gocSite[ 'Value' ] )
return S_OK( list( set( gocSites ) ) )
def getDomainSites():
'''
Gets all sites from /Resources/Sites
'''
_basePath = 'Resources/Sites'
sites = {}
domainNames = gConfig.getSections( _basePath )
if not domainNames[ 'OK' ]:
return domainNames
domainNames = domainNames[ 'Value' ]
for domainName in domainNames:
domainSites = gConfig.getSections( '%s/%s' % ( _basePath, domainName ) )
if not domainSites[ 'OK' ]:
return domainSites
domainSites = domainSites[ 'Value' ]
sites[ domainName ] = domainSites
return S_OK( sites )
def getResources():
'''
Gets all resources
'''
resources = []
ses = getStorageElements()
if ses[ 'OK' ]:
resources = resources + ses[ 'Value' ]
fts = getFTS()
if fts[ 'OK' ]:
resources = resources + fts[ 'Value' ]
fc = getFileCatalogs()
if fc[ 'OK' ]:
resources = resources + fc[ 'Value' ]
ce = getComputingElements()
if ce[ 'OK' ]:
resources = resources + ce[ 'Value' ]
return S_OK( resources )
def getNodes():
'''
Gets all nodes
'''
nodes = []
queues = getQueues()
if queues[ 'OK' ]:
nodes = nodes + queues[ 'Value' ]
return S_OK( nodes )
################################################################################
def getStorageElements():
'''
Gets all storage elements from /Resources/StorageElements
'''
_basePath = 'Resources/StorageElements'
seNames = gConfig.getSections( _basePath )
return seNames
def getStorageElementsHosts( seNames = None ):
seHosts = []
if seNames is None:
seNames = getStorageElements()
if not seNames[ 'OK' ]:
return seNames
seNames = seNames[ 'Value' ]
for seName in seNames:
seHost = getSEHost( seName )
if not seHost['OK']:
gLogger.warn( "Could not get SE Host", "SE: %s" % seName )
continue
if seHost['Value']:
seHosts.append( seHost['Value'] )
return S_OK( list( set( seHosts ) ) )
def _getSEParameters( seName ):
se = StorageElement( seName, hideExceptions = True )
pluginsList = se.getPlugins()
if not pluginsList['OK']:
gLogger.warn( pluginsList['Message'], "SE: %s" % seName )
return pluginsList
pluginsList = pluginsList['Value']
# Put the srm-capable protocol first; why we do that is a
# mystery that will eventually need to be sorted out...
for plugin in ( 'GFAL2_SRM2', 'SRM2' ):
if plugin in pluginsList:
pluginsList.remove( plugin )
pluginsList.insert( 0, plugin )
for plugin in pluginsList:
seParameters = se.getStorageParameters( plugin )
if seParameters['OK']:
break
return seParameters
def getSEToken( seName ):
''' Get StorageElement token
'''
seParameters = _getSEParameters( seName )
if not seParameters['OK']:
gLogger.warn( "Could not get SE parameters", "SE: %s" % seName )
return seParameters
return S_OK( seParameters['Value']['SpaceToken'] )
def getSEHost( seName ):
''' Get StorageElement host name
'''
seParameters = _getSEParameters( seName )
if not seParameters['OK']:
gLogger.warn( "Could not get SE parameters", "SE: %s" % seName )
return seParameters
return S_OK( seParameters['Value']['Host'] )
def getStorageElementEndpoint( seName ):
""" Get endpoint as combination of host, port, wsurl
"""
seParameters = _getSEParameters( seName )
if not seParameters['OK']:
gLogger.warn( "Could not get SE parameters", "SE: %s" % seName )
return seParameters
host = seParameters['Value']['Host']
port = seParameters['Value']['Port']
wsurl = seParameters['Value']['WSUrl']
# MAYBE wsurl is not defined
if host and port:
url = 'httpg://%s:%s%s' % ( host, port, wsurl )
url = url.replace( '?SFN=', '' )
return S_OK( url )
return S_ERROR( ( host, port, wsurl ) )
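# Illustrative example of the construction above, with made-up parameter values:
#   Host='srm.example.org', Port='8443', WSUrl='/srm/managerv2?SFN='
#   -> 'httpg://srm.example.org:8443/srm/managerv2'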
def getStorageElementEndpoints( storageElements = None ):
if storageElements is None:
storageElements = getStorageElements()
if not storageElements[ 'OK' ]:
return storageElements
storageElements = storageElements[ 'Value' ]
storageElementEndpoints = []
for se in storageElements:
seEndpoint = getStorageElementEndpoint( se )
if not seEndpoint[ 'OK' ]:
continue
storageElementEndpoints.append( seEndpoint[ 'Value' ] )
return S_OK( list( set( storageElementEndpoints ) ) )
def getFTS():
'''
Gets all FTS endpoints from /Resources/FTSEndpoints
'''
ftsEndpoints = []
fts2 = getFTS2()
if not fts2['OK']:
return fts2
ftsEndpoints += fts2['Value']
fts3 = getFTS3()
if not fts3['OK']:
return fts3
ftsEndpoints += fts3['Value']
return S_OK( ftsEndpoints )
def getFTS2():
'''
Gets all FTS2 endpoints from /Resources/FTSEndpoints/FTS2
'''
_basePath = 'Resources/FTSEndpoints/FTS2'
ftsEndpoints = gConfig.getOptions( _basePath )
ftsEndpointDefaultLocation = gConfig.getValue( '/Resources/FTSEndpoints/Default/FTSEndpoint', '' )
if ftsEndpoints['OK'] and ftsEndpointDefaultLocation:
ftsEndpoints['Value'].append( ftsEndpointDefaultLocation )
return ftsEndpoints
def getFTS3():
'''
Gets all FTS3 endpoints from /Resources/FTSEndpoints/FTS3
'''
_basePath = 'Resources/FTSEndpoints/FTS3'
ftsEndpoints = gConfig.getOptions( _basePath )
return ftsEndpoints
def getSpaceTokenEndpoints():
''' Get Space Token Endpoints '''
return Utils.getCSTree( 'Shares/Disk' )
def getFileCatalogs():
'''
Gets all file catalogs from /Resources/FileCatalogs
'''
_basePath = 'Resources/FileCatalogs'
fileCatalogs = gConfig.getSections( _basePath )
return fileCatalogs
def getComputingElements():
'''
Gets all computing elements from /Resources/Sites/<>/<>/CEs
'''
_basePath = 'Resources/Sites'
ces = []
domainNames = gConfig.getSections( _basePath )
if not domainNames[ 'OK' ]:
return domainNames
domainNames = domainNames[ 'Value' ]
for domainName in domainNames:
domainSites = gConfig.getSections( '%s/%s' % ( _basePath, domainName ) )
if not domainSites[ 'OK' ]:
return domainSites
domainSites = domainSites[ 'Value' ]
for site in domainSites:
siteCEs = gConfig.getSections( '%s/%s/%s/CEs' % ( _basePath, domainName, site ) )
if not siteCEs[ 'OK' ]:
# return siteCEs
gLogger.error( siteCEs[ 'Message' ] )
continue
siteCEs = siteCEs[ 'Value' ]
ces.extend( siteCEs )
# Remove duplicates (just in case)
ces = list( set ( ces ) )
return S_OK( ces )
# #
# Quick functions implemented for Andrew
def getSiteComputingElements( siteName ):
'''
Gets all computing elements from /Resources/Sites/<>/<siteName>/CE
'''
_basePath = 'Resources/Sites'
domainNames = gConfig.getSections( _basePath )
if not domainNames[ 'OK' ]:
return domainNames
domainNames = domainNames[ 'Value' ]
for domainName in domainNames:
ces = gConfig.getValue( '%s/%s/%s/CE' % ( _basePath, domainName, siteName ), '' )
if ces:
return ces.split( ', ' )
return []
def getSiteStorageElements( siteName ):
'''
Gets all computing elements from /Resources/Sites/<>/<siteName>/SE
'''
_basePath = 'Resources/Sites'
domainNames = gConfig.getSections( _basePath )
if not domainNames[ 'OK' ]:
return domainNames
domainNames = domainNames[ 'Value' ]
for domainName in domainNames:
ses = gConfig.getValue( '%s/%s/%s/SE' % ( _basePath, domainName, siteName ), '' )
if ses:
return ses.split( ', ' )
return []
def getSiteElements( siteName ):
'''
Gets all the computing and storage elements for a given site
'''
resources = []
ses = getSiteStorageElements(siteName)
resources = resources + ses
ce = getSiteComputingElements(siteName)
resources = resources + ce
return S_OK( resources )
def getQueues():
'''
Gets all queues from /Resources/Sites/<>/<>/CEs/<ce>/Queues
'''
_basePath = 'Resources/Sites'
queues = []
domainNames = gConfig.getSections( _basePath )
if not domainNames[ 'OK' ]:
return domainNames
domainNames = domainNames[ 'Value' ]
for domainName in domainNames:
domainSites = gConfig.getSections( '%s/%s' % ( _basePath, domainName ) )
if not domainSites[ 'OK' ]:
return domainSites
domainSites = domainSites[ 'Value' ]
for site in domainSites:
siteCEs = gConfig.getSections( '%s/%s/%s/CEs' % ( _basePath, domainName, site ) )
if not siteCEs[ 'OK' ]:
# return siteCEs
gLogger.error( siteCEs[ 'Message' ] )
continue
siteCEs = siteCEs[ 'Value' ]
for siteCE in siteCEs:
siteQueue = gConfig.getSections( '%s/%s/%s/CEs/%s/Queues' % ( _basePath, domainName, site, siteCE ) )
if not siteQueue[ 'OK' ]:
# return siteQueue
gLogger.error( siteQueue[ 'Message' ] )
continue
siteQueue = siteQueue[ 'Value' ]
queues.extend( siteQueue )
# Remove duplicates (just in case)
queues = list( set ( queues ) )
return S_OK( queues )
## /Registry ###################################################################
def getRegistryUsers():
'''
Gets all users from /Registry/Users
'''
_basePath = 'Registry/Users'
registryUsers = {}
userNames = gConfig.getSections( _basePath )
if not userNames[ 'OK' ]:
return userNames
userNames = userNames[ 'Value' ]
for userName in userNames:
# returns { 'Email' : x, 'DN': y, 'CA' : z }
userDetails = gConfig.getOptionsDict( '%s/%s' % ( _basePath, userName ) )
if not userDetails[ 'OK' ]:
return userDetails
registryUsers[ userName ] = userDetails[ 'Value' ]
return S_OK( registryUsers )
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Andrew-McNab-UK/DIRAC
|
ResourceStatusSystem/Utilities/CSHelpers.py
|
Python
|
gpl-3.0
| 11,484
|
[
"DIRAC"
] |
c87552ab129982b36398c319a0018708edd478b170170f5664289c0f674a0990
|
import os
import subprocess
import unittest
import netCDF4
import numpy as np
import requests
import bald
from bald.tests import BaldTestCase
OGCFiles = ('https://raw.githubusercontent.com/opengeospatial/netcdf-ld/'
'master/standard/abstract_tests/')
class Test(BaldTestCase):
def setUp(self):
self.cdl_path = os.path.join(os.path.dirname(__file__), 'CDL')
self.ttl_path = os.path.join(os.path.dirname(__file__), 'TTL')
self.maxDiff = None
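# Each conformance test below follows the same pattern: download a CDL file from
# the netcdf-ld repository, build a NetCDF file with ncgen, load it with
# bald.load_netcdf, serialise the RDF graph to Turtle, and compare it against the
# reference TTL downloaded from the same repository.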
def test_conformance_a(self):
with self.temp_filename('.nc') as tfile:
cdlname = 'ogcClassA.cdl'
cdl_file = os.path.join(self.cdl_path, cdlname)
with open(cdl_file, 'w') as cdlf:
cdluri = '{}CDL/ogcClassA.cdl'.format(OGCFiles)
r = requests.get(cdluri)
if r.status_code != 200:
raise ValueError('CDL download failed for {}'.format(cdluri))
cdlf.write(r.text)
subprocess.check_call(['ncgen', '-o', tfile, cdl_file])
cdl_file_uri = 'http://secret.binary-array-ld.net/identity.nc'
root_container = bald.load_netcdf(tfile, baseuri=cdl_file_uri, cache=self.acache)
ttl = root_container.rdfgraph().serialize(format='n3').decode("utf-8")
ttl_file = os.path.join(self.ttl_path, 'ogcClassA.ttl')
with open(ttl_file, 'w') as ttlf:
ttluri = '{}TTL/ogcClassA.ttl'.format(OGCFiles)
r = requests.get(ttluri)
if r.status_code != 200:
raise ValueError('TTL download failed for {}'.format(ttluri))
ttlf.write(r.text)
with open(ttl_file, 'r') as sf:
expected_ttl = sf.read()
os.remove(ttl_file)
os.remove(cdl_file)
self.assertEqual(expected_ttl, ttl)
def test_conformance_b(self):
with self.temp_filename('.nc') as tfile:
cdlname = 'ogcClassB.cdl'
cdl_file = os.path.join(self.cdl_path, cdlname)
with open(cdl_file, 'w') as cdlf:
cdluri = '{}CDL/ogcClassB.cdl'.format(OGCFiles)
r = requests.get(cdluri)
if r.status_code != 200:
raise ValueError('CDL download failed for {}'.format(cdluri))
cdlf.write(r.text)
subprocess.check_call(['ncgen', '-o', tfile, cdl_file])
cdl_file_uri = 'http://secret.binary-array-ld.net/prefix.nc'
root_container = bald.load_netcdf(tfile, baseuri=cdl_file_uri, cache=self.acache)
ttl = root_container.rdfgraph().serialize(format='n3').decode("utf-8")
ttl_file = os.path.join(self.ttl_path, 'ogcClassB.ttl')
with open(ttl_file, 'w') as ttlf:
ttluri = '{}TTL/ogcClassB.ttl'.format(OGCFiles)
r = requests.get(ttluri)
if r.status_code != 200:
raise ValueError('TTL download failed for {}'.format(ttluri))
ttlf.write(r.text)
with open(ttl_file, 'r') as sf:
expected_ttl = sf.read()
os.remove(ttl_file)
os.remove(cdl_file)
self.assertEqual(expected_ttl, ttl)
def test_conformance_c(self):
with self.temp_filename('.nc') as tfile:
cdlname = 'ogcClassC.cdl'
cdl_file = os.path.join(self.cdl_path, cdlname)
with open(cdl_file, 'w') as cdlf:
cdluri = '{}CDL/ogcClassC.cdl'.format(OGCFiles)
r = requests.get(cdluri)
if r.status_code != 200:
raise ValueError('CDL download failed for {}'.format(cdluri))
cdlf.write(r.text)
subprocess.check_call(['ncgen', '-o', tfile, cdl_file])
cdl_file_uri = 'http://secret.binary-array-ld.net/alias.nc'
alias_dict = {'NetCDF': 'http://def.scitools.org.uk/NetCDF'}
root_container = bald.load_netcdf(tfile, baseuri=cdl_file_uri,
alias_dict=alias_dict, cache=self.acache)
ttl = root_container.rdfgraph().serialize(format='n3').decode("utf-8")
ttl_file = os.path.join(self.ttl_path, 'ogcClassC.ttl')
with open(ttl_file, 'w') as ttlf:
ttluri = '{}TTL/ogcClassC.ttl'.format(OGCFiles)
r = requests.get(ttluri)
if r.status_code != 200:
raise ValueError('TTL download failed for {}'.format(ttluri))
ttlf.write(r.text)
with open(ttl_file, 'r') as sf:
expected_ttl = sf.read()
os.remove(ttl_file)
os.remove(cdl_file)
self.assertEqual(expected_ttl, ttl)
def test_conformance_d(self):
with self.temp_filename('.nc') as tfile:
cdlname = 'ogcClassD.cdl'
cdl_file = os.path.join(self.cdl_path, cdlname)
with open(cdl_file, 'w') as cdlf:
cdluri = '{}CDL/ogcClassD.cdl'.format(OGCFiles)
r = requests.get(cdluri)
if r.status_code != 200:
raise ValueError('CDL download failed for {}'.format(cdluri))
cdlf.write(r.text)
subprocess.check_call(['ncgen', '-o', tfile, cdl_file])
cdl_file_uri = 'http://secret.binary-array-ld.net/attributes.nc'
alias_dict = {'NetCDF': 'http://def.scitools.org.uk/NetCDF'}
root_container = bald.load_netcdf(tfile, baseuri=cdl_file_uri,
alias_dict=alias_dict, cache=self.acache)
ttl = root_container.rdfgraph().serialize(format='n3').decode("utf-8")
ttl_file = os.path.join(self.ttl_path, 'ogcClassD.ttl')
with open(ttl_file, 'w') as ttlf:
ttluri = '{}TTL/ogcClassD.ttl'.format(OGCFiles)
r = requests.get(ttluri)
if r.status_code != 200:
raise ValueError('TTL download failed for {}'.format(ttluri))
ttlf.write(r.text)
with open(ttl_file, 'r') as sf:
expected_ttl = sf.read()
os.remove(ttl_file)
os.remove(cdl_file)
self.assertEqual(expected_ttl, ttl)
|
binary-array-ld/bald
|
lib/bald/tests/integration/test_ogc_conformance.py
|
Python
|
bsd-3-clause
| 6,313
|
[
"NetCDF"
] |
4bd3ea3aa6b9bb98abfd701a579d8aac862e514289f4fce854378c0b35aa9d33
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
# workaround for the removal of tf.contrib.deprecated summaries around TF 0.12: use the plain summary ops instead
# tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
tf.histogram_summary(tensor_name + '/activations', x)
# tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
tf.scalar_summary(tensor_name + '/sparsity',tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
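# Illustrative note: tf.nn.l2_loss(var) is sum(var ** 2) / 2, so the term added
# to the 'losses' collection above is wd * sum(var ** 2) / 2 (e.g. the local3
# weights below use wd=0.004).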
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
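# Hedged shape walk-through, assuming cifar10_input.IMAGE_SIZE is 24 as in the
# standard tutorial input pipeline:
#   images [batch, 24, 24, 3] -> conv1/pool1 [batch, 12, 12, 64]
#   -> conv2/pool2 [batch, 6, 6, 64] -> reshape [batch, 2304]
#   -> local3 [batch, 384] -> local4 [batch, 192] -> logits [batch, NUM_CLASSES]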
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
# tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)
tf.scalar_summary(l.op.name + ' (raw)', l)
# tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
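# Worked example of the schedule above, assuming the standard 50,000 CIFAR-10
# training images: decay_steps = int(50000 / 128 * 350) = 136718, so with
# staircase=True the learning rate is 0.1 for the first ~136k steps, then 0.01,
# then 0.001, and so on.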
#tf.contrib.deprecated.scalar_summary('learning_rate', lr)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
#tf.contrib.deprecated.histogram_summary(var.op.name, var)
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
#tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
fvilca/cnn_tensorflow_cifar
|
cifar10.py
|
Python
|
mit
| 15,056
|
[
"Gaussian"
] |
5c67f08e07a4e309cfd83986db3dbefbf120a3a1c6936c502376451a8bd611bd
|
import msmbuilder.cluster, msmbuilder.msm, msmbuilder.example_datasets, msmbuilder.lumping, msmbuilder.utils
from sklearn.pipeline import make_pipeline
import mdtraj as md
n_micro = 750
#trajectories = [md.load("./trajectory%d.xtc" % i, top="./protein.pdb") for i in [1, 2]]
#trajectories = msmbuilder.example_datasets.fetch_2f4k()["trajectories"]
trajectories = msmbuilder.example_datasets.fetch_bpti()["trajectories"]
clusterer = msmbuilder.cluster.MiniBatchKMedoids(n_clusters=n_micro, metric="rmsd", batch_size=1000)
clusterer.fit(trajectories)
assignments = clusterer.predict(trajectories)
n_macro = 4
micromsm = msmbuilder.msm.MarkovStateModel(n_timescales=n_macro + 1)
lumper = msmbuilder.lumping.PCCAPlus(n_macro)
macromsm = msmbuilder.msm.MarkovStateModel(n_timescales=n_macro)
pipeline = make_pipeline(micromsm, lumper, macromsm)
macro_assignments = pipeline.fit_transform(assignments)
macromsm.transmat_
|
kyleabeauchamp/Caliber
|
src/cluster_new.py
|
Python
|
gpl-2.0
| 922
|
[
"MDTraj"
] |
aac7f91d24b374248a3372219e7c039d57fd29a4cb6ee2cbc42804def6334fcb
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
from alife_milestone1 import *
app=PlantBoxMilestone1()
|
PeterLauris/aifh
|
vol2/vol2-python-examples/examples/capstone_alife/run_milestone1.py
|
Python
|
apache-2.0
| 1,032
|
[
"VisIt"
] |
a711299d0a430d35dacf8e7828f66ad3c8373fb6621c291b3b77cf19c24defa1
|
"""
Parameterizes molecules for molecular dynamics simulations
"""
__version__ = '2.7.12'
__author__ = 'Robin Betz'
# Currently supported forcefields and information
supported_forcefields = {
"charmm": "CHARMM36m, July 2018 update",
"amber": "AMBER 14",
"opls": "OPLS AA/M, 2001 aminoacid dihedrals",
}
supported_water_models = {
"tip3": "TIP3 model, from W.L. Jorgensen, J.Chandrasekhar, J.D. Madura; "
"R.W. Impey, M.L. Klein; Comparison of simple potential functions "
"for simulating liquid water; J. Chem. Phys. 79 926-935 (1983).",
"tip4e": "TIP4P-Ewald, from H.W. Horn, W.C Swope, J.W. Pitera, J.D. Madura,"
" T.J. Dick, G.L. Hura, T. Head-Gordon; J. Chem. Phys. "
"120: 9665-9678 (2004)",
"spce": "SPC/E model, from H.J.C. Berendsen, J. R. Grigera, "
"T. P. Straatsma; The Missing Term in Effective Pair "
"Potentials; J. Phys. Chem 1987, 91, 6269-6271 (1987)",
}
from dabble.param.moleculematcher import MoleculeMatcher
from dabble.param.ambermatcher import AmberMatcher
from dabble.param.charmmmatcher import CharmmMatcher, Patch
from dabble.param.gromacsmatcher import GromacsMatcher
from dabble.param.writer import MoleculeWriter
from dabble.param.amber import AmberWriter
from dabble.param.charmm import CharmmWriter
from dabble.param.gromacs import GromacsWriter
from dabble.param.lammps import LammpsWriter
|
Eigenstate/dabble
|
dabble/param/__init__.py
|
Python
|
gpl-2.0
| 1,426
|
[
"Amber",
"CHARMM",
"Gromacs",
"LAMMPS"
] |
aa6b9708f16fd59434f195d9b3a62cde35d98b18391bcf87a17669ca93c412a2
|
# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# Copyright (c) 2009-2010 Arista Networks, Inc.
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""basic checker for Python code
"""
from logilab import astng
from logilab.common.compat import any
from logilab.common.ureports import Table
from logilab.astng import are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.reporters import diff_string
from pylint.checkers import BaseChecker
import re
# regex for class/function/variable/constant name
CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
DEFAULT_NAME_RGX = re.compile('[a-z_][a-z0-9_]{2,30}$')
# do not require a doc string on system methods
NO_REQUIRED_DOC_RGX = re.compile('__.*__')
del re
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(parent, (astng.For, astng.ListComp, astng.GenExpr)):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ('module', 'class', 'method', 'function'):
nice_stats[node_type] = {}
total = stats[node_type]
if total == 0:
doc_percent = 0
badname_percent = 0
else:
documented = total - stats['undocumented_'+node_type]
doc_percent = float((documented)*100) / total
badname_percent = (float((stats['badname_'+node_type])*100)
/ total)
nice_stats[node_type]['percent_documented'] = doc_percent
nice_stats[node_type]['percent_badname'] = badname_percent
lines = ('type', 'number', 'old number', 'difference',
'%documented', '%badname')
for node_type in ('module', 'class', 'method', 'function'):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = diff_string(old, new)
else:
old, diff_str = 'NC', 'NC'
lines += (node_type, str(new), str(old), diff_str,
'%.2f' % nice_stats[node_type]['percent_documented'],
'%.2f' % nice_stats[node_type]['percent_badname'])
sect.append(Table(children=lines, cols=6, rheaders=1))
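# Illustrative sketch (added comment, not original pylint code): with stats
# recording a single fully documented module and nothing else, and empty
# old_stats, the flattened `lines` tuple starts
#   ('type', 'number', 'old number', 'difference', '%documented', '%badname',
#    'module', '1', 'NC', 'NC', '100.00', '0.00', 'class', '0', ...)
# which Table renders as a six-column report with a header row.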
MSGS = {
'E0100': ('__init__ method is a generator',
'Used when the special class method __init__ is turned into a '
'generator by a yield in its body.'),
'E0101': ('Explicit return in __init__',
'Used when the special class method __init__ has an explicit \
return value.'),
'E0102': ('%s already defined line %s',
'Used when a function / class / method is redefined.'),
'E0103': ('%r not properly in loop',
'Used when break or continue keywords are used outside a loop.'),
'E0104': ('Return outside function',
'Used when a "return" statement is found outside a function or '
'method.'),
'E0105': ('Yield outside function',
'Used when a "yield" statement is found outside a function or '
'method.'),
'E0106': ('Return with argument inside generator',
'Used when a "return" statement with an argument is found '
'in a generator function or method (e.g. with some '
'"yield" statements).'),
'E0107': ("Use of the non-existent %s operator",
"Used when you attempt to use the C-style pre-increment or"
"pre-decrement operator -- and ++, which doesn't exist in Python."),
'W0101': ('Unreachable code',
'Used when there is some code behind a "return" or "raise" \
statement, which will never be accessed.'),
'W0102': ('Dangerous default value %s as argument',
'Used when a mutable value as list or dictionary is detected in \
a default value for an argument.'),
'W0104': ('Statement seems to have no effect',
'Used when a statement doesn\'t have (or at least seems to) \
any effect.'),
'W0105': ('String statement has no effect',
'Used when a string is used as a statement (which of course \
has no effect). This is a particular case of W0104 with its \
own message so you can easily disable it if you\'re using \
those strings as documentation, instead of comments.'),
'W0107': ('Unnecessary pass statement',
'Used when a "pass" statement that can be avoided is '
'encountered.'),
'W0108': ('Lambda may not be necessary',
'Used when the body of a lambda expression is a function call \
on the same argument list as the lambda itself; such lambda \
expressions are in all but a few cases replaceable with the \
function being called in the body of the lambda.'),
'W0109': ("Duplicate key %r in dictionary",
"Used when a dictionary expression binds the same key multiple \
times."),
'W0122': ('Use of the exec statement',
'Used when you use the "exec" statement, to discourage its \
usage. That doesn\'t mean you can not use it !'),
'W0141': ('Used builtin function %r',
'Used when a blacklisted builtin function is used (see the '
'bad-function option). Usual blacklisted functions are the ones '
'like map or filter, where Python now offers cleaner '
'alternatives like list comprehensions.'),
'W0142': ('Used * or ** magic',
'Used when a function or method is called using `*args` or '
'`**kwargs` to dispatch arguments. This doesn\'t improve '
'readability and should be used with care.'),
'W0150': ("%s statement in finally block may swallow exception",
"Used when a break or a return statement is found inside the \
finally clause of a try...finally block: the exceptions raised \
in the try clause will be silently swallowed instead of being \
re-raised."),
'W0199': ('Assert called on a 2-tuple. Did you mean \'assert x,y\'?',
'A call of assert on a tuple will always evaluate to true if '
'the tuple is not empty, and will always evaluate to false if '
'it is.'),
'C0102': ('Black listed name "%s"',
'Used when the name is listed in the black list (unauthorized \
names).'),
'C0103': ('Invalid name "%s" (should match %s)',
'Used when the name doesn\'t match the regular expression \
associated to its type (constant, variable, class...).'),
'C0111': ('Missing docstring', # W0131
'Used when a module, function, class or method has no docstring.\
Some special methods like __init__ do not necessarily require a \
docstring.'),
'C0112': ('Empty docstring', # W0132
'Used when a module, function, class or method has an empty \
docstring (it would be too easy ;).'),
'C0121': ('Missing required attribute "%s"', # W0103
'Used when an attribute required for modules is missing.'),
}
class BasicChecker(BaseChecker):
"""checks for :
* doc strings
* modules / classes / functions / methods / arguments / variables name
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = IASTNGChecker
name = 'basic'
msgs = MSGS
priority = -1
options = (('required-attributes',
{'default' : (), 'type' : 'csv',
'metavar' : '<attributes>',
'help' : 'Required attributes for module, separated by a '
'comma'}
),
('no-docstring-rgx',
{'default' : NO_REQUIRED_DOC_RGX,
'type' : 'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match '
'functions or classes name which do not require a '
'docstring'}
),
## ('min-name-length',
## {'default' : 3, 'type' : 'int', 'metavar' : '<int>',
## 'help': 'Minimal length for module / class / function / '
## 'method / argument / variable names'}
## ),
('module-rgx',
{'default' : MOD_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'module names'}
),
('const-rgx',
{'default' : CONST_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'module level names'}
),
('class-rgx',
{'default' : CLASS_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'class names'}
),
('function-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'function names'}
),
('method-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'method names'}
),
('attr-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'instance attribute names'}
),
('argument-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'argument names'}),
('variable-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'variable names'}
),
('inlinevar-rgx',
{'default' : COMP_VAR_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'list comprehension / generator expression variable \
names'}
),
('good-names',
{'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Good variable names which should always be accepted,'
' separated by a comma'}
),
('bad-names',
{'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Bad variable names which should always be refused, '
'separated by a comma'}
),
('bad-functions',
{'default' : ('map', 'filter', 'apply', 'input'),
'type' :'csv', 'metavar' : '<builtin function names>',
'help' : 'List of builtins function names that should not be '
'used, separated by a comma'}
),
)
reports = ( ('R0101', 'Statistics by type', report_by_type_stats), )
def __init__(self, linter):
BaseChecker.__init__(self, linter)
self.stats = None
self._returns = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics
"""
self._returns = []
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0,
method=0, class_=0,
badname_module=0,
badname_class=0, badname_function=0,
badname_method=0, badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0)
def visit_module(self, node):
"""check module name, docstring and required arguments
"""
self.stats['module'] += 1
self._check_name('module', node.name.split('.')[-1], node)
self._check_docstring('module', node)
self._check_required_attributes(node, self.config.required_attributes)
def visit_class(self, node):
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats['class'] += 1
self._check_name('class', node.name, node)
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring('class', node)
self._check_redefinition('class', node)
for attr, anodes in node.instance_attrs.items():
self._check_name('attr', attr, anodes[0])
def visit_discard(self, node):
"""check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astng.Const) and isinstance(expr.value, basestring):
# treat string statement in a separated message
self.add_message('W0105', node=node)
return
# ignore if this is :
# * a function call (can't predict side effects)
# * the unique children of a try/except body
# * a yield (which are wrapped by a discard node in _ast XXX)
if not (any(expr.nodes_of_class((astng.CallFunc, astng.Yield)))
or isinstance(node.parent, astng.TryExcept) and node.parent.body == [node]):
self.add_message('W0104', node=node)
def visit_pass(self, node):
"""check is the pass statement is really necessary
"""
# if self._returns is empty, we're outside a function !
if len(node.parent.child_sequence(node)) > 1:
self.add_message('W0107', node=node)
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious
"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astng.CallFunc):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
# XXX are lambda still different with astng >= 0.18 ?
# *args and **kwargs need to be treated specially, since they
# are structured differently between the lambda and the function
# call (in the lambda they appear in the args.args list and are
# indicated as * and ** by two bits in the lambda's flags, but
# in the function call they are omitted from the args list and
# are indicated by separate attributes on the function call node).
ordinary_args = list(node.args.args)
if node.args.kwarg:
if (not call.kwargs
or not isinstance(call.kwargs, astng.Name)
or node.args.kwarg != call.kwargs.name):
return
elif call.kwargs:
return
if node.args.vararg:
if (not call.starargs
or not isinstance(call.starargs, astng.Name)
or node.args.vararg != call.starargs.name):
return
elif call.starargs:
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(call.args):
return
for i in xrange(len(ordinary_args)):
if not isinstance(call.args[i], astng.Name):
return
if node.args.args[i].name != call.args[i].name:
return
self.add_message('W0108', line=node.fromlineno, node=node)
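# Illustrative sketch (added comment, not original pylint code): the kind of
# lambda this check flags versus ones it leaves alone.
#
#   f = lambda x, y: foo(x, y)   # W0108: foo could be used directly
#   g = lambda x, y: foo(y, x)   # not flagged: argument order differs
#   h = lambda x=1: foo(x)       # not flagged: defaults prevent the comparison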
def visit_function(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
is_method = node.is_method()
self._returns.append([])
f_type = is_method and 'method' or 'function'
self.stats[f_type] += 1
# function name
self._check_name(f_type, node.name, node)
# docstring
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring(f_type, node)
# check default arguments' values
self._check_defaults(node)
# check arguments name
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
# check for redefinition
self._check_redefinition(is_method and 'method' or 'function', node)
def leave_function(self, node):
"""most of the work is done here on close:
checks for max returns, branch, return in __init__
"""
returns = self._returns.pop()
if node.is_method() and node.name == '__init__':
if node.is_generator():
self.add_message('E0100', node=node)
else:
values = [r.value for r in returns]
if [v for v in values if not (v is None or
(isinstance(v, astng.Const) and v.value is None)
or (isinstance(v, astng.Name) and v.name == 'None'))]:
self.add_message('E0101', node=node)
elif node.is_generator():
# make sure we don't mix non-None returns and yields
for retnode in returns:
if isinstance(retnode, astng.Return) and \
isinstance(retnode.value, astng.Const) and \
retnode.value.value is not None:
self.add_message('E0106', node=node,
line=retnode.fromlineno)
def visit_assname(self, node):
"""check module level assigned names"""
frame = node.frame()
ass_type = node.ass_type()
if isinstance(ass_type, astng.Comprehension):
self._check_name('inlinevar', node.name, node)
elif isinstance(frame, astng.Module):
if isinstance(ass_type, astng.Assign) and not in_loop(ass_type):
self._check_name('const', node.name, node)
elif isinstance(frame, astng.Function):
# globally introduced variables aren't in the function locals
if node.name in frame:
self._check_name('variable', node.name, node)
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# if self._returns is empty, we're outside a function !
if not self._returns:
self.add_message('E0104', node=node)
return
self._returns[-1].append(node)
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, 'return', (astng.Function,))
def visit_yield(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
# if self._returns is empty, we're outside a function !
if not self._returns:
self.add_message('E0105', node=node)
return
self._returns[-1].append(node)
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
self._check_in_loop(node, 'continue')
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Is it right sibling ?
self._check_unreachable(node)
self._check_in_loop(node, 'break')
# 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, 'break', (astng.For, astng.While,))
def visit_raise(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message('W0122', node=node)
def visit_callfunc(self, node):
"""visit a CallFunc node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
if isinstance(node.func, astng.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (node.frame().has_key(name) or
node.root().has_key(name)):
if name in self.config.bad_functions:
self.add_message('W0141', node=node, args=name)
if node.starargs or node.kwargs:
scope = node.scope()
if isinstance(scope, astng.Function):
toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg),
(node.kwargs, scope.args.kwarg)) if n]
if toprocess:
for cfnode, fargname in toprocess[:]:
if getattr(cfnode, 'name', None) == fargname:
toprocess.remove((cfnode, fargname))
if not toprocess:
return # W0142 can be skipped
self.add_message('W0142', node=node.func)
def visit_unaryop(self, node):
"""check use of the non-existent ++ adn -- operator operator"""
if ((node.op in '+-') and
isinstance(node.operand, astng.UnaryOp) and
(node.operand.op == node.op)):
self.add_message('E0107', node=node, args=node.op*2)
def visit_assert(self, node):
"""check the use of an assert statement on a tuple."""
if node.fail is None and isinstance(node.test, astng.Tuple) and \
len(node.test.elts) == 2:
self.add_message('W0199', line=node.fromlineno, node=node)
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, v in node.items:
if isinstance(k, astng.Const):
key = k.value
if key in keys:
self.add_message('W0109', node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message('W0101', node=unreach_stmt)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astng.For, astng.While)):
break
_node = _node.parent
else:
self.add_message('E0103', node=node, args=node_name)
def _check_redefinition(self, redef_type, node):
"""check for redefinition of a function / method / class name"""
defined_self = node.parent.frame()[node.name]
if defined_self is not node and not are_exclusive(node, defined_self):
self.add_message('E0102', node=node,
args=(redef_type, defined_self.fromlineno))
def _check_docstring(self, node_type, node):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
self.stats['undocumented_'+node_type] += 1
self.add_message('C0111', node=node)
elif not docstring.strip():
self.stats['undocumented_'+node_type] += 1
self.add_message('C0112', node=node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
#if type(arg) is type(''):
if isinstance(arg, astng.AssName):
self._check_name('argument', arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _check_name(self, node_type, name, node):
"""check for a name using the type's regexp"""
if name in self.config.good_names:
return
if name in self.config.bad_names:
self.stats['badname_' + node_type] += 1
self.add_message('C0102', node=node, args=name)
return
regexp = getattr(self.config, node_type + '_rgx')
if regexp.match(name) is None:
self.add_message('C0103', node=node, args=(name, regexp.pattern))
self.stats['badname_' + node_type] += 1
def _check_defaults(self, node):
"""check for dangerous default values as arguments"""
for default in node.args.defaults:
try:
value = default.infer().next()
except astng.InferenceError:
continue
if isinstance(value, (astng.Dict, astng.List)):
if value is default:
msg = default.as_string()
else:
msg = '%s (%s)' % (default.as_string(), value.as_string())
self.add_message('W0102', node=node, args=(msg,))
def _check_required_attributes(self, node, attributes):
"""check for required attributes"""
for attr in attributes:
if not node.has_key(attr):
self.add_message('C0121', node=node, args=attr)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If we found before a try...finally bloc a parent which its type is
in breaker_classes, we skip the whole check."""
# if self._tryfinallys is empty, we're not a in try...finally bloc
if not self._tryfinallys:
return
# the node could be a grand-grand...-children of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
self.add_message('W0150', node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicChecker(linter))
|
dbbhattacharya/kitsune
|
vendor/packages/pylint/checkers/base.py
|
Python
|
bsd-3-clause
| 30,316
|
[
"VisIt"
] |
da2d6c402385bd1796921d3fca369c853ade89148b59653dfa7203bc84759614
|
from . import config
from . import utils
from . import core
from . import io
from .external import Structure
from .core import *
from .DFT import *
from .Abinit import *
from .utils import *
from .flows import *
|
trangel/OPTpy
|
OPTpy/__init__.py
|
Python
|
gpl-3.0
| 213
|
[
"ABINIT"
] |
fa4c698f19383e40a5e4392091b28f3df58ed7120955203c96a98cc122d66b10
|
"""
=================
Lorentzian Fitter
=================
"""
from __future__ import print_function
import numpy
from numpy.ma import median
from numpy import pi
from ...mpfit import mpfit
from . import fitter
from astropy.extern.six.moves import xrange
class LorentzianFitter(fitter.SimpleFitter):
def __init__(self):
self.npars = 3
self.npeaks = 1
self.onepeaklorentzfit = self._fourparfitter(self.onepeaklorentzian)
def __call__(self,*args,**kwargs):
return self.multilorentzfit(*args,**kwargs)
def onedlorentzian(x,H,A,dx,w):
"""
Returns a 1-dimensional lorentzian of form
H+A/(2*pi)*w/((x-dx)**2 + (w/2.0)**2)
"""
return H+A/(2*pi)*w/((x-dx)**2 + (w/2.0)**2)
def n_lorentzian(pars=None,a=None,dx=None,width=None):
"""
Returns a function that sums over N lorentzians, where N is the length of
a,dx,width *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
width - line widths (Lorentzian FWHM)
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
width = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(width) == len(a)):
raise ValueError("Wrong array lengths! dx: %i width %i a: %i" % (len(dx),len(width),len(a)))
def L(x):
v = numpy.zeros(len(x))
for i in range(len(dx)):
v += a[i] / (2*pi) * width[i] / ((x-dx[i])**2 + (width[i]/2.0)**2)
return v
return L
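# Illustrative usage sketch (added comment, not part of the original module):
# the flat `pars` form repeats (amplitude, center, width) for each component.
#
#   x = numpy.linspace(-10, 10, 201)
#   L = n_lorentzian(pars=[1.0, -2.0, 0.5,   # a1, dx1, width1
#                          0.5,  3.0, 1.0])  # a2, dx2, width2
#   y = L(x)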
def multilorentzfit(self):
"""
not implemented
"""
print("Not implemented")
|
e-koch/pyspeckit
|
pyspeckit/spectrum/models/lorentzian.py
|
Python
|
mit
| 1,941
|
[
"Gaussian"
] |
5fad526e84854b14f58a85d79c1e1e3f76cc522865f5b9270b05152de0f2152a
|
__author__ = 'Victoria'
#from src.game.Creature import *
from src.common.Observable import *
from src.game.Room2 import *
from src.game.StatePattern4 import *
from random import *
class Monster(Observable, Observer, Room):
def __init__(self, name):
super(Monster, self).__init__()
self._name = name
self.ready = None
self._health = 1
self._healthMax = 10
self.roamState = RoamState(self,name)
self.attackState = AttackState(self, name)
self.runState = RunState(self,name)
self.restState = RestState(self,name)
self.currentState = None
self.setRoam()
def accept(self, visitor):
visitor.visit(self)
def visit(self, room):
room.monster(self)
room.occupy(self)
def roam(self):
self.ready = True
print ("%s am a monster roaming" % self._name)
self.notifyObservers()
#self.doDamage()
def doDamage(self):
self.damage = min(
max(randint(0, 2) - randint(0, 2), 0),
self._health)
self._health -= self.damage
if self.damage == 0:
print ("monster avoids heros's attack.")
else:
print ("hero injures monster!")
self._health -= 1
if self._health <= 0:
print ("Monster died!")
def act(self):
if self.ready:
self.roam()
self.ready = False
def update(self, Observable):
self.roam()
def display(self):
print ("%s am a monster about to attack you!" % self._name)
def northB(self):
self.ready = True
self.currentState.north()
def south(self):
self.ready = True
self.currentState.south()
def east(self):
self.ready = True
self.currentState.east()
def west(self):
self.ready = True
self.currentState.west()
def setRoam(self):
self.currentState = self.roamState
def setAttack(self):
self.currentState = self.attackState
def northward(self):
self.currentState.north()
def west(self):
self.currentState.west()
def east(self):
self.currentState.east()
def south(self):
self.currentState.south()
def attacking(self):
self.currentState.attack()
|
victorianorton/SimpleRPGGame
|
src/game/Monsters2.py
|
Python
|
mit
| 2,335
|
[
"VisIt"
] |
4485f4318465f834d8c568346a4f90a4780e7d7e9b5ce90a2c5cb327171f3f93
|
# Functions for a modified version of the PhiGs algorithm
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1523372/
# (We've added the use of synteny)
import sys,numpy,random
from scipy.signal import find_peaks
from . import trees,scores
from .Family import *
from .analysis import printTable
#### Main function
def createFamiliesO(tree,strainNamesT,scoresO,genesO,aabrhHardCoreL,paramD,subtreeD,outputSummaryF):
'''Given a graph of genes and their similarity scores find families
using a PhiGs-like algorithm, with synteny also considered.
Here's a hierarchy of (major) function calls below this:
createFamiliesO
createAllFamiliesDescendingFromInternalNode
createFamilyFromSeed
createAllFamiliesAtTip
createAllLocusFamiliesDescendingFromInternalNode
createAllLocusFamiliesAtOneInternalNode
createLocusFamilyFromSeed
createAllLocusFamiliesAtTip
'''
# checks
homologyCheck(genesO,aabrhHardCoreL,scoresO,outputSummaryF,paramD)
# initialize scoresO.nodeConnectD and scoresO.ScoreSummaryD
scoresO.createNodeConnectD()
scoresO.createAabrhScoreSummaryD(strainNamesT,aabrhHardCoreL,genesO)
# create an object of class Families to store this in.
familiesO = Families(tree)
famNumCounter = 0
locusFamNumCounter = 0
# other assorted things we'll need
# geneUsedD keeps track of which genes have been used. Restricting
# to only those genes in the tree
geneUsedD = {gene: False for gene in genesO.iterGenes(strainNamesT)}
nodeGenesD = createNodeGenesD(strainNamesT,genesO) # has genes divided by node
tipFamilyRawThresholdD = getTipFamilyRawThresholdD(tree,scoresO,paramD)
# get thresholds for family formation
absMinRawThresholdForHomologyD = getAbsMinRawThresholdForHomologyD(paramD,scoresO,genesO,aabrhHardCoreL)
synThresholdD = getSynThresholdD(paramD,scoresO,genesO,aabrhHardCoreL,tree)
printThresholdSummaryFile(paramD,absMinRawThresholdForHomologyD,synThresholdD)
# family formation
for familyMrca,lchild,rchild in createNodeProcessOrderList(tree):
# this is preorder, so we get internal nodes before tips
if lchild != None:
# not a tip
geneUsedD,locusFamNumCounter,famNumCounter,familiesO = createAllFamiliesDescendingFromInternalNode(subtreeD,familyMrca,nodeGenesD,scoresO,genesO,absMinRawThresholdForHomologyD,synThresholdD,paramD,geneUsedD,familiesO,famNumCounter,locusFamNumCounter)
else:
geneUsedD,locusFamNumCounter,famNumCounter,familiesO = createAllFamiliesAtTip(nodeGenesD,familyMrca,geneUsedD,tipFamilyRawThresholdD,scoresO,genesO,absMinRawThresholdForHomologyD,synThresholdD,paramD,familiesO,famNumCounter,locusFamNumCounter)
# Write family formation summary file
summaryL=[]
summaryL.append(["Total number of Families",str(len(familiesO.familiesD))])
summaryL.append(["Total number of LocusFamilies",str(len(familiesO.locusFamiliesD))])
singleLfFams = 0
multipleLfFams = 0
singleStrainFams = 0
multipleStrainFams = 0
for fam in familiesO.iterFamilies():
# locus families
if len(fam.getLocusFamilies()) == 1:
singleLfFams += 1
else:
multipleLfFams += 1
# strains
if len(set(fam.iterStrains())) == 1:
singleStrainFams += 1
else:
multipleStrainFams += 1
summaryL.append(["Number of families with one LocusFamily",str(singleLfFams)])
summaryL.append(["Number of families with multiple LocusFamilies",str(multipleLfFams)])
summaryL.append(["Number of families with gene(s) in only one strain",str(singleStrainFams)])
summaryL.append(["Number of families with genes in multiple strains",str(multipleStrainFams)])
printTable(summaryL,indent=0,fileF=outputSummaryF)
writeFamilies(familiesO,genesO,strainNamesT,paramD)
return familiesO
## Support functions
def homologyCheck(genesO,aabrhHardCoreL,scoresO,outputSummaryF,paramD):
'''Check if the number of genes in the hard core is low. Print warning if so. Also check for homology peak in raw scores histogram.'''
## Check number of hard core genes
# figure out average number of genes per genome
numGenes=0
for rangeStart,rangeEnd in genesO.geneRangeByStrainD.values():
if rangeEnd > numGenes:
numGenes = rangeEnd
avNumGenesPerGenome = (numGenes+1) / len(genesO.geneRangeByStrainD)
propHC = round(len(aabrhHardCoreL) / avNumGenesPerGenome,3)
if propHC < 0.2:
print("Warning: the hard core gene set has only",len(aabrhHardCoreL),"genes. This is",propHC,file=outputSummaryF)
print(""" of the average number of genes in the input genomes. A low number
of core genes might result from one or more species being too
distantly related, and could be associated with problems in family and
island formation.""",file=outputSummaryF)
## Check for homology (right) peak in raw scores histogram
scoreHistNumBins = paramD['scoreHistNumBins']
binWidth = 1.0/scoreHistNumBins # since scores range from 0-1
homologousPeakMissingL = []
for strainPair in scoresO.getStrainPairs():
scoreIterator = scoresO.iterateScoreByStrainPair(strainPair,'rawSc')
binHeightL,indexToBinCenterL = scoreHist(scoreIterator,scoreHistNumBins)
homologPeakLeftExtremePos=homologPeakChecker(binHeightL,indexToBinCenterL,binWidth,paramD)
if homologPeakLeftExtremePos == float('inf'):
homologousPeakMissingL.append(strainPair)
if homologousPeakMissingL != []:
print("""Warning: for one or more strain pairs, we failed to find a
homologous (right) peak in the raw score histogram. The pair(s) in
question are:""",file=outputSummaryF)
for pairT in homologousPeakMissingL:
print(" ",pairT[0]+'-'+pairT[1],file=outputSummaryF)
print(""" A possible explanation for this failure is that one or more species
is too distantly related. If this is the case it will result in poor
family formation, and most families (e.g. 80% or more) will have genes
in only one strain."""+"\n",file=outputSummaryF)
def createNodeGenesD(strainNamesT,genesO):
'''Create a data structure to organize genes by strain. Returns a dict
where key is strain name and the values are lists of gene numbers.
'''
nodeGenesD = {strainName:[] for strainName in strainNamesT}
for strainName in strainNamesT:
for geneNum in genesO.iterGenesStrain(strainName):
nodeGenesD[strainName].append(geneNum)
return nodeGenesD
def createNodeProcessOrderList(tree):
'''Given a tree, output a list specifying nodes in pre-order (ancestors
before their descendants). For each node we give a tuple (node #, left
node #, right node #).
'''
if tree[1] == ():
return [(tree[0], None, None)]
else:
l = createNodeProcessOrderList(tree[1])
r = createNodeProcessOrderList(tree[2])
return [(tree[0], tree[1][0], tree[2][0])] + l + r
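# Illustrative sketch (added comment, not in the original module): for a
# three-node tree whose root is node 0 with tips 1 and 2,
#
#   createNodeProcessOrderList((0, (1, (), ()), (2, (), ())))
#   -> [(0, 1, 2), (1, None, None), (2, None, None)]
#
# so ancestors always appear before their descendants.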
def getTipFamilyRawThresholdD(tree,scoresO,paramD):
'''Return a dictionary containing a raw score threshold for each
tip. This threshold is for use in forming families on the tip,
defining the minimum raw score at which we will combine two genes
into a family.'''
tipFamilyRawThresholdD = {}
for leaf in trees.leafList(tree):
# get average score at core genes for neighbors
# put in call to get average...
threshold,std = getNearestNeighborAverageScore(leaf,tree,scoresO)
# multiply in an adjustment parameter (since average core gene
# scores of neighbors would be way too high)
threshold *= paramD['singleStrainFamilyThresholdAdjust']
tipFamilyRawThresholdD[leaf] = threshold
return tipFamilyRawThresholdD
def getNearestNeighborAverageScore(species,tree,scoresO):
'''Get all the nearest neighbors of species, and collect the average
score for each against species at aabrh core genes. Return the average
of this. Assumes that scoreSummaryD has been initialized in
scoresO.
'''
neighbL = trees.getNearestNeighborL(species,tree)
avScore = 0
avStd = 0
for neighb in neighbL:
sc,std = scoresO.scoreSummaryD[(species,neighb)]
avScore += sc
avStd += std
avScore /= len(neighbL)
avStd /= len(neighbL)
return avScore,avStd
## Histograms and thresholds
def getAbsMinRawThresholdForHomologyD(paramD,scoresO,genesO,aabrhHardCoreL):
'''For each pair of strains (including vs. self) determine a minimum
raw score below which we take scores to indicate non-homology. Return
in a dictionary keyed by strain pair. This function works as
follows. It calls quantile on the aabrh scores for each pair, and gets
a homologDistLeftExtremePos for that. It also calculates the position
of the left (nonhomologous) peak in the histogram of all scores
between the strainPair. It takes the min of the right extreme of this,
the left extreme from aabrh scores, and
defaultAbsMinRawThresholdForHomology.
'''
quantileForMinRawThreshold = paramD['quantileForMinRawThreshold']
scoreHistNumBins = paramD['scoreHistNumBins']
binWidth = 1.0/scoreHistNumBins # since scores range from 0-1
homologyRawThresholdD = {}
for strainPair in scoresO.getStrainPairs():
# get all scores
scoreIterator = scoresO.iterateScoreByStrainPair(strainPair,'rawSc')
binHeightL,indexToBinCenterL = scoreHist(scoreIterator,scoreHistNumBins)
# get only scores from aabrhHardCore pairs
aabrhScL = scores.getScoresStrainPair(scoresO,strainPair,'rawSc',genesO,aabrhHardCoreL)
homologDistLeftExtremePos = numpy.quantile(aabrhScL,quantileForMinRawThreshold)
threshold = getMinRawThreshold(binHeightL,indexToBinCenterL,binWidth,homologDistLeftExtremePos,paramD)
homologyRawThresholdD[strainPair] = threshold
return homologyRawThresholdD
def scoreHist(scoreIterator,scoreHistNumBins):
'''Get a histogram with numpy, and return the bin height, and also a
list of indices to the middle position of each bin (in terms of the x value).'''
binHeightL,edges = numpy.histogram(list(scoreIterator),bins=scoreHistNumBins,density=True)
# make a list where the indices correspond to those of binHeightL,
# and the values give the score value at the center of that bin
indexToBinCenterL = []
for i in range(1,len(edges)):
center = (edges[i]+edges[i-1])/2
indexToBinCenterL.append(center)
return binHeightL,indexToBinCenterL
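# Illustrative sketch (added comment, not original code): with
# scoreHistNumBins=4 and scores spanning the full 0-1 range, numpy.histogram
# gives edges [0, 0.25, 0.5, 0.75, 1.0], so indexToBinCenterL becomes
# [0.125, 0.375, 0.625, 0.875] and binHeightL holds the matching densities.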
def homologPeakChecker(binHeightL,indexToBinCenterL,binWidth,paramD):
'''Function to check for a peak due to homology (right peak in
histogram). If such a peak exists, this function returns the position
(in score units) of the left most base of that peak. If no such peak
exists, this function returns infinity.
'''
peakL = [] # to collect in
# in order to get a rightmost peak (if any) we add a dummy bin of
# height 0 on right. add 1 to indexToBinCenterL for case of right
# base of a peak in the last bin.
tempBinHeightL = numpy.append(binHeightL,0)
tempIndexToBinCenterL = numpy.append(indexToBinCenterL,1)
# case 1 (normal case)
L = findPeaksOneCase(tempBinHeightL,tempIndexToBinCenterL,binWidth,paramD['homologPeakWidthCase1'],paramD['widthRelHeight'],paramD['homologRequiredProminenceCase1'],paramD['homologLeftPeakLimitCase1'],paramD['homologRightPeakLimit'])
peakL.extend(L)
# case 2 (extreme prominence. But allow to be very narrow)
L = findPeaksOneCase(tempBinHeightL,tempIndexToBinCenterL,binWidth,paramD['homologPeakWidthCase2'],paramD['widthRelHeight'],paramD['homologRequiredProminenceCase2'],paramD['homologLeftPeakLimitCase2'],paramD['homologRightPeakLimit'])
peakL.extend(L)
# case 3 (wide width with low prominence)
L = findPeaksOneCase(tempBinHeightL,tempIndexToBinCenterL,binWidth,paramD['homologPeakWidthCase3'],paramD['widthRelHeight'],paramD['homologRequiredProminenceCase3'],paramD['homologLeftPeakLimitCase3'],paramD['homologRightPeakLimit'])
peakL.extend(L)
if peakL == []:
return float('inf')
else:
# if there's more than one, we'll return the leftBasePos of the highest.
peakL.sort(reverse=True)
return peakL[0][2]
def findPeaksOneCase(binHeightL,indexToBinCenterL,binWidth,peakWidth,widthRelHeight,requiredProminence,leftPeakLimit,rightPeakLimit):
'''Make one call to find_peaks. Peaks must be wider than peakWidth
(which is given in units of score) and more prominent than
requiredProminence, and fall between leftPeakLimit and
rightPeakLimit. Returns tuple
(peakHeight,peakPos,leftExtremeOfPeakPos,rightExtremeOfPeakPos) of any peaks
that meet criteria. All position values are returned in units of
score.
'''
peakWidthInBins = peakWidth / binWidth
# we always measure width widthRelHeight down from peak toward base
peakIndL, propertiesD = find_peaks(binHeightL, width = peakWidthInBins, rel_height = widthRelHeight, prominence = requiredProminence)
# make sure they are to the right of leftPeakLimit
peakPosInScoreUnitsL = []
for i in range(len(peakIndL)):
peakInd = peakIndL[i]
peakHeight = binHeightL[peakInd]
peakPos = indexToBinCenterL[peakInd]
if leftPeakLimit < peakPos <= rightPeakLimit:
# peak falls between the specified limits
leftExtremeOfPeakPosInd = int(round(propertiesD["left_ips"][i]))
leftExtremeOfPeakPos = indexToBinCenterL[leftExtremeOfPeakPosInd]
rightExtremeOfPeakPosInd = int(round(propertiesD["right_ips"][i]))
rightExtremeOfPeakPos = indexToBinCenterL[rightExtremeOfPeakPosInd]
peakPosInScoreUnitsL.append((peakHeight,peakPos,leftExtremeOfPeakPos,rightExtremeOfPeakPos))
return peakPosInScoreUnitsL
def getMinRawThreshold(binHeightL,indexToBinCenterL,binWidth,homologDistLeftExtremePos,paramD):
'''Given a list of bin heights and another list giving the score
values at the middle of each bin, determine a threshold below which a
score should be taken to indicate non-homology. We do this by looking
for the non-homologous (left) peak in the score histogram. This
function assumes there is a right homologous peak present and takes
the left extreme of this peak as input.
'''
L = findPeaksOneCase(binHeightL,indexToBinCenterL,binWidth,paramD['nonHomologPeakWidth'],paramD['widthRelHeight'],paramD['nonHomologPeakProminence'],paramD['nonHomologLeftPeakLimit'],paramD['nonHomologRightPeakLimit'])
if L == []:
# no peak found, use default threshold
threshold = min(paramD['defaultAbsMinRawThresholdForHomology'],homologDistLeftExtremePos)
else:
L.sort(reverse=True) # in the unlikely case there's more than one
peakHeight,peakPos,leftExtremeOfPeakPos,rightExtremeOfPeakPos = L[0]
# we now find the minimum of these two. We're after a threshold
# where we're confident that things below it are definitely not
# homologous.
threshold = min(rightExtremeOfPeakPos,homologDistLeftExtremePos)
return threshold
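# Illustrative numeric sketch (added comment, not original code): if the
# non-homologous (left) peak's right extreme sits at a raw score of 0.35 and
# the left extreme of the homolog distribution (from the aabrh quantile) is
# 0.55, the returned threshold is min(0.35, 0.55) = 0.35; scores below it are
# treated as evidence of non-homology.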
def getSynThresholdD(paramD,scoresO,genesO,aabrhHardCoreL,tree):
'''Creates a dictionary to store synteny thresholds. This dictionary
itself contains three dictionaries one each for minCoreSyntThresh
(minimum core synteny score allowed for family formation),
minSynThresh (minimum synteny score allowed for family formation) the
synAdjustThreshold (the synteny score above which we adjust up the raw
score to make family formation more likely.) These dictionaries in
turn are keyed by strain pair.
'''
quantileForObtainingSynThresholds = paramD['quantileForObtainingSynThresholds']
multiplierForObtainingSynThresholds = paramD['multiplierForObtainingSynThresholds']
quantileForObtainingSynAdjustThreshold = paramD['quantileForObtainingSynAdjustThreshold']
synThresholdD = {}
synThresholdD['minSynThreshold'] = {}
synThresholdD['minCoreSynThreshold'] = {}
synThresholdD['synAdjustThreshold'] = {}
# coreSynSc
for strainPair in scoresO.getStrainPairs():
aabrhScL = scores.getScoresStrainPair(scoresO,strainPair,'coreSynSc',genesO,aabrhHardCoreL)
thresh = multiplierForObtainingSynThresholds * numpy.quantile(aabrhScL,quantileForObtainingSynThresholds)
synThresholdD['minCoreSynThreshold'][strainPair] = thresh
# synSc and synAdjust
for strainPair in scoresO.getStrainPairs():
aabrhScL = scores.getScoresStrainPair(scoresO,strainPair,'synSc',genesO,aabrhHardCoreL)
thresh = multiplierForObtainingSynThresholds * numpy.quantile(aabrhScL,quantileForObtainingSynThresholds)
synThresholdD['minSynThreshold'][strainPair] = thresh
adjustThresh = numpy.quantile(aabrhScL,quantileForObtainingSynAdjustThreshold)
synThresholdD['synAdjustThreshold'][strainPair] = adjustThresh
# In the case of family formation at a tip, we're interested in
# genes that duplicated after the last species split off. So the
# thresholds at a tip really shouldn't be based on the given
# genome against itself. Basing them instead on what we saw at the
# parent node (of the last divergence) seems reasonable, and is
# what we'll do here. We'll now replace the entries in
# synThresholdD for tips with the values from the parent node.
for strainPair in scoresO.getStrainPairs():
if strainPair[0] == strainPair[1]:
# Tip
leafStrain = strainPair[0]
synThresholdD['minCoreSynThreshold'][strainPair] = getTipThreshold(tree,leafStrain,synThresholdD,'minCoreSynThreshold')
synThresholdD['minSynThreshold'][strainPair] = getTipThreshold(tree,leafStrain,synThresholdD,'minSynThreshold')
synThresholdD['synAdjustThreshold'][strainPair] = getTipThreshold(tree,leafStrain,synThresholdD,'synAdjustThreshold')
return synThresholdD
def getTipThreshold(tree,leafStrain,synThresholdD,thresholdType):
'''Get the synteny threshold values at a tip called leafStrain by
averaging the values between that strain and its nearest neighbors.'''
neighbL = trees.getNearestNeighborL(leafStrain,tree)
avThresh = 0
for neighb in neighbL:
strainPair = tuple(sorted((neighb,leafStrain)))
avThresh += synThresholdD[thresholdType][strainPair]
avThresh /= len(neighbL)
return avThresh
def printThresholdSummaryFile(paramD,absMinRawThresholdForHomologyD,synThresholdD):
'''Given threshold dictionaries, print the various family formation thresholds to a summary file.'''
threshSummaryL = []
threshSummaryL.append(['Strain pair','absMinRawThresholdForHomology','minCoreSynThreshold','minSynThreshold','synAdjustThreshold'])
if 'familyFormationThresholdsFN' in paramD:
with open(paramD['familyFormationThresholdsFN'],'w') as familyFormationThresholdsF:
for strainPair in absMinRawThresholdForHomologyD:
threshSummaryL.append([" ".join(strainPair),str(round(absMinRawThresholdForHomologyD[strainPair],4)),str(round(synThresholdD['minCoreSynThreshold'][strainPair],4)),str(round(synThresholdD['minSynThreshold'][strainPair],4)),str(round(synThresholdD['synAdjustThreshold'][strainPair],4))])
printTable(threshSummaryL,indent=0,fileF=familyFormationThresholdsF)
#### Family creation functions
def createAllFamiliesDescendingFromInternalNode(subtreeD,familyMrca,nodeGenesD,scoresO,genesO,absMinRawThresholdForHomologyD,synThresholdD,paramD,geneUsedD,familiesO,famNumCounter,locusFamNumCounter):
'''Creates all Families and subsidiary LocusFamilies descending from
the node rooted familyMrca. Basic parts of the Phigs algorithm are
here. Creating the seeds, and using them to get a family. (With very
minor changes in the use of the synteny adjustment). But then we
divide the family into LocusFamilies, which is not Phigs.
'''
subtree=subtreeD[familyMrca]
# for use in createAllLocusFamiliesDescendingFromInternalNode call below
familySubtreeNodeOrderL = createNodeProcessOrderList(subtree)
leftS,rightS = createLRSets(subtreeD,familyMrca,nodeGenesD,None)
seedL = createSeedL(leftS,rightS,scoresO,genesO,absMinRawThresholdForHomologyD,paramD)
for seed in seedL:
# each seed corresponds to a prospective gene family.
seedRawSc,seedG1,seedG2 = seed
if seedRawSc == -float('inf'):
# we've gotten to the point in the seed list with
# genes having no match on the other branch
break
else:
# getting initial family, using only raw score and synteny bump
famS=createFamilyFromSeed(seedG1,seedG2,geneUsedD,scoresO,leftS,rightS,genesO,seedRawSc,absMinRawThresholdForHomologyD,synThresholdD,paramD)
if famS == None:
# one of the genes in the family was already
# used, so createFamilyFromSeed returned None
continue
else:
# none of the genes in famS used yet
for gene in famS:
geneUsedD[gene] = True
# now set up familiesO to take this family and
# determine the corresponding locusFamilies
familiesO.initializeFamily(famNumCounter,familyMrca,[seedG1,seedG2])
locusFamNumCounter,familiesO = createAllLocusFamiliesDescendingFromInternalNode(subtreeD,familyMrca,genesO,famS,[seedG1,seedG2],famNumCounter,locusFamNumCounter,scoresO,paramD,synThresholdD,familiesO,familySubtreeNodeOrderL,nodeGenesD)
famNumCounter+=1 # important to increment this after call to createAllLocusFamiliesDescendingFromInternalNode
return geneUsedD,locusFamNumCounter,famNumCounter,familiesO
def createAllFamiliesAtTip(nodeGenesD,familyMrca,geneUsedD,tipFamilyRawThresholdD,scoresO,genesO,absMinRawThresholdForHomologyD,synThresholdD,paramD,familiesO,famNumCounter,locusFamNumCounter):
'''Creates all Families and subsidiary LocusFamilies at the tip
given by familyMrca. Because we've come through the nodes in
pre-order, we know that all unused genes at this node are in
families with mrca here. (They can still be multi-gene families.)
'''
unusedGenesAtThisTipS=set()
for gene in nodeGenesD[familyMrca]: # familyMrca is a tip
# gene is at this tip
if not geneUsedD[gene]:
# not used yet
unusedGenesAtThisTipS.add(gene)
# pull out the threshold we'll use given the strain
tipFamilyRawThreshold = tipFamilyRawThresholdD[familyMrca]
# Now we pull out families greedily. Simply take a gene,
# and get all other genes that isSameFamily says are shared
while len(unusedGenesAtThisTipS)>0:
seed = unusedGenesAtThisTipS.pop()
newFamS=set([seed])
for newGene in unusedGenesAtThisTipS:
if scoresO.isEdgePresentByEndNodes(seed,newGene):
addIt = isSameFamily(seed,newGene,scoresO,genesO,tipFamilyRawThreshold,absMinRawThresholdForHomologyD,synThresholdD,paramD)
if addIt:
newFamS.add(newGene)
# newFamS now contains a set of genes with significant
# similarity. Remove it from unusedGenesAtThisTipS,
# and create a new family from it.
unusedGenesAtThisTipS.difference_update(newFamS)
for gene in newFamS: # mark these as used
geneUsedD[gene] = True
familiesO.initializeFamily(famNumCounter,familyMrca)
lfOL,locusFamNumCounter = createAllLocusFamiliesAtTip(newFamS,genesO,familyMrca,scoresO,paramD,synThresholdD,famNumCounter,locusFamNumCounter)
for lfO in lfOL:
familiesO.addLocusFamily(lfO)
famNumCounter+=1 # important to increment this after creating LocusFamilies
return geneUsedD,locusFamNumCounter,famNumCounter,familiesO
def createLRSets(subtreeD,mrca,nodeGenesD,restrictS):
'''At given mrca, obtain all genes in species in left branch and put
in leftS, and all genes from species in right branch to
rightS. Restrict each of these to be only genes in restrictS. If
restrictS is None, then use all genes.
'''
subtree=subtreeD[mrca]
leftS=set()
for tip in trees.leafList(subtree[1]):
leftS.update(nodeGenesD[tip])
rightS=set()
for tip in trees.leafList(subtree[2]):
rightS.update(nodeGenesD[tip])
if restrictS != None:
leftS.intersection_update(restrictS)
rightS.intersection_update(restrictS)
return(leftS,rightS)
def closestMatch(gene,S,scoresO,genesO,absMinRawThresholdForHomologyD,paramD):
'''Find the closest match to gene among the genes in the set S in the
graph scoresO. Eliminate any matches that have a raw score below what
is in homologyHomologyRawThresholdD, a coreSynSc below
minCoreSynThresh, or a synteny score below synThresholdD.
'''
bestGene=None
bestEdgeScore = -float('inf')
connectL = scoresO.getConnectionsGene(gene)
if connectL != None:
for otherGene in connectL:
if otherGene in S:
if isSameFamily(gene,otherGene,scoresO,genesO,bestEdgeScore,absMinRawThresholdForHomologyD,None,paramD):
# we don't want to use synThresholdD, hence the Nones
bestEdgeScore = scoresO.getScoreByEndNodes(gene,otherGene,'rawSc')
bestGene = otherGene
return bestEdgeScore, gene, bestGene
def createSeedL(leftS,rightS,scoresO,genesO,absMinRawThresholdForHomologyD,paramD):
'''Create a list which has the closest match for each gene on the
opposite side of the tree. e.g. if a gene is in tree[1] then we're
looking for the gene in tree[2] with the closest match. We eliminate
any matches that are below threshold for normalized or synteny
scores. For each gene we get this closest match, put in a list, sort,
and return.
'''
seedL=[]
for gene in leftS:
seedL.append(closestMatch(gene,rightS,scoresO,genesO,absMinRawThresholdForHomologyD,paramD))
for gene in rightS:
seedL.append(closestMatch(gene,leftS,scoresO,genesO,absMinRawThresholdForHomologyD,paramD))
seedL.sort(reverse=True)
return seedL
def createFamilyFromSeed(g1,g2,geneUsedD,scoresO,leftS,rightS,genesO,thisFamRawThresh,absMinRawThresholdForHomologyD,synThresholdD,paramD):
'''Based on a seed (seedScore, g1, g2) search for a family. Using the
PhiGs approach, we collect all genes which are closer to members of
the family than the two seeds are from each other. We have a raw score
threshold below which we will not add genes. We also have a synteny
adjustment of the raw score where we make the raw score between a pair
a bit better if their synteny is above the species pair specific
adjustThresh in synThresholdD. In general, if a gene has syntenic
connections to genes already in the family, this makes us more
confident that this gene belongs in the family. Returns a set
containing genes in the family.
'''
if geneUsedD[g1] or geneUsedD[g2]:
# one of these has been used already, stop now.
return None
famS = set()
genesToSearchForConnectionsS = set([g1,g2])
while len(genesToSearchForConnectionsS) > 0:
matchesS = getFamilyMatches(genesToSearchForConnectionsS,scoresO,leftS,rightS,famS,genesO,thisFamRawThresh,absMinRawThresholdForHomologyD,synThresholdD,paramD,geneUsedD)
if matchesS == None:
return None
famS.update(genesToSearchForConnectionsS)
genesToSearchForConnectionsS = matchesS
return famS
def getFamilyMatches(genesToSearchForConnectionsS,scoresO,leftS,rightS,famS,genesO,thisFamRawThresh,absMinRawThresholdForHomologyD,synThresholdD,paramD,geneUsedD):
'''Collect the genes connected to any member of genesToSearchForConnectionsS
that belong in the family; return None if a candidate gene has already been
used (in which case the whole family is abandoned).'''
matchesS=set()
for famGene in genesToSearchForConnectionsS:
for newGene in scoresO.getConnectionsGene(famGene):
if newGene in leftS or newGene in rightS:
# it is from a species descended from the node
# we're working on
if newGene not in genesToSearchForConnectionsS and newGene not in famS:
# it shouldn't been in our current list to search,
# or be one we've already put in the family (or
# we'll waste effort)
addIt = isSameFamily(famGene,newGene,scoresO,genesO,thisFamRawThresh,absMinRawThresholdForHomologyD,synThresholdD,paramD)
if addIt:
if geneUsedD[newGene]:
# this one's been used already. That
# means the whole family should be
# thrown out. Just stop now.
return None
else:
matchesS.add(newGene)
return matchesS
def isSameFamily(famGene,newGene,scoresO,genesO,thisFamRawThresh,absMinRawThresholdForHomologyD,synThresholdD,paramD):
'''Given famGene that is inside a family, and newGene we are
considering adding, check the various scores to determine if we should
add it. Return boolean.
'''
rawSc = scoresO.getScoreByEndNodes(famGene,newGene,'rawSc')
synSc = scoresO.getScoreByEndNodes(famGene,newGene,'synSc')
# get minThresh from absMinRawThresholdForHomologyD
strain1 = genesO.numToStrainName(famGene)
strain2 = genesO.numToStrainName(newGene)
strainPair = tuple(sorted([strain1,strain2]))
absoluteMinRawThresh = absMinRawThresholdForHomologyD[strainPair]
if synThresholdD == None:
synAdjustThresh = float('inf')
else:
# change later
synAdjustThresh = synThresholdD['synAdjustThreshold'][strainPair]
addIt = False
if rawSc < absoluteMinRawThresh:
# Raw score is simply too low, don't add newGene. Raw score is
# below the minimum value we've calculated for this species
# pair. (Modification of PhiGs)
pass
elif rawSc >= thisFamRawThresh:
# If it's within the seed distance, add it
# (basic PhiGs approach). We include the =
# in case thisFamRawThresh is 1.
addIt = True
elif synSc >= synAdjustThresh:
# it's above the syn score adjustment
# threshold, so increase rawSc a bit. This
# addresses a problem with closely related
# families where the seed score is very
# similar. Sometimes by chance things
# that should have been added weren't
# because they weren't more similar than
# an already very similar seed.
# (Modification of PhiGs)
adjSc = rawSc * paramD['synAdjustExtent']
if adjSc > 1: adjSc = 1 # truncate back to 1
if adjSc >= thisFamRawThresh:
addIt = True
return addIt
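# Illustrative worked example for the logic above (hypothetical values, not
# from the original code): with absoluteMinRawThresh = 0.3,
# thisFamRawThresh = 0.8, synAdjustThresh = 0.6 and
# paramD['synAdjustExtent'] = 1.1, a pair with rawSc = 0.2 is rejected
# outright; rawSc = 0.85 is added directly; and rawSc = 0.75 with
# synSc = 0.7 gives adjSc = 0.75 * 1.1 = 0.825 >= 0.8, so it is added via
# the synteny adjustment.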
def createAllLocusFamiliesDescendingFromInternalNode(subtreeD,familyMrca,genesO,famGenesToSearchS,seedPairL,famNumCounter,locusFamNumCounter,scoresO,paramD,synThresholdD,familiesO,familySubtreeNodeOrderL,nodeGenesD):
'''Given a family in famGenesToSearchS, break it up into subsidiary locus families
based on synteny. We iterate through the subtree rooted at familyMrca
in pre-order (ancestors first). Using seeds, we try to find groups
among famGenesToSearchS that share high synteny.'''
# split out LocusFamilies at non-syntenic locations
for lfMrca,lchild,rchild in familySubtreeNodeOrderL:
if lchild != None:
# not a tip
lfOL,locusFamNumCounter,famGenesToSearchS = createAllLocusFamiliesAtOneInternalNode(subtreeD,lfMrca,nodeGenesD,genesO,famGenesToSearchS,scoresO,paramD,synThresholdD,famNumCounter,locusFamNumCounter)
for lfO in lfOL:
familiesO.addLocusFamily(lfO)
else:
# we're at a tip.
# get only the genes at this tip
genesAtThisTipS = famGenesToSearchS.intersection(nodeGenesD[lfMrca])
# remove them from famGenesToSearchS
famGenesToSearchS.difference_update(genesAtThisTipS)
# Get lf objects for all these genes
lfOL,locusFamNumCounter = createAllLocusFamiliesAtTip(genesAtThisTipS,genesO,lfMrca,scoresO,paramD,synThresholdD,famNumCounter,locusFamNumCounter)
# add to our families object
for lfO in lfOL:
familiesO.addLocusFamily(lfO)
return locusFamNumCounter,familiesO
def createAllLocusFamiliesAtOneInternalNode(subtreeD,lfMrca,nodeGenesD,genesO,famGenesToSearchS,scoresO,paramD,synThresholdD,famNumCounter,locusFamNumCounter):
'''Obtains all locus families at the internal node defined by lfMrca.'''
lfOL = []
while True:
lfSeedPairL = createLFSeed(subtreeD,lfMrca,nodeGenesD,genesO,famGenesToSearchS,scoresO,paramD,synThresholdD)
if lfSeedPairL == []:
# there are no (more) seeds straddling this internal node,
# break out
break
lfO,locusFamNumCounter,famGenesToSearchS = createLocusFamilyFromSeed(famNumCounter,locusFamNumCounter,lfMrca,lfSeedPairL,famGenesToSearchS,subtreeD,genesO,scoresO,paramD,synThresholdD)
lfOL.append(lfO)
return lfOL,locusFamNumCounter,famGenesToSearchS
def createLFSeed(subtreeD,lfMrca,nodeGenesD,genesO,famGenesToSearchS,scoresO,paramD,synThresholdD):
'''Given a set of genes famGenesToSearchS from a family, try to find a
seed based at lfMrca. A seed consists of two genes, one in the left
subtree and one in the right, which are syntenically consistent.
'''
leftS,rightS = createLRSets(subtreeD,lfMrca,nodeGenesD,famGenesToSearchS)
for lGene in leftS:
for rGene in rightS:
if isSameLocusFamily(lGene,rGene,scoresO,genesO,paramD,synThresholdD):
return [lGene,rGene]
return []
def createLocusFamilyFromSeed(famNumCounter,locusFamNumCounter,lfMrca,seedPairL,famGenesToSearchS,subtreeD,genesO,scoresO,paramD,synThresholdD):
'''Returns a LocusFamily object, containing genes associated with
those in seedPairL, in the subtree defined at lfMrca. Does single
linkage clustering, adding in anything in famGenesToSearchS with above
threshold synteny. Note that these seeds are not the seed from family
formation (which might not be syntenic), but rather an independently
generated pair which we know belongs in the same LocusFamily.
'''
lfO = LocusFamily(famNumCounter,locusFamNumCounter,lfMrca)
locusFamNumCounter+=1
famGenesToSearchS.difference_update(seedPairL)
lfO.addGenes(seedPairL,genesO)
subtree=subtreeD[lfMrca]
strainL = trees.leafList(subtree)
while True:
genesToAddS = getLocusFamilyMatches(lfO,famGenesToSearchS,genesO,strainL,scoresO,paramD,synThresholdD)
if len(genesToAddS) == 0:
break
famGenesToSearchS.difference_update(genesToAddS)
lfO.addGenes(genesToAddS,genesO)
return lfO,locusFamNumCounter,famGenesToSearchS
def getLocusFamilyMatches(lfO,famGenesToSearchS,genesO,strainL,scoresO,paramD,synThresholdD):
'''Given a LocusFamily object lfO and some remaining genes, search
through the remaining genes to find those that match syntenically and
are in a child species of lfMrca. Return a set of genes to add.
'''
genesToAddS=set()
for searchGene in famGenesToSearchS:
# test if searchGene is in a child species of lfMrca
if genesO.numToStrainName(searchGene) in strainL:
# this searchGene is in a strain that is a child of the lfMrca we're working on
for lfGene in lfO.iterGenes():
# we don't use absMinRawThreshold, thisFamRawThresh, or
# synAdjustThresh. If the pair have values above
# minCoreSynThreshold and minSynThreshold, then addIt will be
# True.
addIt = isSameLocusFamily(searchGene,lfGene,scoresO,genesO,paramD,synThresholdD)
if addIt:
genesToAddS.add(searchGene)
break
return genesToAddS
def isSameLocusFamily(gene1,gene2,scoresO,genesO,paramD,synThresholdD):
'''Given two genes in the same family, determine if they meet the
synteny requirements to be put in the same LocusFamily. Returns
boolean.
'''
if not scoresO.isEdgePresentByEndNodes(gene1,gene2):
# Within our families, there may be some gene-gene edges
# missing due to the fact that blast could have just missed
# significance etc. If the edge isn't there, then we do not
# have evidence that these genes should be in the same locus
# family, and we return false.
# NOTE: An alternative approach would be to actually calculate
# these scores here. But they're likely to be low...
return False
coreSynSc = scoresO.getScoreByEndNodes(gene1,gene2,'coreSynSc')
synSc = scoresO.getScoreByEndNodes(gene1,gene2,'synSc')
strain1 = genesO.numToStrainName(gene1)
strain2 = genesO.numToStrainName(gene2)
strainPair = tuple(sorted([strain1,strain2]))
minSynThreshold = synThresholdD['minSynThreshold'][strainPair]
minCoreSynThreshold = synThresholdD['minCoreSynThreshold'][strainPair]
if coreSynSc < minCoreSynThreshold or synSc < minSynThreshold:
# one of the two types of synteny below threshold, so this
# pair doesn't meet the requirements for being in the same
# LocusFamily
addIt = False
else:
addIt = True
return addIt
def createAllLocusFamiliesAtTip(genesAtThisTipS,genesO,lfMrca,scoresO,paramD,synThresholdD,famNumCounter,locusFamNumCounter):
'''Given the set of genes genesAtThisTipS found at the tip given by
lfMrca, break these into LocusFamilies. Many will be single gene
LocusFamilies, but some may be multi-gene
'''
# Break these up into LocusFamilies. Many will be
# single gene LocusFamilies, but some may be multi-gene
lfGroupsL=[]
while len(genesAtThisTipS) > 0:
seed = genesAtThisTipS.pop()
currentGroupS=set([seed])
for gene in genesAtThisTipS:
addIt = isSameLocusFamily(seed,gene,scoresO,genesO,paramD,synThresholdD)
if addIt:
currentGroupS.add(gene)
genesAtThisTipS.difference_update(currentGroupS)
lfGroupsL.append(currentGroupS)
lfOL=[]
for lfGroupS in lfGroupsL:
lfO = LocusFamily(famNumCounter,locusFamNumCounter,lfMrca)
locusFamNumCounter+=1
lfO.addGenes(lfGroupS,genesO)
lfOL.append(lfO)
return lfOL,locusFamNumCounter
## xlMode
def getGeneSubsetFromLocusFamilies(familiesO,tree,numRepresentativeGenesPerLocFam,genesO):
'''Loop over all locus families and sample
numRepresentativeGenesPerLocFam from each.'''
# get some stuff from tree
leafL = trees.leafList(tree)
subtreeD=trees.createSubtreeD(tree)
numNodes = trees.nodeCount(tree)
for lfO in familiesO.iterLocusFamilies():
for geneNum in getGeneSubsetFromOneLocusFamily(lfO,numRepresentativeGenesPerLocFam,leafL,numNodes,genesO,subtreeD):
yield geneNum
def getGeneSubsetFromOneLocusFamily(lfO,numRepresentativeGenesPerLocFam,leafL,numNodes,genesO,subtreeD):
'''Sample numRepresentativeGenesPerLocFam genes from one
LocusFamily. This function simply divides the LocusFamily genes into
sets on the left and right branches, and attempts to take a similar
sized sample from each.'''
lfGenesL = list(lfO.iterGenes())
if len(lfGenesL) <= numRepresentativeGenesPerLocFam:
return lfGenesL
elif lfO.lfMrca in leafL:
# it's a tip
return random.sample(lfGenesL,numRepresentativeGenesPerLocFam)
else:
# divide up the genes by node
subtree = subtreeD[lfO.lfMrca]
subtreeNodeGenesD = {strainName:[] for strainName in trees.nodeList(subtree)}
for geneNum in lfGenesL:
strainName = genesO.numToStrainName(geneNum)
subtreeNodeGenesD[strainName].append(geneNum)
# get ones from left and right
leftS,rightS = createLRSets(subtreeD,lfO.lfMrca,subtreeNodeGenesD,None)
numLeft = min(len(leftS),numRepresentativeGenesPerLocFam // 2)
numRight = min(len(rightS),numRepresentativeGenesPerLocFam - numLeft)
sampleL = random.sample(leftS,numLeft) + random.sample(rightS,numRight)
return sampleL
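# Illustrative worked example (hypothetical sizes, not from the original
# code): with numRepresentativeGenesPerLocFam = 5 and 10 genes on each side,
# numLeft = min(10, 5 // 2) = 2 and numRight = min(10, 5 - 2) = 3, so two
# genes are sampled from the left subtree and three from the right.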
## Input/output
def writeFamilies(familiesO,genesO,strainNamesT,paramD):
'''Write all gene families to fileName, one family per line.'''
familyFN = paramD['familyFN']
geneInfoFN = paramD['geneInfoFN']
# get the num to name dict, only for strains we're looking at.
genesO.initializeGeneNumToNameD(geneInfoFN,set(strainNamesT))
f=open(familyFN,'w')
for fam in familiesO.iterFamilies():
f.write(fam.fileStr(genesO)+'\n')
f.close()
def readFamilies(familyFN,tree,genesO):
'''Read the family file named familyFN, creating a Families object.
'''
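# Sketch of the tab-separated line format this parser expects, inferred from
# the field handling below (field values are hypothetical): famNum, mrca,
# two seed-gene fields (the first is "-" when there is no seed pair), then
# one field per LocusFamily, each of which is comma-separated as
# locusFamNum,lfMrca,geneNum_geneName,...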
familiesO = Families(tree)
f=open(familyFN,'r')
while True:
s=f.readline()
if s=='':
break
L=s.split('\t')
famNum=int(L[0])
mrca = L[1]
if L[2] == "-":
seedPairL = None
else:
seedPairL = [L[2],L[3]]
lfL = L[4:]
familiesO.initializeFamily(famNum,mrca,seedPairL)
for lfStr in lfL:
lfSplitL = lfStr.rstrip().split(',')
locusFamNum=int(lfSplitL[0])
lfMrca = lfSplitL[1]
geneL=[]
for geneName in lfSplitL[2:]:
geneNum = int(geneName.split('_')[0])
geneL.append(geneNum)
lfO = LocusFamily(famNum,locusFamNum,lfMrca)
lfO.addGenes(geneL,genesO)
familiesO.addLocusFamily(lfO)
f.close()
return familiesO
|
ecbush/xtrans
|
xenoGI/families.py
|
Python
|
gpl-3.0
| 42,255
|
[
"BLAST"
] |
9a6dcbf4d42f6a7f8f6f60bb5385deab8b1de9c157fd7f50ee7d0f4bdacfe5c0
|
"""Ladybug configurations.
Import this into every module where access to the configurations is needed.
Usage:
from ladybug.config import folders
print(folders.default_epw_folder)
folders.default_epw_folder = "C:/epw_data"
"""
import os
import json
class Folders(object):
"""Ladybug folders.
Args:
config_file: The path to the config.json file from which folders are loaded.
If None, the config.json module included in this package will be used.
Default: None.
mute: If False, the paths to the various folders will be printed as they
are found. If True, no printing will occur upon initialization of this
class. Default: True.
Properties:
* ladybug_tools_folder
* default_epw_folder
* config_file
* mute
"""
def __init__(self, config_file=None, mute=True):
# set the mute value
self.mute = bool(mute)
# load paths from the config JSON file
self.config_file = config_file
@property
def ladybug_tools_folder(self):
"""Get or set the path to the ladybug tools installation folder."""
return self._ladybug_tools_folder
@ladybug_tools_folder.setter
def ladybug_tools_folder(self, path):
if not path: # check the default location for epw files
path = self._find_default_ladybug_tools_folder()
self._ladybug_tools_folder = path
if not self.mute and self._ladybug_tools_folder:
print('Path to the ladybug tools installation folder is set to: '
'{}'.format(self._ladybug_tools_folder))
@property
def default_epw_folder(self):
"""Get or set the path to the default folder where EPW files are stored."""
return self._default_epw_folder
@default_epw_folder.setter
def default_epw_folder(self, path):
if not path: # check the default location for epw files
path = self._find_default_epw_folder()
self._default_epw_folder = path
if not self.mute and self._default_epw_folder:
print('Path to the default epw folder is set to: '
'{}'.format(self._default_epw_folder))
@property
def config_file(self):
"""Get or set the path to the config.json file from which folders are loaded.
Setting this to None will result in using the config.json module included
in this package.
"""
return self._config_file
@config_file.setter
def config_file(self, cfg):
if cfg is None:
cfg = os.path.join(os.path.dirname(__file__), 'config.json')
self._load_from_file(cfg)
self._config_file = cfg
def _load_from_file(self, file_path):
"""Set all of the the properties of this object from a config JSON file.
Args:
file_path: Path to a JSON file containing the file paths. A sample of this
JSON is the config.json file within this package.
"""
# check the default file path
assert os.path.isfile(file_path), \
ValueError('No file found at {}'.format(file_path))
# set the default paths to be all blank
default_path = {
"ladybug_tools_folder": r'',
"default_epw_folder": r''
}
with open(file_path, 'r') as cfg:
try:
paths = json.load(cfg)
except Exception as e:
print('Failed to load paths from {}.\nThey will be set to defaults '
'instead\n{}'.format(file_path, e))
else:
for key, p in paths.items():
if not key.startswith('__') and p.strip():
default_path[key] = p.strip()
# set paths for the ladybug_tools_folder and default_epw_folder
self.ladybug_tools_folder = default_path["ladybug_tools_folder"]
self.default_epw_folder = default_path["default_epw_folder"]
def _find_default_epw_folder(self):
"""Find the the default EPW folder in its usual location.
An attempt will be made to create the directory if it does not already exist.
"""
epw_folder = os.path.join(self.ladybug_tools_folder, 'resources', 'weather')
if not os.path.isdir(epw_folder):
try:
os.makedirs(epw_folder)
except Exception as e:
raise OSError('Failed to create default epw '
'folder: %s\n%s' % (epw_folder, e))
return epw_folder
@staticmethod
def _find_default_ladybug_tools_folder():
"""Find the the default ladybug_tools folder in its usual location.
An attempt will be made to create the directory if it does not already exist.
"""
home_folder = os.getenv('HOME') or os.path.expanduser('~')
install_folder = os.path.join(home_folder, 'ladybug_tools')
if not os.path.isdir(install_folder):
try:
os.makedirs(install_folder)
except Exception as e:
raise OSError('Failed to create default ladybug tools installation '
'folder: %s\n%s' % (install_folder, e))
return install_folder
"""Object possesing all key folders within the configuration."""
folders = Folders()
|
ladybug-analysis-tools/ladybug-core
|
ladybug/config.py
|
Python
|
gpl-3.0
| 5,373
|
[
"EPW"
] |
5d0c0c5c185ed65909993600d9532a75aa6fd9f574c5de020224d0ef0050994b
|
#import director
from director import cameraview
from director import transformUtils
from director import visualization as vis
from director import objectmodel as om
from director.ikparameters import IkParameters
from director.ikplanner import ConstraintSet
from director import polarisplatformplanner
from director import robotstate
from director import segmentation
from director import sitstandplanner
from director.timercallback import TimerCallback
from director import planplayback
from director import lcmUtils
from director.uuidutil import newUUID
import os
import functools
import numpy as np
import scipy.io
import vtkAll as vtk
import bot_core as lcmbotcore
from director.tasks.taskuserpanel import TaskUserPanel
import director.tasks.robottasks as rt
from director import filterUtils
from director import ioUtils
import director
from numpy import array
class CourseModel(object):
def __init__(self):
pose = transformUtils.poseFromTransform(vtk.vtkTransform())
self.pointcloud = ioUtils.readPolyData(director.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([ 1.0, 0. , 0. , 0.0])))
self.originFrame = self.pointcloudPD.getChildFrame()
t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([ 0.93821625, 0. , 0. , -0.34604951]))
self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-3.31840048, 0.36408685, -0.67413123]), array([ 0.93449475, 0. , 0. , -0.35597691]))
self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([ 0.93697004, 0. , 0. , -0.34940972]))
self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541 ]), array([-0.40456572, 0. , 0. , 0.91450893]))
self.drillWallWalkFarthestSafeFrame = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-2.5314524 , -0.27401861, -0.71302976]), array([ 0.98691519, 0. , 0. , -0.16124022]))
self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-1.16122318, 0.04723203, -0.67493468]), array([ 0.93163145, 0. , 0. , -0.36340451]))
self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-0.5176186 , -1.00151554, -0.70650799]), array([ 0.84226497, 0. , 0. , -0.53906374]))
self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([ 0.98625075, 0. , 0. , -0.16525575]))
self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([ 0.65827322, -0.08028796, -0.77370834]), array([ 0.94399977, 0. , 0. , -0.3299461 ]))
self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
t = transformUtils.transformFromPose(array([ 5.47126425, -0.09790393, -0.70504679]), array([ 1., 0., 0., 0.]))
self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2,visible=True, parent=self.pointcloudPD)
self.frameSync = vis.FrameSync()
self.frameSync.addFrame(self.originFrame)
self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
|
patmarion/director
|
src/python/director/coursemodel.py
|
Python
|
bsd-3-clause
| 5,177
|
[
"VTK"
] |
9b95dd00305e27ac1075bb3959b001d625df1f6c4284219e9bee1c0f1c6e9bdb
|
# Name: mapper_opendap_osisaf.py
# Purpose: Nansat mapping for OSISAF ice data, stored online in THREDDS
# Author: Anton Korosov
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
import os
import json
import numpy as np
import datetime as dt
import pythesint as pti
from nansat.nsr import NSR
from nansat.mappers.opendap import Dataset, Opendap
# http://thredds.met.no/thredds/dodsC/osisaf/met.no/ice/conc/2016/04/ice_conc_sh_polstere-100_multi_201604261200.nc ice_conc
# http://thredds.met.no/thredds/dodsC/osisaf/met.no/ice/drift_lr/merged/2016/04/ice_drift_nh_polstere-625_multi-oi_201604151200-201604171200.nc dX dY
# http://thredds.met.no/thredds/dodsC/osisaf/met.no/ice/type/2016/04/ice_type_nh_polstere-100_multi_201604151200.nc ice_type
# http://thredds.met.no/thredds/dodsC/osisaf/met.no/ice/edge/2016/04/ice_edge_nh_polstere-100_multi_201604241200.nc ice_edge
# http://thredds.met.no/thredds/dodsC/osisaf_test/met.no/ice/drift_lr/merged/2013/09/ice_drift_nh_polstere-625_multi-oi_201309171200-201309191200.nc dX dY
class Mapper(Opendap):
''' VRT with mapping of WKV for OSISAF ice products '''
baseURLs = ['http://thredds.met.no/thredds/dodsC/cryoclim/met.no/osisaf-nh',
'http://thredds.met.no/thredds/dodsC/osisaf_test/met.no/ice/',
'http://thredds.met.no/thredds/dodsC/osisaf/met.no/ice/']
timeVarName = 'time'
xName = 'xc'
yName = 'yc'
t0 = dt.datetime(1978, 1, 1)
srcDSProjection = NSR().wkt
def __init__(self, filename, gdalDataset, gdalMetadata, date=None, ds=None, bands=None,
cachedir=None, **kwargs):
''' Create OSISAF VRT
Parameters:
filename : URL
date : str
2010-05-01
ds : netCDF.Dataset
previously opened dataset
'''
self.test_mapper(filename)
ds = Dataset(filename)
proj4str = '%s +units=%s' % (ds.variables['Polar_Stereographic_Grid'].proj4_string,
ds.variables['xc'].units)
self.srcDSProjection = NSR(proj4str).wkt
if filename[-3:] == '.nc':
date = self.t0 + dt.timedelta(seconds=ds.variables['time'][0])
date = date.strftime('%Y-%m-%d')
self.create_vrt(filename, gdalDataset, gdalMetadata, date, ds, bands, cachedir)
# add instrument and platform
mm = pti.get_gcmd_instrument('Passive Remote Sensing')
ee = pti.get_gcmd_platform('Earth Observation Satellites')
self.dataset.SetMetadataItem('instrument', json.dumps(mm))
self.dataset.SetMetadataItem('platform', json.dumps(ee))
def convert_dstime_datetimes(self, dsTime):
''' Convert time variable to np.datetime64 '''
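# Illustrative example (hypothetical value, not from the original code):
# with t0 = 1978-01-01, a time value of 86400 seconds converts to
# numpy.datetime64('1978-01-02T00:00:00').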
dsDatetimes = np.array([np.datetime64(self.t0 + dt.timedelta(seconds=day))
for day in dsTime]).astype('M8[s]')
return dsDatetimes
|
nansencenter/nansat
|
nansat/mappers/mapper_opendap_osisaf.py
|
Python
|
gpl-3.0
| 3,073
|
[
"NetCDF"
] |
2e9afb977b021248aaf61e131e59b8e0b61be05caf45f67647d8fba062d00cf7
|
import numpy as np
import unittest
import discretize
try:
import vtk.util.numpy_support as nps
except ImportError:
has_vtk = False
else:
has_vtk = True
if has_vtk:
class TestTensorMeshVTK(unittest.TestCase):
def setUp(self):
h = np.ones(16)
mesh = discretize.TensorMesh([h, 2 * h, 3 * h])
self.mesh = mesh
def test_VTK_object_conversion(self):
mesh = self.mesh
vec = np.arange(mesh.nC)
models = {"arange": vec}
vtkObj = mesh.to_vtk(models)
self.assertEqual(mesh.nC, vtkObj.GetNumberOfCells())
self.assertEqual(mesh.nN, vtkObj.GetNumberOfPoints())
self.assertEqual(
len(models.keys()), vtkObj.GetCellData().GetNumberOfArrays()
)
bnds = vtkObj.GetBounds()
self.assertEqual(mesh.x0[0], bnds[0])
self.assertEqual(mesh.x0[1], bnds[2])
self.assertEqual(mesh.x0[2], bnds[4])
for i in range(vtkObj.GetCellData().GetNumberOfArrays()):
name = list(models.keys())[i]
self.assertEqual(name, vtkObj.GetCellData().GetArrayName(i))
arr = nps.vtk_to_numpy(vtkObj.GetCellData().GetArray(i))
arr = arr.flatten(order="F")
self.assertTrue(np.allclose(models[name], arr))
if __name__ == "__main__":
unittest.main()
|
simpeg/discretize
|
tests/base/test_tensor_vtk.py
|
Python
|
mit
| 1,428
|
[
"VTK"
] |
e4eda4b2b8f2e5c35b48f8208592a53dbaa9afb94c5e638e145938d51062d9d8
|
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
try:
import __builtin__ as builtins
except ImportError:
import builtins
import unittest
from imp import reload
from splinter.exceptions import DriverNotFoundError
from .fake_webapp import EXAMPLE_APP
class BrowserTest(unittest.TestCase):
def patch_driver(self, pattern):
self.old_import = builtins.__import__
def custom_import(name, *args, **kwargs):
if pattern in name:
return None
return self.old_import(name, *args, **kwargs)
builtins.__import__ = custom_import
def unpatch_driver(self, module):
builtins.__import__ = self.old_import
reload(module)
def browser_can_change_user_agent(self, webdriver):
from splinter import Browser
browser = Browser(driver_name=webdriver, user_agent="iphone")
browser.visit(EXAMPLE_APP + "useragent")
result = "iphone" in browser.html
browser.quit()
return result
def test_browser_can_still_be_imported_from_splinters_browser_module(self):
from splinter.browser import Browser # NOQA
def test_should_work_even_without_zope_testbrowser(self):
self.patch_driver("zope")
from splinter import browser
reload(browser)
self.assertNotIn("zope.testbrowser", browser._DRIVERS)
self.unpatch_driver(browser)
def test_should_raise_an_exception_when_browser_driver_is_not_found(self):
with self.assertRaises(DriverNotFoundError):
from splinter import Browser
Browser("unknown-driver")
|
bmcculley/splinter
|
tests/test_browser.py
|
Python
|
bsd-3-clause
| 1,746
|
[
"VisIt"
] |
f1b1d915fdd753f4eaad7dfe6752f726e89a64cc369d82ee76e9a72f8aa13cce
|
from pymol.wizard import Wizard
from pymol import cmd
import pymol
import types
import string
class Pseudoatom(Wizard):
def __init__(self,mode='label',pos='[0.0,0.0,0.0]',_self=cmd):
Wizard.__init__(self,_self)
self.mode = mode
if mode == 'label':
self.prefix = 'Label text: \888'
self.text = ''
self.pos = pos
def get_event_mask(self):
return Wizard.event_mask_key
def do_key(self,k,x,y,m):
if k in [8,127]:
self.text = self.text[:-1]
elif k==27:
self.cmd.set_wizard()
self.cmd.refresh()
elif k==32:
self.text = self.text + " "
elif k>32:
self.text = self.text + chr(k)
elif k==10 or k==13:
self.text = string.strip(self.text)
if self.mode=='label':
obj_name = self.cmd.get_unused_name(string.lower(self.text[0:14]),0)
self.cmd.pseudoatom(obj_name,pos=self.pos,label=self.text)
self.cmd.set_wizard()
self.cmd.refresh()
return 1
self.cmd.refresh_wizard()
return 1
def get_prompt(self):
self.prompt = [ self.prefix + self.text + "_" ]
return self.prompt
def get_panel(self):
return [
[ 2, 'Cancel', 'cmd.set_wizard()' ]
]
|
gratefulfrog/lib
|
python/pymol/wizard/pseudoatom.py
|
Python
|
gpl-2.0
| 1,382
|
[
"PyMOL"
] |
92249308d75554aefebf68310e21c8f73e71dcf2e7e3abfd13a35747be697c9f
|
class WaterVapor(object):
"""
Vapor of H2O.
Note that the class name is singular.
"""
pass
class IceCrystal(object):
"""
Crystal of Ice.
Note that between classes there are always two spaces.
"""
pass
class SnowFlake(object):
""" Snowflakes. """
def __init__(self, vapor, ice):
""" Set vapor and ice for snowflake. """
self.vapor = vapor
self.ice = ice
|
dokterbob/slf-programming-workshops
|
examples/snowball/water/phases.py
|
Python
|
mit
| 433
|
[
"CRYSTAL"
] |
fea31d377b863dc09ae444664d5203edbda037f279a1afa646c42102d5ce28d0
|
# This file contains all functions to extract fundus curves from a per-vertex-value (e.g., curvature) map
# Last updated: 2011-08-09 Forrest Sheng Bao
from mindboggle.utils import io_file, io_vtk, freesurfer
import libbasin
#import libfundifc as libskel
from numpy import mean, std, abs, matrix, zeros, flatnonzero, sign, array, argmin, median
import sys
from math import sqrt
import cPickle
import vtk
sys.setrecursionlimit(30000)
def gen_Adj(VrtxCmpnts, VrtxNbrLst, CurvatureDB, Pits):
'''Compute/load weighted adjacency matrixes of all connected sulcal components.
Parameters
===========
Adj : list of list of integers/doubles
adjacency matrix of a connected component
Dist : list of list of integers
Dist[i][j] is the shortest distance between vertex i and vertex j
Dist[i] is the Adj of the i-th connected component
CID : integer
a component ID
Cmpnt : list of integers
global vertex IDs of all vertexes in a connected sulcal component
VrtxIdx : integer
local (inner-componnent) ID of a vertex
NbrIdx : integer
local (inner-component) ID of a vertex's Nbr
CurvatureDB : list of doubles
each element is the curvature value of a vertex
Pits: list of integers
Vertex IDs of pits
Notes
=======
Before HBM, this function uses graph power to weigh links between nodes.
Now (2011-07-17) it uses curvature to weigh.
'''
print "computing/loading weighted adjacency matrixes"
Dists = []
for CID, Cmpnt in enumerate(VrtxCmpnts):
Num = len(Cmpnt)
# print "\t component", CID+1, ": size", Num
if Num > 1:
# Commented Forrest 2012-01-21, drop matrix for accessing larger memory
# Adj = matrix(zeros((Num, Num)))
# for VrtxIdx, Vrtx in enumerate(Cmpnt):
# for Nbr in VrtxNbrLst[Vrtx]:
# if Nbr in Cmpnt:
# NbrIdx = Cmpnt.index(Nbr)
# LinkWeight = -1. * (CurvatureDB[Vrtx] + CurvatureDB[Nbr])
#
# # add a double check here to ensure the matrix is diagonally symmetric
# if Adj[VrtxIdx, NbrIdx] == 0:
# Adj[VrtxIdx, NbrIdx] = LinkWeight #
# # Adj[NbrIdx, VrtxIdx] = LinkWeight # write only once for checking later
# elif Adj[VrtxIdx, NbrIdx] != 0 and Adj[VrtxIdx, NbrIdx] != LinkWeight:
# print "error, Adj is not symmetric."
# elif Adj[NbrIdx, VrtxIdx] != 0 and Adj[NbrIdx, VrtxIdx] != LinkWeight:
# print "error, Adj is not symmetric."
#
# Dist = [[i for i in Row] for Row in list(array(Adj))]
# End of Commented Forrest 2012-01-21, drop matrix for accessing larger memory
# now use a new solution.
Dist=[[0 for i in range(Num)] for j in range(Num)]
for VrtxIdx, Vrtx in enumerate(Cmpnt):
for Nbr in VrtxNbrLst[Vrtx]:
if Nbr in Cmpnt:
NbrIdx = Cmpnt.index(Nbr)
LinkWeight = -1. * (CurvatureDB[Vrtx] + CurvatureDB[Nbr])
if Vrtx in Pits or Nbr in Pits:
LinkWeight *= 10 # increase the edge weight for edges connecting pits
# add a double check here to ensure the matrix is diagonally symmetric
if Dist[VrtxIdx][NbrIdx] == 0:
Dist[VrtxIdx][NbrIdx] = LinkWeight #
# Adj[NbrIdx, VrtxIdx] = LinkWeight # write only once for checking later
elif Dist[VrtxIdx][NbrIdx] != 0 and Dist[VrtxIdx][NbrIdx] != LinkWeight:
print "error, Adj is not symmetric."
elif Dist[NbrIdx][VrtxIdx] != 0 and Dist[NbrIdx][VrtxIdx] != LinkWeight:
print "error, Adj is not symmetric."
# end of now use a new solution
else:
Dist = [[1]]
Dists.append(list(Dist)) # this step might be the cause of large memory consumption
return Dists
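# Illustrative note on the weighting above (hypothetical curvature values,
# not from the original code): for neighboring vertexes with curvatures
# -0.4 and -0.6, LinkWeight = -1 * (-0.4 + -0.6) = 1.0; if either vertex is
# a pit, the weight becomes 10.0.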
def downsample(SpecialAndRing, Special, VrtxCmpnts, NbrLst, Prob):
'''Randomly delete vertexes on the original mesh, keeping each vertex with probability Prob.
Parameters
=============
SpecialAndRing: list of integers
global IDs of special and 0-curvature vertexes
It can be the same as Special, which means no vertexes other than
Special are required to be kept in the downsampled mesh
Special : list of integers
special vertexes that have to be in downsampled mesh.
VrtxCmpnts : list of lists of integers
VrtxCmpnts[i] is a list of vertexes in the i-th connected component
NbrLst : list of lists of integers
neighbor list of vertexes
Prob : float
The probability \in [0, 1] that a vertex is to be KEPT.
If it is 1, remove nothing.
If it is 0, remove ALL
N : list of lists of integers
new vertexes to be left after downsampling
L : list of lists of integers
neighbor list of vertexes after downsampling
Vrtx : integer
a vertex id
MinusVrtx : integer
number of vertexes removed in mesh downsampling
MinusEdge : integer
number of edges removed in mesh downsampling
Keep : list of integers
vertexes to be kept
SpecialGroup: list of list of integers
each element is a list of Special vertexes in each connected component
SpecialInThisGroup : list of integers
a list of Special vertexes ID, used to represent all Special vertexes in a running component
'''
print "Downsampling mesh..."
import random
N = []
L = [[] for i in xrange(0,len(NbrLst))]
SpecialGroup = []
for Cmpnt in VrtxCmpnts:
for Vrtx in Cmpnt:
L[Vrtx] = NbrLst[Vrtx]
for CmpntID, Cmpnt in enumerate(VrtxCmpnts): # for each component
# MinusVrtx, MinusEdge = 0, 0
# NumMusthave = 0
Keep = []
SpecialInThisGroup = []
for Vrtx in Cmpnt: # for each vertex in the component
if Vrtx in SpecialAndRing:
# N[-1].append(Vrtx)
# NumMusthave += 1
if Vrtx in Special:
SpecialInThisGroup.append(Vrtx)
if not Vrtx in Keep:
Keep.append(Vrtx)
elif not Vrtx in Keep: # The purpose of not Vrtx in Keep is to avoid removing vertexes that should not be removed, such as Musthave.
if Prob <= random.random(): # REMOVE Vrtx
# MinusVrtx += 1
for Nbr in NbrLst[Vrtx]:
if Nbr in Cmpnt:
# step 1: put Vrtx's neighbors, which are ALSO in Cmpnt, into N and thus delete Vrtx
if not Nbr in Keep:
Keep.append(Nbr) # yield more vertexes and edges removed
# step 2: delete edges ending at Vrtx
if Vrtx in L[Nbr]:
# N[-1].append(Nbr) # yield less vertexes and edges removed
# Left.append(Nbr) # yield less vertexes and edges removed
L[Nbr].remove(Vrtx)
# MinusEdge += 1
L[Vrtx] = [] # Vrtx has no neighbor now
else: # KEEP this vertex
Keep.append(Vrtx)
N.append(Keep)
SpecialGroup.append(SpecialInThisGroup)
# if NumMusthave != len(SpecialInThisGroup):
# print "more vertex in musthave\n"
# exit(0)
# print "\t component", CmpntID+1, ":", NumMusthave, "Specials. ", "Vtx #:", len(Cmpnt), "-", MinusVrtx, "=>", len(Keep)
return N, L, SpecialGroup
def prune(Path, Degree, TreeNbr, Terminal, Branching, Special, VrtxCmpnt):
'''Prune an MST by deleting edges
Parameters
===========
Path : list of lists (2-tuple) of integers
Each element of *Path* is a list of the two terminals of each pair of connected fundus vertexes
Vertexes indexed LOCALLY, i.e., they are referred by their ID in currenct component
Degree : list of integers
Degrees of nodes in a component
TreeNbr : list of list of integers
Each element is a list of neighbors of a node. All LOCAL IDs.
Terminal : list of integers
Local IDs of nodes that are terminals.
Branching : list of integers
Local IDs of nodes that are branching nodes.
Special : list of integers
Local IDs of nodes that are special vertexes connected by MST
Trace : list of 2-tuples of integers
Edges that are visited along the Path from one node to another
Visited : list of integers
Nodes that are visited along the Path from one node to another
At : integer
a node
Previous : integer
a node
VrtxCmpnt: list of integers
A list of vertexes in this component in global ID
Returns
========
Path : list of lists (2-tuple) of integers
Each element of *Path* is a list of the two terminals of each pair of connected fundus vertexes
Vertexes indexed LOCALLY, i.e., they are referred to by their ID in the current component
This one is reduced
Note
======
2011-10-04 Since we only want links between special vertexes, all links starting from
a terminal must be removed. Only links between special vertexes are left.
'''
# print "\t # of terminals:", len(Terminal)
# print "\t\t Path:", Path
# print "\t\t Special:", Special
NodeColor = {} # to visualize visited nodes of different types
# print "\t in prune(), # of specials", len(Special)
for T in Terminal:
# print "\t\t\t Tracing begins at ", T,
if len(Branching) < 1:
# print "\t\t stop pruning at ", Terminal.index(T), "-th Terminal"
break
if T in Special: # Forrest 2011-10-04
continue
Trace= [ ]# store the trace visited from a terminal node to a branching node
Visited = [] # store nodes that have been visited
At = T # the node of current position in tracing
NodeColor[VrtxCmpnt[T]] = 1
while(not At in Branching and not At in Special):
Visited.append(At)
for Nbr in TreeNbr[At]:
if not Nbr in Visited: # search toward the mainstream
Trace.append((Nbr, At))
Previous = At # the node visited before At in tracing
At = Nbr
break
# print "\t\t\tTrace ", Trace
# after the while loop, At stops at a branching node.
# block below deactivated Forrest 2011-10-04 to remove all links from Terminal
'''
if not At in Special:
NodeColor[VrtxCmpnt[At]] = 2 # just a regular branching node
TreeNbr[At].remove(Previous)
for Pair in Trace:
(Src, Dst) = Pair
if Pair in Path:
Path.remove(Pair)
else: # it is possible the order of nodes is reversed in Path
Path.remove((Dst, Src))
Degree[At] -= 1
if Degree[At] < 3:
Branching.remove(At)
elif At in Special and At in Branching:
NodeColor[VrtxCmpnt[At]] = 5
elif At in Special and At in Terminal:
NodeColor[VrtxCmpnt[At]] = 4
else: # Special only
NodeColor[VrtxCmpnt[At]] = 3
'''
# Delete all links from non-special terminals Forrest 2011-10-04
NodeColor[VrtxCmpnt[At]] = 2 # just a regular branching node
TreeNbr[At].remove(Previous)
for Pair in Trace:
(Src, Dst) = Pair
if Pair in Path:
Path.remove(Pair)
else: # it is possible the order of nodes is reversed in Path
Path.remove((Dst, Src))
if At in Branching: # may stop at a Special-only node
Degree[At] -= 1
if Degree[At] < 3:
Branching.remove(At)
# End of Delete all links from non-special terminals Forrest 2011-10-04
# print "\t\t Final path: ", Path
return Path, NodeColor
def nonZeroLn(List): # activated 2011-05-25 19:14
'''Given a 2-D list, return the number of 1-D lists that contain nonzero elements
'''
Counter = 0
for L in List:
for E in L:
if E != 0:
Counter += 1
break
return Counter
class Prim: # modified from the code without license @http://hurring.com/scott/code/python/mst_prim/v0.1/mst_prim.py
INFINITY = 2**8 # this is large enough for current problem size
vertices = 0
def __init__(self, A, r):
"""
Prepare the inputs for mst_prime
"""
self.vertices = A[0].__len__();
self.nonzeros = nonZeroLn(A) # a new member, activated Forrest 2011-05-25 18:56
self.init_adjacency(A)
self.remove_route(A, r)
self.degree = [0 for i in xrange(0, len(A))] # a new member , activated Forrest 2011-05-27 20:31
self.tree_nbr= [[] for i in xrange(0, len(A))] # record nbrs of each node, Forrest 2011-09-24
def mst_prim(self, A, w, i, path, degree, tree_nbr):
"""
'A' is the adjacency matrix
'w' is the list of all connected vertices (in order of discovery)
'path' is a list of tuples showing (from, to)
i : the ID of the connected component # Forrest 2011-05-26 00:31
"""
# Stop when we've added all nodes to the path
# if (w.__len__() == self.vertices): # old line. But if some nodes are not connected, it goes into infinite recursion. Deactivated Forrest 2011-05-25 19:39
if (w.__len__() == self.nonzeros): # new way, activated Forrest 2011-05-25 19:42
return (A, w, path, degree, tree_nbr)
# Find minimum path coming OUT of the known vertexes
(vfrom, vto, vcost) = self.find_min(A, w)
# increase the degreee for vertexes vfrom and vto
degree[vfrom] += 1
degree[vto] += 1
# update tree_nbr list for vfrom and vto Forrest 2011-09-24 10:55
tree_nbr[vfrom].append(vto)
tree_nbr[vto].append(vfrom)
# Mark down this vertex as being a part of the MST path
w.append(vto)
#path.append((vfrom,vto,vcost, i)) # commented, Forrest 2011-09-24
path.append((vfrom, vto))
self.remove_route(A, vto)
return self.mst_prim(A, w, i, path, degree, tree_nbr)
def init_adjacency(self, A):
"""
Initialize adjacency list - set 0 = INFINITY
"""
for i in range(0, self.vertices):
for j in range(0, self.vertices):
if A[i][j] == 0:
A[i][j] = 2**8
def remove_route(self, A, v):
"""
Once we've added a node to our path, set all routes
to this node equal to INFINITY - to prevent loops
"""
for i in range(0, self.vertices):
A[i][v] = self.INFINITY
def find_min(self, A, w):
"""
Find the cheapest connection we can possibly make,
given the partially-built MST 'w'
'vfrom' vertex to connect from
'vto' vertex to connect to
'vcost' cost of connection
"""
vcost = self.INFINITY
vto = vfrom = -1
for v in w:
# Get array offset of minimum of this vertex
i = argmin(A[v])
if A[v][i] < vcost:
vcost = A[v][i]
vto = i
vfrom = v
return (vfrom, vto, vcost)
# The end of Class Prim
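# Illustrative toy usage of the Prim class above (hypothetical adjacency
# matrix, not from the original code):
#   A = [[0, 3, 0], [3, 0, 1], [0, 1, 0]]
#   M = Prim(A, 0)
#   (Adj, W, Path, Degree, TreeNbr) = M.mst_prim(A, [0], 0, [], M.degree, M.tree_nbr)
# Path would then be [(0, 1), (1, 2)], i.e. the edges of the minimum spanning tree.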
def fundiLength(Vrtx, Path):
'''Estimate the length of a fundus by accumulating the Euclidean distances along a given Path
Notes
======
Now that a path has only two nodes, no real accumulation is needed.
'''
Length = 0
for i in xrange(0, len(Path)-1):
j = i+1
Length += sqrt( sum([ (Vrtx[i][k]-Vrtx[j][k])**2 for k in [0,1,2]]))
return Length
def mst(Adjs, VrtxCmpnts, SpecialGroup, NbrLst, Coordinates):
'''Using Prim algorithm to connect nodes (including fundus vertexes) within each connected component
Parameters
==========
VrtxCmpnts : list of lists of integers
VrtxCmpnts[i] is a list of vertexes in the i-th connected component
SpecialGroup : list of lists of integers
Special[i] is a list of Special vertexes in the i-th connected component
Special[i] can be empty
NbrLst : list of list of integers
neighbor list of vertexes
Path : list of lists (2-tuple) of integers
Each element of *Path* is a list of the two terminals of each pair of connected fundus vertexes
Vertexes indexed LOCALLY, i.e., they are referred to by their ID in the current component
Adj : list of lists of integers
adjacency matrix of fundus vertexes in one connected component
Adjs : list of Adj's
adjacency matrixes of all fundus vertexes on one hemisphere
Coordinates : list of 3-tuples
Coordinates of vertexes in GLOBAL index.
W : list of integers
vertexes taht are already connected in Prim algorithm
It has no use.
Segs : list of lists (2-tuple) of integers
Vertexes indexed GLOBALLY, i.e., they are referred by their ID in the entire hemisphere
Degree : list of integers
Degrees of nodes in a component
TreeNbr : list of list of integers
Each element is a list of neighbors of a node. All LOCAL IDs.
Terminal : list of integers
Local IDs of nodes that are terminals.
Branching : list of integers
Local IDs of nodes that are branching nodes.
FundusLen : dictionary of integers
Each key is a GLOBAL vertex ID.
The value of each key is the length of the fundus that the key is part of.
FundusID : dictionary of integers
Each key is a GLOBAL vertex ID.
The value of each key is the ID (equal to component ID) of the fundus that the key is part of.
'''
Segs = []
Color = {}
FundusLen, FundusID = {}, {}
if len(Adjs) != len(VrtxCmpnts):
print "Error, Adjs is not as long as VrtxCmpnts"
exit()
else:
print "Connecting fundus vertexes in", len(VrtxCmpnts), "connected components."
for i in xrange(0, len(Adjs)): # For each component in the hemisphere
print "\t MST on component",i+1, ",",
if len(SpecialGroup[i]) < 2 : # This component has fewer than two special vertexes to connect
print "\t Skipped. Too few Special vertexes."
elif len(VrtxCmpnts[i]) >200: # For quick debugging ONLY. Forrest 2011-09-29 16:56
print "\t Skipped. Too many vertexes (all kinds). "
else:
# print "\t # of special points", len(SpecialGroup[i]) ,
Root = VrtxCmpnts[i].index(SpecialGroup[i][0]) # always start MST from a special vertex
# Adj = Adjs[i] # avoid creating new variable to speed up
# Cmpnt = VrtxCmpnts[i] # avoid creating new variable to speed up
Num = len(Adjs[i])
if Num > 1: # the Num < 1000 is for fast debugging
M = Prim(Adjs[i], Root) # starting from the Root
(Adj, W, Path, Degree, TreeNbr) = M.mst_prim(Adjs[i], [Root], i, [], M.degree, M.tree_nbr) # starting from the Root
# Seg = [[VrtxCmpnts[i][Idx] for Idx in Pair] for Pair in Path] # The Idx is LOCAL (i.e., within the connected component) index of a vertex.
# pruning the MST Forrest 2011-09-24
Terminal, Branching =[], []
for Vrtx in xrange(0,Num):
if Degree[Vrtx] ==1:
Terminal.append(Vrtx)
elif Degree[Vrtx] > 2:
Branching.append(Vrtx)
Special = [VrtxCmpnts[i].index(Vrtx) for Vrtx in SpecialGroup[i]] # converting global ID to local ID
Path, NodeColor = prune(Path, Degree, TreeNbr, Terminal, Branching, Special, VrtxCmpnts[i])
# insert le troter's approach here
print len(Path), "links after pruning."
Length_of_fundus = fundiLength(Coordinates, Path) # Add, Forrest 2011-11-01
for Pair in Path:
# (Src, Dst, Cost, CID) = Pair # commented, Forrest 2011-09-24
(Src, Dst) = Pair # Forrest 2011-09-24
# Segs.append( [VrtxCmpnts[i][Src], VrtxCmpnts[i][Dst], Cost, i] )
#Path = [ VrtxCmpnts[i][Src], VrtxCmpnts[i][Dst] ] # Forrest 2011-07-17, do NOT map links onto the mesh Commented 2011-10-21
# FundusLen[VrtxCmpnts[i][Src]]=len(Path) # Forrest 2011-11-01 This is Manhattan distance
# FundusLen[VrtxCmpnts[i][Dst]]=len(Path) # Forrest 2011-11-01 This is Manhattan distance
FundusLen[VrtxCmpnts[i][Src]] = Length_of_fundus
FundusLen[VrtxCmpnts[i][Dst]] = Length_of_fundus
FundusID[VrtxCmpnts[i][Src]]= i # Remember, now FundusID starts from 0
FundusID[VrtxCmpnts[i][Dst]]= i # Remember, now FundusID starts from 0
Segs.append([VrtxCmpnts[i][Src], VrtxCmpnts[i][Dst]])
# Segs += Seg
# print "number of segments: ", len(Segs)
Color.update(NodeColor)
return Segs, Color, FundusLen, FundusID
def lineUp(Special, NbrLst, VrtxCmpnts, VtxCoords, CurvatureDB):
'''Form an MST of a set of vertexes and prune unwanted subtrees
Parameters
===========
Special : list of integers
A list of special vertexes to be connected by MST
It can be all vertexes on original mesh
NbrLst : list of list of integers
neighbor list of vertexes
VrtxCmpnts : list of list of integers
each element of VrtxCmpnt is a list of vertexes that are in the same sulcal component
Curve : list of integers
vertexes on the curve segments that form fundi (NOT actually lined up as of 04/12/2011)
VtxCoords : list of 3-tuples of doubles
each element is the coordinate of a vertex element
CurvatureDB : list of doubles
each element is the curvature value of a vertex
Links : list of list of 2-tuples of integers
each element is a list of 2-tuples containing vertexes (in global ID) at two ends of an edge on MSTs
Since the original mesh has been downsampled, each link in *Links* is a real edge on the mesh.
SpecialGroup: list of list of integers
each element is a list of Special vertexes in each connected component
Ring : list of integers
IDs of vertexes that have 0-curvature
Curv: float
curvature value of a vertex
Idx: integer
ID of a vertex
Notes
=======
last updated: 2011-07-17 Forrest
The function name lineUp does NOT reveal its purpose. Originally, this function was used to connect
special vertexes via the MST algorithm. But now the MST spans both special vertexes and common
vertexes.
'''
# Step 1: downsample the graph
Ring = []
# for Idx, Curv in enumerate(CurvatureDB):
# if abs(Curv) <0.01:
# Ring.append(Idx)
NewVrtxCmpnts, NewNbrLst, SpecialGroup = downsample(Special+Ring, Special, VrtxCmpnts, NbrLst, 0)
# step 2: prepare the distance matrix for FUNDUS vertex, for each fundus vertex, only 2 shortest edges are left
# Columns and rows for non-fundus vertexes are all zeros.
Dists = gen_Adj(NewVrtxCmpnts, NewNbrLst, CurvatureDB, Special)
# Dists = zeroout(Dists, VrtxCmpnt, FundiList) # only needed if MST only spans over special vertexes
# Now Dists are weighted adjacency matrix of fundus vertexes in each component
# io_file.wrtLists(DistFile+'.reduced', Dists) # optional line, for debugging
# Dists = io_file.readLists(DistFile+'.reduced') # for fast debugging only
# End of step 2
# Step 3: use MST to connect all vertexes in NewVrtxCmpnts and dump into VTK format
#FundusCmpnts = filterCmpnt(Nodes, VrtxCmpnts) # no need if Nodes are all vertexes on the mesh
#FundusCmpnts = VrtxCmpnts # if MST will span via all nodes on the mesh
#Links = mst(Dists, VrtxCmpnts, FundusCmpnts, NbrLst) # deactivated Forrest 2011-07-21 because filters is done in downsample()
Links, NodeColor, FundusLen, FundusID = mst(Dists, NewVrtxCmpnts, SpecialGroup, NbrLst, VtxCoords) # Running version
#Links = mst(Dists, NewVrtxCmpnts, NewVrtxCmpnts, NbrLst) # debugging version
return Links, NodeColor, FundusLen, FundusID
# End of functions to connect special vertexes ---
def stepFilter(L, Y, Z):
'''Return L such that L[i]:= L[i] if L[i] > Y, else, Z.
'''
for Idx, X in enumerate(L):
if X>Y:
L[Idx] = X
else:
L[Idx] = Z
return L
def fundiFromPits(Pits, Maps, Mesh, FundiVTK, SulciThld, SulciMap, Extract_Fundi_on_Map):
'''Connecting pits into fundus curves
Parameters
============
Pits: list of integers
IDs of vertexes that are pits
Maps: dictionary of lists of floats
Keys are strings, e.g., meancurv, depth, etc.
Values are list of per-vertex values that will be used as structure feature vectors, e.g., thickness
Mesh: List of two lists of floats/integers
The first are points from a VTK file.
The second are triangular faces from a VTK file.
Vrtx: list of 3-tuples of floats
Each element is the X-, Y- and Z-cooridnates of a vertex on the surface, normally pial
Fc: list of 3-tuples of integers
Each element is the ids of 3 vertexes that form one triangle on the surface
SulciThld: a float
The number that was used to threshold the surface to get sulci.
This is needed for loading the component file, but it has nothing to do with fundi extraction itself
Extract_Fundi_on_Map: string
The key for the map on which fundi will be built (default: depth)
Notes
========
Function and variable names are not fully updated yet. A name like 'curvature' is misleading here.
'''
[Vrtx, Fc] = Mesh
scalar_names = [Name for Name in Maps.iterkeys()]
scalar_lists = [scalar_list for scalar_list in Maps.itervalues()]
LastSlash = len(FundiVTK) - FundiVTK[::-1].find('/')
Hemi = FundiVTK[:FundiVTK[LastSlash:].find('.')+LastSlash]# path up to which hemisphere, e.g., /home/data/lh
NbrLst = libbasin.vrtxNbrLst(len(Vrtx), Fc, Hemi)
FcCmpnt, VrtxCmpnt = libbasin.compnent([], [], [], ".".join([Hemi, SulciMap, str(SulciThld)]))
PSegs, NodeColor, FundusLen, FundusID = lineUp(Pits, NbrLst, VrtxCmpnt, Vrtx, Maps[Extract_Fundi_on_Map])
len_scalars = [0 for i in xrange(0,len(NbrLst))]
for Key, Value in FundusLen.iteritems():
len_scalars[Key] = Value
scalar_names.append('fundusLength')
scalar_lists.append(len_scalars)
FIDscalars = [-1 for i in xrange(0,len(NbrLst))] # value for gyri is now -1 Forrest 2011-11-01
for Key, Value in FundusID.iteritems():
FIDscalars[Key] = Value
scalar_names.append('fundusID')
scalar_lists.append(FIDscalars)
# io_vtk.write_lines(FundiVTK, Vrtx, Pits, PSegs, scalar_lists, scalar_names)
io_vtk.write_vtk(FundiVTK, Vrtx, indices=Pits, lines=PSegs, faces=[],
scalars=scalar_lists, scalar_names=scalar_names, scalar_type='int')
def getFeatures(InputFiles, Type, Options):
'''Loads input files of different types and extraction types, and passes them to the functions that really do fundi/pits/sulci extraction
Parameters
===========
Type: string
If Type == 'FreeSurfer',
the input files should at least include a curvature/convexity file and a surface file.
If Type == 'vtk',
the user needs to specify which map to use to threshold the surface
and which map to use to extract pits and fundi.
mapThreshold: list
a list of per-vertex values that will be used to threshold the cortical surface to get sulcal basins, e.g., curvature/convexity
mapExtract: list
a list of per-vertex values that will be used to extract fundi/pits, e.g., curvature/convexity
mapFeature: lists of lists of floats
lists of list of per-vertex values that will be used as structure feature vectors, e.g., thickness
PrefixBasin: string
the prefix for all outputs that are only related to basin extraction,
e.g., connected components, basins and gyri.
PrefixExtract: string
the prefix for all outputs that are the result of extraction,
e.g., pits and fundi.
Maps: dictionary
Keys are scalar names, e.g., depth and meancurv. Values are float-value lists, representing maps.
Each map is of size 1 by #vertices
Notes
======
12/23/2011: We are now rewriting the interface from getFundi to libbasin.getBasin, fundiFromPits and fundiFromSkel
Now variables passed into them are data rather than file names
'''
if Type == 'FreeSurfer':
print "\t FreeSurfer mode\n"
[SurfFile, ThickFile, CurvFile, ConvFile,\
FundiVTK, PitsVTK, SulciVTK, Use, SulciThld]\
= InputFiles
Clouchoux = False
Maps = {}
if ThickFile != "":
Maps["thickness"] = freesurfer.read_curvature(ThickFile)
if CurvFile != "":
Maps["meancurv"] = freesurfer.read_curvature(CurvFile)
if ConvFile != "":
Maps["conv"] = freesurfer.read_curvature(ConvFile)
if Use == 'conv':
Extract_Sulci_on_Map = "conv"
elif Use == 'curv':
Extract_Sulci_on_Map = "meancurv"
else:
print "[ERROR] Unrecognized map to use:", Use
exit()
Mesh = freesurfer.read_surface(SurfFile)
Extract_Fundi_on_Map = "conv"
elif Type == 'vtk':
print "\t Joachim's VTK mode\n"
[DepthVTK, ConvexityFile, ThickFile, MeanCurvVTK, GaussCurvVTK, FundiVTK, PitsVTK, SulciVTK, SulciThld, Clouchoux] = InputFiles
Maps = {}
print " Loading depth map"
Faces, Lines, Vertexes, Points, nPoints, Depth, name, input_vtk = io_vtk.read_vtk(DepthVTK)
Maps['depth'] = Depth
if MeanCurvVTK != "":
print " Loading mean curvature map"
Faces, Lines, Vertexes, Points, nPoints, Maps['meancurv'], name, input_vtk = io_vtk.read_vtk(MeanCurvVTK)
if GaussCurvVTK != "":
print " Loading Gaussian curvature map"
Faces, Lines, Vertexes, Points, nPoints, Maps['gausscurv'], name, input_vtk = io_vtk.read_vtk(GaussCurvVTK)
if ThickFile != '':
Maps['thickness'] = freesurfer.read_curvature(ThickFile)
if ConvexityFile != '':
Maps['sulc'] = freesurfer.read_curvature(ConvexityFile)
Mesh = [Vertexes, Faces]
Extract_Sulci_on_Map = 'depth' # This will become an option for users later.
Extract_Fundi_on_Map = 'depth' # This will become an option for users later.
## common parts for both FreeSurfer and vtk type
if Clouchoux:
libbasin.getBasin_and_Pits(Maps, Mesh, SulciVTK, PitsVTK, SulciThld = SulciThld, PitsThld =0, Quick=False, Clouchoux=True, SulciMap =Extract_Sulci_on_Map) # extract depth map from sulci and pits from mean and Gaussian curvatures
else:
libbasin.getBasin_and_Pits(Maps, Mesh, SulciVTK, PitsVTK, SulciThld = SulciThld, PitsThld =0, Quick=False, Clouchoux=False, SulciMap =Extract_Sulci_on_Map) # by default, extract sulci and pits from depth map
Pits=io_vtk.read_vertices(PitsVTK)
fundiFromPits(Pits, Maps, Mesh, FundiVTK, SulciThld, Extract_Sulci_on_Map, Extract_Fundi_on_Map)
# end of common for both FreeSurfer and vtk type
# elif Type == 'clouchoux':
# print "\t Clouchoux-type pits while sulci and pits from depth"
# [VTKFile, SurfFile2, ConvexFile, ThickFile] = InputFiles
# if SurfFile2 != '':
# Vertexes2, Face2 = io_file.readSurf(SurfFile2)
# Mesh2 = [Vertexes2, Face2]
# else:
# Mesh2 = []
#
# # step 1, get pits, Clouchoux style
# import clouchoux
# Vertexes, Faces, Depth, MCurv, GCurv = clouchoux.load_curvs(VTKFile)
# Mesh = [Vertexes, Faces]
#
## Pits = clouchoux.clouchoux_pits(Vertexes, MCurv, GCurv)
## clouchoux.write_Clouchoux_Pits(Vertexes, Pits, VTKFile[:-3]+"pits.vtk", VTKFile[:VTKFile.find(".")]+".inflated.vtk")
#
# # step 2, get sulci, from depth
# # now this step is skipped to avoid changing basin and gyri file structures
# MapBasin = Depth
# PrefixBasin = VTKFile[:VTKFile.find(".clouchoux")]+ ".travel.depth" # now this is the travel depth based sulci, e.g., lh.travel.depth
# print "PrefixBasin:", PrefixBasin
# libbasin.getBasin_only(MapBasin, Mesh, PrefixBasin, Mesh2, Threshold = 0.2)
#
# # step 3, connect Clouchoux's pits on depth map into fundi
# # preparing input parameters for fundiFromPits()
#
# MapExtract = Depth
# MapFeature = [Depth, MCurv, GCurv]
# FeatureNames = ["depth", "mean_curv", "gauss_curv"]
# PrefixExtract = VTKFile[:-3] + 'depth.fundi'
# print "PrefixExtract:", PrefixExtract
#
# import pyvtk
# Pits=pyvtk.VtkData(VTKFile[:-3] + 'pits.vtk').structure.vertices[0] # since pits are just computed above
# fundiFromPits(Pits, MapExtract, FeatureNames, MapFeature, Mesh, PrefixBasin, PrefixExtract, Mesh2)
## The following elif case is temporarily impossible. So commented.
# elif Type == 'vtk-curv':
# [DepthVTK, CurvFile, SurfFile, SurfFile2, ConvexityFile, ThickFile]= InputFiles
#
# import vtk
# Reader = vtk.vtkDataSetReader()
# Reader.SetFileName(DepthVTK)
# Reader.ReadAllScalarsOn()
# Reader.Update()
# Data = Reader.GetOutput()
# Vertexes= [Data.GetPoint(i) for i in xrange(Data.GetNumberOfPoints())]
# Polys = Data.GetPolys()
# Polys = Polys.GetData()
# Faces=[ [Polys.GetValue(Idx) for Idx in range(4*i+1, 4*(i+1))] for i in range(0, Data.GetNumberOfPolys())]
#
# Curvature = io_file.readCurv(CurvFile)
#
# PointData = Data.GetPointData()
# DepthMap = PointData.GetArray('depth')
# Depth = [-1*DepthMap.GetValue(i) for i in xrange(DepthMap.GetSize() )] # flip the sign of depth map here because original code works with minimum in the bottom.
#
# MapBasin = Curvature
# MapExtract = Depth
#
# PrefixBasin = DepthVTK[:-4] # drop suffix .vtk
#
# PrefixExtract = CurvFile + '.depth'
#
# MapFeature = [Curvature, Depth]
#
# FeatureNames = ['curvature','depth']
# if ConvexityFile != '':
# Convexity = io_file.readCurv(ConvexityFile)
# MapFeature.append(Convexity)
# FeatureNames.append('convexity')
# if ThickFile != '':
# Thickness = io_file.readCurv(ThickFile)
# MapFeature.append(Thickness)
# FeatureNames.append('thickness')
#
# Mesh = [Vertexes, Faces]
# if SurfFile2 != '':
# Vertexes2, Faces2 = io_file.readSurf(SurfFile2)
# Mesh2 = [Vertexes2, Faces2]
# else:
# Mesh2 = []
## End of the unreachable elif case (commented out above).
#def fundiFromSkel(Curvature, FeatureNames, MapFeature, Vrtx, Fc, SurfFile, CurvFile, SurfFile2):
#
# LUTname, LUT = [], []
# for Idx, Name in enumerate(FeatureNames):
# Table = MapFeature[Idx]
# if Name == 'curv':
# LUT.append(stepFilter(Table, 0, 0))
# LUTname.append('curv')
# elif Name == 'sulc':
# LUT.append(stepFilter(Table, 0, 0))
# LUTname.append('sulc')
# elif Name == 'thickness':
# LUT.append(Table) # no filtering for thickness
# LUTname.append('thickness')
#
# Prefix= CurvFile
#
# NbrLst = libbasin.fcNbrLst(Fc, SurfFile) # Activated 2011-04-30, 21:46
#
# Center = libskel.getCenter(Fc, Curvature) # get the per-face curvature
#
# Candidate, Strict = libskel.faceFundi(Fc, Curvature, NbrLst, Center, Skip2= True, Skip3 = True)
#
# VrtxNbrLst = libbasin.vrtxNbrLst(len(Vrtx), Fc, SurfFile) # activated on 2011-04-30 21:55
#
# FaceCmpntFile = CurvFile + '.cmpnt.face'
# FaceCmpnts = io_file.loadCmpnt(FaceCmpntFile)
# Clouds = libskel.fc2VrtxCmpnt(Fc, Candidate, FaceCmpnts) # step 1: Convert faces in to Vrtx, clustered by components
# DTMap = libskel.myDT(Clouds, VrtxNbrLst) # step 2: my own distance transformation
#
# DTLUT = [0 for i in xrange(0,len(VrtxNbrLst))] # initialize the LUT for all vertexes in the surface as -1
# for i in xrange(0,len(Clouds)):
# for j in xrange(0,len(Clouds[i])):
# DTLUT[Clouds[i][j]] = DTMap[i][j]
#
# LUTname.append('DT')
# LUT.append(DTLUT)
#
## output 2: Skeletons
#
# Skeletons = libskel.faceToCurve(Fc, Candidate, FaceCmpnts, VrtxNbrLst)
#
# Candidate = []
# for Skeleton in Skeletons:
# Candidate += Skeleton
#
# FCandidate = Prefix + '.skeletons'
# io_file.writeList(FCandidate, Candidate)
#
# print "\t Saving Skeletons into VTK files"
#
# VTKFile = FCandidate + "." + SurfFile[-1*SurfFile[::-1].find('.'):] + '.vtk'
# libvtk.vrtxLst2VTK(VTKFile, SurfFile, FCandidate)
# if SurfFile2 != '':
# VTKFile = FCandidate + "." + SurfFile2[-1*SurfFile2[::-1].find('.'):] + '.vtk'
# libvtk.vrtxLst2VTK(VTKFile, SurfFile2, FCandidate)
## End of output 2: Skeletons
#
## output 3: fundus curves connected from skeletons
#
# VrtxCmpntFile = CurvFile + '.cmpnt.vrtx'
# VrtxCmpnt = io_file.loadCmpnt(VrtxCmpntFile)
#
# Candidate, NodeColor, FundusLen, FundusID = lineUp(Candidate, VrtxNbrLst, VrtxCmpnt, Vrtx, CurvFile, Curvature) # activated 2011-05-28 17:53
#
# print "\t Saving fundi from Skeleton into VTK files"
#
# LenLUT = [0 for i in xrange(0,len(NbrLst))]
# for Key, Value in FundusLen.iteritems():
# LenLUT[Key] = Value
#
# LUTname.append('fundusLength')
# LUT.append(LenLUT)
#
# FIDLUT = [-1 for i in xrange(0,len(NbrLst))] # value for gyri is now -1 Forrest 2011-11-01
# for Key, Value in FundusID.iteritems():
# FIDLUT[Key] = Value
#
# LUTname.append('fundusID')
# LUT.append(FIDLUT)
#
# FCandidate = Prefix + '.fundi.from.skeletons'
# io_file.writeFundiSeg(FCandidate, Candidate)
#
# VTKFile = FCandidate + "." + SurfFile[-1*SurfFile[::-1].find('.'):] + '.vtk'
# libvtk.seg2VTK(VTKFile, SurfFile, FCandidate, LUT=LUT, LUTname=LUTname)
# if SurfFile2 != '':
# VTKFile = FCandidate + "." + SurfFile2[-1*SurfFile2[::-1].find('.'):] + '.vtk'
# libvtk.seg2VTK(VTKFile, SurfFile2, FCandidate, LUT=LUT, LUTname=LUTname)
#
## End of output 3: fundus curves connected from skeletons
|
binarybottle/mindboggle_sidelined
|
fundi_from_pits/libfundi.py
|
Python
|
apache-2.0
| 39,768
|
[
"Gaussian",
"VTK"
] |
de6c32d3f5ae96ce253ba8352e83c8ea15820c2f6a1ffff8f1621b435ec0db31
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests for the factor module in antitile.
"""
import unittest
import doctest
from operator import mul
from functools import reduce
from itertools import product
from antitile import factor
class TestFactor(unittest.TestCase):
def setUp(self):
self.domains = [factor.Integer,
factor.Gaussian,
factor.Eisenstein,
factor.Steineisen]
def test_multiplication(self):
for d in self.domains:
for v1 in product(range(-5, 10), repeat=2):
for v2 in product(range(-5, 10), repeat=2):
number1 = d(*v1)
number2 = d(*v2)
px = number1 * number2
anorm1 = number1.anorm()
anorm2 = number2.anorm()
self.assertEqual(px.anorm(), anorm1*anorm2)
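    # Added note: test_multiplication checks that the algebraic norm is multiplicative,
    # N(x*y) == N(x)*N(y); e.g. for Gaussian integers a + b*i the norm is a**2 + b**2,
    # so N((1+i)*(2+i)) = N(1+3*i) = 10 = 2*5 (the 'anorm' naming is the module's own).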
def test_factoring(self):
for d in self.domains:
for v in product(range(-5, 100), repeat=2):
number = d(*v)
fx = number.factor()
backcalc_anorm = reduce(mul, (f.anorm() for f in fx), 1)
self.assertEqual(number.anorm(), backcalc_anorm)
unit = d(1)
backcalc = reduce(mul, fx, unit)
self.assertEqual(number, backcalc)
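        # Added note: rebuilding the value as unit * f1 * f2 * ... (the reduce starting
        # from d(1)) must reproduce the original number exactly, so factor() has to
        # account for any unit factor it pulls out during factorization.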
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(factor))
return tests
if __name__ == '__main__':
unittest.main()
|
brsr/antitile
|
tests/test_factor.py
|
Python
|
mit
| 1,525
|
[
"Gaussian"
] |
26745b715bc2eda4b035d8767ece5e0b12b709339c8c8f321e0c0254ed1b8426
|
"""User-friendly public interface to polynomial functions. """
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy,
)
from sympy.core.sympify import (
sympify, SympifyError,
)
from sympy.core.decorators import (
_sympifyit,
)
from sympy.polys.polyclasses import (
DMP, ANP, DMF,
)
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import (
together,
)
from sympy.polys.rootisolation import (
dup_isolate_real_roots_list,
)
from sympy.polys.groebnertools import (
sdp_from_dict, sdp_div, sdp_groebner,
)
from sympy.polys.monomialtools import (
Monomial, monomial_key,
)
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
PolificationFailed, FlagError,
MultivariatePolynomialError,
ExactQuotientFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.polys.polycontext import (
register_context,
)
from sympy.mpmath import (
polyroots as npolyroots,
)
from sympy.utilities import (
any, all, group,
)
from sympy.ntheory import isprime
import sympy.polys
from sympy.polys.domains import FF, QQ
from sympy.polys.constructor import construct_domain
from sympy.polys import polyoptions as options
class Poly(Expr):
"""Generic class for representing polynomial expressions. """
__slots__ = ['rep', 'gens']
is_Poly = True
def __new__(cls, rep, *gens, **args):
"""Create a new polynomial instance out of something useful. """
opt = options.build_options(gens, args)
if 'order' in opt:
raise NotImplementedError("'order' keyword is not implemented yet")
if hasattr(rep, '__iter__'):
if isinstance(rep, dict):
return cls._from_dict(rep, opt)
else:
return cls._from_list(list(rep), opt)
else:
rep = sympify(rep)
if rep.is_Poly:
return cls._from_poly(rep, opt)
else:
return cls._from_expr(rep, opt)
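    # Added illustrative note (a sketch, not a doctest from the original source): the
    # dispatch above lets the same polynomial be built from several representations,
    # e.g. Poly(x**2 + 1) from an expression, Poly([1, 0, 1], x) from dense coefficients
    # listed from the highest degree down, or Poly({(2,): 1, (0,): 1}, x) from a
    # monomial-to-coefficient dict; all three are expected to yield x**2 + 1 over ZZ.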
@classmethod
def new(cls, rep, *gens):
"""Construct :class:`Poly` instance from raw representation. """
if not isinstance(rep, DMP):
raise PolynomialError("invalid polynomial representation: %s" % rep)
elif rep.lev != len(gens)-1:
raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
obj = Basic.__new__(cls)
obj.rep = rep
obj.gens = gens
return obj
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded("can't initialize from 'dict' without generators")
level = len(gens)-1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.iteritems():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded("can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError("'list' representation not supported")
level = len(gens)-1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = map(domain.convert, rep)
return cls.new(DMP.from_list(rep, level, domain), *gens)
@classmethod
def _from_poly(cls, rep, opt):
"""Construct a polynomial from a polynomial. """
if cls != rep.__class__:
rep = cls.new(rep.rep, *rep.gens)
gens = opt.gens
order = opt.order
field = opt.field
domain = opt.domain
if gens and rep.gens != gens:
if set(rep.gens) != set(gens):
return cls._from_expr(rep.as_expr(), opt)
else:
rep = rep.reorder(*gens)
if 'order' in opt:
rep = rep.set_order(order)
if 'domain' in opt and domain:
rep = rep.set_domain(domain)
elif field is True:
rep = rep.to_field()
return rep
@classmethod
def _from_expr(cls, rep, opt):
"""Construct a polynomial from an expression. """
rep, opt = _dict_from_expr(rep, opt)
return cls._from_dict(rep, opt)
def __getnewargs__(self):
"""Data used by pickling protocol version 2. """
return (self.rep, self.gens)
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep, self.gens)
def __hash__(self):
return super(Poly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial expression.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols
set([x])
>>> Poly(x**2 + y).free_symbols
set([x, y])
>>> Poly(x**2 + y, x).free_symbols
set([x, y])
"""
symbols = set([])
for gen in self.gens:
symbols |= gen.free_symbols
return symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
"""
Free symbols of the domain of ``self``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols_in_domain
set()
>>> Poly(x**2 + y).free_symbols_in_domain
set()
>>> Poly(x**2 + y, x).free_symbols_in_domain
set([y])
"""
domain, symbols = self.rep.dom, set()
if domain.is_Composite:
for gen in domain.gens:
symbols |= gen.free_symbols
elif domain.is_EX:
for coeff in self.coeffs():
symbols |= coeff.free_symbols
return symbols
@property
def args(self):
"""
        Don't mess with the core.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).args
[x**2 + 1]
"""
return [self.as_expr()]
@property
def gen(self):
"""
Return the principal generator.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).gen
x
"""
return self.gens[0]
@property
def domain(self):
"""Get the ground domain of ``self``. """
return self.get_domain()
@property
def zero(self):
"""Return zero polynomial with ``self``'s properties. """
return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
"""Return one polynomial with ``self``'s properties. """
return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
    def unit(self):
"""Return unit polynomial with ``self``'s properties. """
return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
"""
Make ``f`` and ``g`` belong to the same domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
>>> f
Poly(1/2*x + 1, x, domain='QQ')
>>> g
Poly(2*x + 1, x, domain='ZZ')
>>> F, G = f.unify(g)
>>> F
Poly(1/2*x + 1, x, domain='QQ')
>>> G
Poly(2*x + 1, x, domain='QQ')
"""
_, per, F, G = f._unify(g)
return per(F), per(G)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
gens = _unify_gens(f.gens, g.gens)
dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens)-1
if f.gens != gens:
f_monoms, f_coeffs = _dict_reorder(f.rep.to_dict(), f.gens, gens)
if f.rep.dom != dom:
f_coeffs = [ dom.convert(c, f.rep.dom) for c in f_coeffs ]
F = DMP(dict(zip(f_monoms, f_coeffs)), dom, lev)
else:
F = f.rep.convert(dom)
if g.gens != gens:
g_monoms, g_coeffs = _dict_reorder(g.rep.to_dict(), g.gens, gens)
if g.rep.dom != dom:
g_coeffs = [ dom.convert(c, g.rep.dom) for c in g_coeffs ]
G = DMP(dict(zip(g_monoms, g_coeffs)), dom, lev)
else:
G = g.rep.convert(dom)
else:
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove]+gens[remove+1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
def per(f, rep, gens=None, remove=None):
"""
Create a Poly out of the given representation.
**Examples**
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x, y
>>> from sympy.polys.polyclasses import DMP
>>> a = Poly(x**2 + 1)
>>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
Poly(y + 1, y, domain='ZZ')
"""
if gens is None:
gens = f.gens
if remove is not None:
gens = gens[:remove]+gens[remove+1:]
if not gens:
return f.rep.dom.to_sympy(rep)
return f.__class__.new(rep, *gens)
def set_domain(f, domain):
"""Set the ground domain of ``f``. """
opt = options.build_options(f.gens, {'domain': domain})
return f.per(f.rep.convert(opt.domain))
def get_domain(f):
"""Get the ground domain of ``f``. """
return f.rep.dom
def set_modulus(f, modulus):
"""
Set the modulus of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
Poly(x**2 + 1, x, modulus=2)
"""
modulus = options.Modulus.preprocess(modulus)
return f.set_domain(FF(modulus))
def get_modulus(f):
"""
Get the modulus of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, modulus=2).get_modulus()
2
"""
domain = f.get_domain()
if not domain.has_CharacteristicZero:
return Integer(domain.characteristic())
else:
raise PolynomialError("not a polynomial over a Galois field")
def _eval_subs(f, old, new):
"""Internal implementation of :func:`subs`. """
if old in f.gens:
if new.is_number:
return f.eval(old, new)
else:
try:
return f.replace(old, new)
except PolynomialError:
pass
return f.as_expr().subs(old, new)
def exclude(f):
"""
Remove unnecessary generators from ``f``.
        **Examples**
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
def replace(f, x, y=None):
"""
Replace ``x`` with ``y`` in generators list.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1, x).replace(x, y)
Poly(y**2 + 1, y, domain='ZZ')
"""
if y is None:
if f.is_univariate:
x, y = f.gen, x
else:
raise PolynomialError("syntax supported only in univariate case")
if x == y:
return f
if x in f.gens and y not in f.gens:
dom = f.get_domain()
if not dom.is_Composite or y not in dom.gens:
gens = list(f.gens)
gens[gens.index(x)] = y
return f.per(f.rep, gens=gens)
raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def reorder(f, *gens, **args):
"""
Efficiently apply new order of generators.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
Poly(y**2*x + x**2, y, x, domain='ZZ')
"""
opt = options.Options((), args)
if not gens:
gens = _sort_gens(f.gens, opt=opt)
elif set(f.gens) != set(gens):
raise PolynomialError("generators list can differ only up to order of elements")
rep = dict(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens)))
return f.per(DMP(rep, f.rep.dom, len(gens)-1), gens=gens)
def ltrim(f, gen):
"""
Remove dummy generators from the "left" of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
Poly(y**2 + y*z**2, y, z, domain='ZZ')
"""
rep = f.as_dict(native=True)
j = f._gen_to_level(gen)
terms = {}
for monom, coeff in rep.iteritems():
monom = monom[j:]
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError("can't left trim %s" % f)
gens = f.gens[j:]
return f.new(DMP.from_dict(terms, len(gens)-1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
"""
Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
True
>>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
False
"""
f_gens = list(f.gens)
indices = set([])
for gen in gens:
try:
index = f_gens.index(gen)
except ValueError:
raise GeneratorsError("%s doesn't have %s as generator" % (f, gen))
else:
indices.add(index)
for monom in f.monoms():
for i, elt in enumerate(monom):
if i not in indices and elt:
return False
return True
def to_ring(f):
"""
Make the ground domain a ring.
**Examples**
>>> from sympy import Poly, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, domain=QQ).to_ring()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'to_ring'):
result = f.rep.to_ring()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_ring')
return f.per(result)
def to_field(f):
"""
Make the ground domain a field.
**Examples**
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x, domain=ZZ).to_field()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_field'):
result = f.rep.to_field()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_field')
return f.per(result)
def to_exact(f):
"""
Make the ground domain exact.
**Examples**
>>> from sympy import Poly, RR
>>> from sympy.abc import x
>>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_exact'):
result = f.rep.to_exact()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_exact')
return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True), field=field)
return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
"""Take a continuous subsequence of terms of ``f``. """
if n is None:
j, m, n = 0, x, m
else:
j = f._gen_to_level(x)
m, n = int(m), int(n)
if hasattr(f.rep, 'slice'):
result = f.rep.slice(m, n, j)
else: # pragma: no cover
raise OperationNotSupported(f, 'slice')
return f.per(result)
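    # Added note (an assumption about DMP.slice, not verified here): slice(x, m, n) is
    # expected to keep only the terms whose exponent in ``x`` lies in [m, n), so slicing
    # x**3 + 2*x + 1 with (0, 2) would keep 2*x + 1.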
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
"""
return [ f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order) ]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
"""
return [ (m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order) ]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [ f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs() ]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [ (m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms() ]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func((k,), coeff):
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError("%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
"""
Returns the number of non-zero terms in ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x - 1).length()
3
"""
return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
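    # Added note (sketch): for a univariate polynomial the list form is the dense
    # coefficient list from the highest degree down, e.g. Poly(x**2 + 2, x).as_list()
    # is expected to give [1, 0, 2].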
def as_expr(f, *gens):
"""
        Convert a polynomial to an expression.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
gens = f.gens
elif len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.iteritems():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError("%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
"""
Convert algebraic coefficients to rationals.
**Examples**
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
"""
Inject ground domain generators into ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
>>> f.inject()
Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
>>> f.inject(front=True)
Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
"""
dom = f.rep.dom
if dom.is_Numerical:
return f
elif not dom.is_Poly:
raise DomainError("can't inject generators over %s" % dom)
if hasattr(f.rep, 'inject'):
result = f.rep.inject(front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'inject')
if front:
gens = dom.gens + f.gens
else:
gens = f.gens + dom.gens
return f.new(result, *gens)
def eject(f, *gens):
"""
Eject selected generators into the ground domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
>>> f.eject(x)
Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
>>> f.eject(y)
Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
"""
dom = f.rep.dom
if not dom.is_Numerical:
raise DomainError("can't eject generators over %s" % dom)
n, k = len(f.gens), len(gens)
if f.gens[:k] == gens:
_gens, front = f.gens[n-k:], True
elif f.gens[-k:] == gens:
_gens, front = f.gens[:n-k], False
else:
raise NotImplementedError("can only eject front or back generators")
dom = dom.inject(*gens)
if hasattr(f.rep, 'eject'):
result = f.rep.eject(dom, front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'eject')
return f.new(result, *_gens)
def terms_gcd(f):
"""
Remove GCD of terms from the polynomial ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'terms_gcd'):
J, result = f.rep.terms_gcd()
else: # pragma: no cover
raise OperationNotSupported(f, 'terms_gcd')
return J, f.per(result)
def add_ground(f, coeff):
"""
Add an element of the ground domain to ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).add_ground(2)
Poly(x + 3, x, domain='ZZ')
"""
if hasattr(f.rep, 'add_ground'):
result = f.rep.add_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'add_ground')
return f.per(result)
def sub_ground(f, coeff):
"""
Subtract an element of the ground domain from ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).sub_ground(2)
Poly(x - 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'sub_ground'):
result = f.rep.sub_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub_ground')
return f.per(result)
def mul_ground(f, coeff):
"""
        Multiply ``f`` by an element of the ground domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).mul_ground(2)
Poly(2*x + 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'mul_ground'):
result = f.rep.mul_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul_ground')
return f.per(result)
def quo_ground(f, coeff):
"""
        Quotient of ``f`` by an element of the ground domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).quo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).quo_ground(2)
Poly(x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'quo_ground'):
result = f.rep.quo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo_ground')
return f.per(result)
def exquo_ground(f, coeff):
"""
        Exact quotient of ``f`` by an element of the ground domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).exquo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).exquo_ground(2)
Traceback (most recent call last):
...
ExactQuotientFailed: 2 does not divide 3 in ZZ
"""
if hasattr(f.rep, 'exquo_ground'):
result = f.rep.exquo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo_ground')
return f.per(result)
def abs(f):
"""
Make all coefficients in ``f`` positive.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).abs()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'abs'):
result = f.rep.abs()
else: # pragma: no cover
raise OperationNotSupported(f, 'abs')
return f.per(result)
def neg(f):
"""
Negate all coefficients in ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).neg()
Poly(-x**2 + 1, x, domain='ZZ')
>>> -Poly(x**2 - 1, x)
Poly(-x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'neg'):
result = f.rep.neg()
else: # pragma: no cover
raise OperationNotSupported(f, 'neg')
return f.per(result)
def add(f, g):
"""
Add two polynomials ``f`` and ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
Poly(x**2 + x - 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x) + Poly(x - 2, x)
Poly(x**2 + x - 1, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.add_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'add'):
result = F.add(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'add')
return per(result)
def sub(f, g):
"""
Subtract two polynomials ``f`` and ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
Poly(x**2 - x + 3, x, domain='ZZ')
>>> Poly(x**2 + 1, x) - Poly(x - 2, x)
Poly(x**2 - x + 3, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.sub_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'sub'):
result = F.sub(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub')
return per(result)
def mul(f, g):
"""
Multiply two polynomials ``f`` and ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x)*Poly(x - 2, x)
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.mul_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'mul'):
result = F.mul(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul')
return per(result)
def sqr(f):
"""
Square a polynomial ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).sqr()
Poly(x**2 - 4*x + 4, x, domain='ZZ')
>>> Poly(x - 2, x)**2
Poly(x**2 - 4*x + 4, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqr'):
result = f.rep.sqr()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqr')
return f.per(result)
def pow(f, n):
"""
Raise ``f`` to a non-negative power ``n``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).pow(3)
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
>>> Poly(x - 2, x)**3
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
"""
n = int(n)
if hasattr(f.rep, 'pow'):
result = f.rep.pow(n)
else: # pragma: no cover
raise OperationNotSupported(f, 'pow')
return f.per(result)
def pdiv(f, g):
"""
Polynomial pseudo-division of ``f`` by ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
(Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pdiv'):
q, r = F.pdiv(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pdiv')
return per(q), per(r)
def prem(f, g):
"""
Polynomial pseudo-remainder of ``f`` by ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
Poly(20, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'prem'):
result = F.prem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'prem')
return per(result)
def pquo(f, g):
"""
Polynomial pseudo-quotient of ``f`` by ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
Poly(2*x + 4, x, domain='ZZ')
>>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pquo'):
result = F.pquo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pquo')
return per(result)
def pexquo(f, g):
"""
Polynomial exact pseudo-quotient of ``f`` by ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pexquo'):
try:
result = F.pexquo(G)
except ExactQuotientFailed, exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'pexquo')
return per(result)
def div(f, g, auto=True):
"""
Polynomial division with remainder of ``f`` by ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
(Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
(Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'div'):
q, r = F.div(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'div')
if retract:
try:
Q, R = q.to_ring(), r.to_ring()
except CoercionFailed:
pass
else:
q, r = Q, R
return per(q), per(r)
def rem(f, g, auto=True):
"""
Computes the polynomial remainder of ``f`` by ``g``.
**Examples**
>>> from sympy import Poly, ZZ, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
Poly(5, x, domain='ZZ')
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
Poly(x**2 + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'rem'):
r = F.rem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'rem')
if retract:
try:
r = r.to_ring()
except CoercionFailed:
pass
return per(r)
def quo(f, g, auto=True):
"""
Computes polynomial quotient of ``f`` by ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
Poly(1/2*x + 1, x, domain='QQ')
>>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'quo'):
q = F.quo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def exquo(f, g, auto=True):
"""
Computes polynomial exact quotient of ``f`` by ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'exquo'):
try:
q = F.exquo(G)
except ExactQuotientFailed, exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def _gen_to_level(f, gen):
"""Returns level associated with the given generator. """
if isinstance(gen, int):
length = len(f.gens)
if -length <= gen < length:
if gen < 0:
return length + gen
else:
return gen
else:
raise PolynomialError("-%s <= gen < %s expected, got %s" % (length, length, gen))
else:
try:
return list(f.gens).index(sympify(gen))
except ValueError:
raise PolynomialError("a valid generator expected, got %s" % gen)
def degree(f, gen=0):
"""
Returns degree of ``f`` in ``x_j``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree()
2
>>> Poly(x**2 + y*x + y, x, y).degree(y)
1
"""
j = f._gen_to_level(gen)
if hasattr(f.rep, 'degree'):
return f.rep.degree(j)
else: # pragma: no cover
raise OperationNotSupported(f, 'degree')
def degree_list(f):
"""
Returns a list of degrees of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree_list()
(2, 1)
"""
if hasattr(f.rep, 'degree_list'):
return f.rep.degree_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'degree_list')
def total_degree(f):
"""
Returns the total degree of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).total_degree()
3
"""
if hasattr(f.rep, 'total_degree'):
return f.rep.total_degree()
else: # pragma: no cover
raise OperationNotSupported(f, 'total_degree')
def LC(f, order=None):
"""
Returns the leading coefficient of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
4
"""
if order is not None:
return f.coeffs(order)[0]
if hasattr(f.rep, 'LC'):
result = f.rep.LC()
else: # pragma: no cover
raise OperationNotSupported(f, 'LC')
return f.rep.dom.to_sympy(result)
def TC(f):
"""
        Returns the trailing coefficient of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
0
"""
if hasattr(f.rep, 'TC'):
result = f.rep.TC()
else: # pragma: no cover
raise OperationNotSupported(f, 'TC')
return f.rep.dom.to_sympy(result)
def EC(f, order=None):
"""
Returns the last non-zero coefficient of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
3
"""
if hasattr(f.rep, 'coeffs'):
return f.coeffs(order)[-1]
else: # pragma: no cover
raise OperationNotSupported(f, 'EC')
def nth(f, *N):
"""
Returns the ``n``-th coefficient of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
2
>>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
2
"""
if hasattr(f.rep, 'nth'):
result = f.rep.nth(*map(int, N))
else: # pragma: no cover
raise OperationNotSupported(f, 'nth')
return f.rep.dom.to_sympy(result)
def LM(f, order=None):
"""
Returns the leading monomial of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
(2, 0)
"""
return f.monoms(order)[0]
def EM(f, order=None):
"""
Returns the last non-zero monomial of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
(0, 1)
"""
return f.monoms(order)[-1]
def LT(f, order=None):
"""
Returns the leading term of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
((2, 0), 4)
"""
return f.terms(order)[0]
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
((0, 1), 3)
"""
return f.terms(order)[-1]
def max_norm(f):
"""
Returns maximum norm of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).max_norm()
3
"""
if hasattr(f.rep, 'max_norm'):
result = f.rep.max_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'max_norm')
return f.rep.dom.to_sympy(result)
def l1_norm(f):
"""
Returns l1 norm of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).l1_norm()
6
"""
if hasattr(f.rep, 'l1_norm'):
result = f.rep.l1_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'l1_norm')
return f.rep.dom.to_sympy(result)
def clear_denoms(f, convert=False):
"""
Clear denominators, but keep the ground domain.
**Examples**
>>> from sympy import Poly, S, QQ
>>> from sympy.abc import x
>>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
>>> f.clear_denoms()
(6, Poly(3*x + 2, x, domain='QQ'))
>>> f.clear_denoms(convert=True)
(6, Poly(3*x + 2, x, domain='ZZ'))
"""
if not f.rep.dom.has_Field:
return S.One, f
dom = f.rep.dom.get_ring()
if hasattr(f.rep, 'clear_denoms'):
coeff, result = f.rep.clear_denoms()
else: # pragma: no cover
raise OperationNotSupported(f, 'clear_denoms')
coeff, f = dom.to_sympy(coeff), f.per(result)
if not convert:
return coeff, f
else:
return coeff, f.to_ring()
def rat_clear_denoms(f, g):
"""
Clear denominators in a rational function ``f/g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2/y + 1, x)
>>> g = Poly(x**3 + y, x)
>>> p, q = f.rat_clear_denoms(g)
>>> p
Poly(x**2 + y, x, domain='ZZ[y]')
>>> q
Poly(y*x**3 + y**2, x, domain='ZZ[y]')
"""
dom, per, f, g = f._unify(g)
f = per(f)
g = per(g)
if not (dom.has_Field and dom.has_assoc_Ring):
return f, g
a, f = f.clear_denoms(convert=True)
b, g = g.clear_denoms(convert=True)
f = f.mul_ground(b)
g = g.mul_ground(a)
return f, g
def integrate(f, *specs, **args):
"""
Computes indefinite integral of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).integrate()
Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
>>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
"""
if args.get('auto', True) and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'integrate'):
if not specs:
return f.per(f.rep.integrate(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.integrate(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'integrate')
def diff(f, *specs):
"""
Computes partial derivative of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).diff()
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
Poly(2*x*y, x, y, domain='ZZ')
"""
if hasattr(f.rep, 'diff'):
if not specs:
return f.per(f.rep.diff(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.diff(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'diff')
def eval(f, x, a=None, auto=True):
"""
Evaluate ``f`` at ``a`` in the given variable.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 2*x + 3, x).eval(2)
11
>>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
Poly(5*y + 8, y, domain='ZZ')
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f.eval({x: 2})
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f.eval({x: 2, y: 5})
Poly(2*z + 31, z, domain='ZZ')
>>> f.eval({x: 2, y: 5, z: 7})
45
"""
if a is None:
if isinstance(x, (list, dict)):
try:
mapping = x.items()
except AttributeError:
mapping = x
for gen, value in mapping:
f = f.eval(gen, value)
return f
else:
j, a = 0, x
else:
j = f._gen_to_level(x)
if not hasattr(f.rep, 'eval'): # pragma: no cover
raise OperationNotSupported(f, 'eval')
try:
result = f.rep.eval(a, j)
except CoercionFailed:
if not auto:
raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
else:
domain, [a] = construct_domain([a])
f = f.set_domain(domain)
result = f.rep.eval(a, j)
return f.per(result, remove=j)
def half_gcdex(f, g, auto=True):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).half_gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'half_gcdex'):
s, h = F.half_gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'half_gcdex')
return per(s), per(h)
def gcdex(f, g, auto=True):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'),
Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'gcdex'):
s, t, h = F.gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcdex')
return per(s), per(t), per(h)
def invert(f, g, auto=True):
"""
Invert ``f`` modulo ``g`` when possible.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
Poly(-4/3, x, domain='QQ')
>>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'invert'):
result = F.invert(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'invert')
return per(result)
def revert(f, n):
"""Compute ``f**(-1)`` mod ``x**n``. """
if hasattr(f.rep, 'revert'):
result = f.rep.revert(int(n))
else: # pragma: no cover
raise OperationNotSupported(f, 'revert')
return f.per(result)
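    # Added note: revert computes the inverse of ``f`` as a truncated power series;
    # since 1/(1 - x) = 1 + x + x**2 + ..., Poly(1 - x, x).revert(3) is expected to
    # give x**2 + x + 1 (an illustration; the trailing coefficient must be invertible
    # in the ground domain).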
def subresultants(f, g):
"""
Computes the subresultant PRS sequence of ``f`` and ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
[Poly(x**2 + 1, x, domain='ZZ'),
Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')]
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'subresultants'):
result = F.subresultants(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'subresultants')
return map(per, result)
def resultant(f, g):
"""
Computes the resultant of ``f`` and ``g`` via PRS.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).resultant(Poly(x**2 - 1, x))
4
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'resultant'):
result = F.resultant(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'resultant')
return per(result, remove=0)
def discriminant(f):
"""
Computes the discriminant of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x + 3, x).discriminant()
-8
"""
if hasattr(f.rep, 'discriminant'):
result = f.rep.discriminant()
else: # pragma: no cover
raise OperationNotSupported(f, 'discriminant')
return f.per(result, remove=0)
def cofactors(f, g):
"""
Returns the GCD of ``f`` and ``g`` and their cofactors.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
        ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
(Poly(x - 1, x, domain='ZZ'),
Poly(x + 1, x, domain='ZZ'),
Poly(x - 2, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'cofactors'):
h, cff, cfg = F.cofactors(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'cofactors')
return per(h), per(cff), per(cfg)
def gcd(f, g):
"""
Returns the polynomial GCD of ``f`` and ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
Poly(x - 1, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'gcd'):
result = F.gcd(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcd')
return per(result)
def lcm(f, g):
"""
Returns polynomial LCM of ``f`` and ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'lcm'):
result = F.lcm(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'lcm')
return per(result)
def trunc(f, p):
"""
Reduce ``f`` modulo a constant ``p``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
Poly(-x**3 - x + 1, x, domain='ZZ')
"""
p = f.rep.dom.convert(p)
if hasattr(f.rep, 'trunc'):
result = f.rep.trunc(p)
else: # pragma: no cover
raise OperationNotSupported(f, 'trunc')
return f.per(result)
def monic(f, auto=True):
"""
Divides all coefficients by ``LC(f)``.
**Examples**
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
Poly(x**2 + 2*x + 3, x, domain='QQ')
>>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
"""
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'monic'):
result = f.rep.monic()
else: # pragma: no cover
raise OperationNotSupported(f, 'monic')
return f.per(result)
def content(f):
"""
Returns the GCD of polynomial coefficients.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(6*x**2 + 8*x + 12, x).content()
2
"""
if hasattr(f.rep, 'content'):
result = f.rep.content()
else: # pragma: no cover
raise OperationNotSupported(f, 'content')
return f.rep.dom.to_sympy(result)
def primitive(f):
"""
Returns the content and a primitive form of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 8*x + 12, x).primitive()
(2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
"""
if hasattr(f.rep, 'primitive'):
cont, result = f.rep.primitive()
else: # pragma: no cover
raise OperationNotSupported(f, 'primitive')
return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
"""
Computes the functional composition of ``f`` and ``g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
Poly(x**2 - x, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'compose'):
result = F.compose(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'compose')
return per(result)
def decompose(f):
"""
Computes a functional decomposition of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
[Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
"""
if hasattr(f.rep, 'decompose'):
result = f.rep.decompose()
else: # pragma: no cover
raise OperationNotSupported(f, 'decompose')
return map(f.per, result)
def shift(f, a):
"""
Efficiently compute Taylor shift ``f(x + a)``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).shift(2)
Poly(x**2 + 2*x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'shift'):
result = f.rep.shift(a)
else: # pragma: no cover
raise OperationNotSupported(f, 'shift')
return f.per(result)
def sturm(f, auto=True):
"""
Computes the Sturm sequence of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
[Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
Poly(2/9*x + 25/9, x, domain='QQ'),
Poly(-2079/4, x, domain='QQ')]
"""
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'sturm'):
result = f.rep.sturm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sturm')
return map(f.per, result)
def gff_list(f):
"""
Computes greatest factorial factorization of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> Poly(f).gff_list()
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'gff_list'):
result = f.rep.gff_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'gff_list')
return [ (f.per(g), k) for g, k in result ]
def sqf_norm(f):
"""
Computes square-free norm of ``f``.
Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
        where ``a`` is the generator of the algebraic extension of the ground domain.
**Examples**
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
>>> s
1
>>> f
Poly(x**2 - 2*3**(1/2)*x + 4, x, domain='QQ<3**(1/2)>')
>>> r
Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
"""
if hasattr(f.rep, 'sqf_norm'):
s, g, r = f.rep.sqf_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_norm')
return s, f.per(g), f.per(r)
def sqf_part(f):
"""
Computes square-free part of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 3*x - 2, x).sqf_part()
Poly(x**2 - x - 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqf_part'):
result = f.rep.sqf_part()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_part')
return f.per(result)
def sqf_list(f, all=False):
"""
Returns a list of square-free factors of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
>>> Poly(f).sqf_list()
(2, [(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
>>> Poly(f).sqf_list(all=True)
(2, [(Poly(1, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
"""
if hasattr(f.rep, 'sqf_list'):
coeff, factors = f.rep.sqf_list(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list')
return f.rep.dom.to_sympy(coeff), [ (f.per(g), k) for g, k in factors ]
def sqf_list_include(f, all=False):
"""
Returns a list of square-free factors of ``f``.
**Examples**
>>> from sympy import Poly, expand
>>> from sympy.abc import x
>>> f = expand(2*(x + 1)**3*x**4)
>>> f
2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
>>> Poly(f).sqf_list_include()
[(Poly(2, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
>>> Poly(f).sqf_list_include(all=True)
[(Poly(2, x, domain='ZZ'), 1),
(Poly(1, x, domain='ZZ'), 2),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'sqf_list_include'):
factors = f.rep.sqf_list_include(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list_include')
return [ (f.per(g), k) for g, k in factors ]
def factor_list(f):
"""
Returns a list of irreducible factors of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list()
(2, [(Poly(x + y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
"""
if hasattr(f.rep, 'factor_list'):
try:
coeff, factors = f.rep.factor_list()
except DomainError:
return S.One, [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list')
return f.rep.dom.to_sympy(coeff), [ (f.per(g), k) for g, k in factors ]
def factor_list_include(f):
"""
Returns a list of irreducible factors of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list_include()
[(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
"""
if hasattr(f.rep, 'factor_list_include'):
try:
factors = f.rep.factor_list_include()
except DomainError:
return [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list_include')
return [ (f.per(g), k) for g, k in factors ]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).intervals()
[((-2, -1), 1), ((1, 2), 1)]
>>> Poly(x**2 - 3, x).intervals(eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = QQ.convert(inf)
if sup is not None:
sup = QQ.convert(sup)
if hasattr(f.rep, 'intervals'):
result = f.rep.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else: # pragma: no cover
raise OperationNotSupported(f, 'intervals')
if sqf:
def _real(interval):
s, t = interval
return (QQ.to_sympy(s), QQ.to_sympy(t))
if not all:
return map(_real, result)
def _complex(rectangle):
(u, v), (s, t) = rectangle
return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t))
real_part, complex_part = result
return map(_real, real_part), map(_complex, complex_part)
else:
def _real(interval):
(s, t), k = interval
return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
if not all:
return map(_real, result)
def _complex(rectangle):
((u, v), (s, t)), k = rectangle
return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
real_part, complex_part = result
return map(_real, real_part), map(_complex, complex_part)
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
(19/11, 26/15)
"""
if check_sqf and not f.is_sqf:
raise PolynomialError("only square-free polynomials supported")
s, t = QQ.convert(s), QQ.convert(t)
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if steps is not None:
steps = int(steps)
elif eps is None:
steps = 1
if hasattr(f.rep, 'refine_root'):
S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
else: # pragma: no cover
raise OperationNotSupported(f, 'refine_root')
return QQ.to_sympy(S), QQ.to_sympy(T)
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in ``[inf, sup]`` interval.
**Examples**
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**4 - 4, x).count_roots(-3, 3)
2
>>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
1
"""
inf_real, sup_real = True, True
if inf is not None:
inf = sympify(inf)
if inf is S.NegativeInfinity:
inf = None
else:
re, im = inf.as_real_imag()
if not im:
inf = QQ.convert(inf)
else:
inf, inf_real = map(QQ.convert, (re, im)), False
if sup is not None:
sup = sympify(sup)
if sup is S.Infinity:
sup = None
else:
re, im = sup.as_real_imag()
if not im:
sup = QQ.convert(sup)
else:
sup, sup_real = map(QQ.convert, (re, im)), False
if inf_real and sup_real:
if hasattr(f.rep, 'count_real_roots'):
count = f.rep.count_real_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_real_roots')
else:
if inf_real and inf is not None:
inf = (inf, QQ.zero)
if sup_real and sup is not None:
sup = (sup, QQ.zero)
if hasattr(f.rep, 'count_complex_roots'):
count = f.rep.count_complex_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_complex_roots')
return Integer(count)
def root(f, index, radicals=True):
"""
Get an indexed root of a polynomial.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
>>> f.root(0)
-1/2
>>> f.root(1)
2
>>> f.root(2)
2
>>> f.root(3)
Traceback (most recent call last):
...
IndexError: root index out of [-3, 2] range, got 3
>>> Poly(x**5 + x + 1).root(0)
RootOf(x**3 - x**2 + 1, 0)
"""
return sympy.polys.rootoftools.RootOf(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
"""
Return a list of real roots with multiplicities.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).real_roots()
[RootOf(x**3 + x + 1, 0)]
"""
reals = sympy.polys.rootoftools.RootOf.real_roots(f, radicals=radicals)
if multiple:
return reals
else:
return group(reals, multiple=False)
def all_roots(f, multiple=True, radicals=True):
"""
Return a list of real and complex roots with multiplicities.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).all_roots()
[RootOf(x**3 + x + 1, 0), RootOf(x**3 + x + 1, 1), RootOf(x**3 + x + 1, 2)]
"""
roots = sympy.polys.rootoftools.RootOf.all_roots(f, radicals=radicals)
if multiple:
return roots
else:
return group(roots, multiple=False)
def nroots(f, maxsteps=50, cleanup=True, error=False):
"""
Compute numerical approximations of roots of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3).nroots()
[-1.73205080756888, 1.73205080756888]
"""
if f.is_multivariate:
raise MultivariatePolynomialError("can't compute numerical roots of %s" % f)
if f.degree() <= 0:
return []
try:
coeffs = [ complex(c) for c in f.all_coeffs() ]
except ValueError:
raise DomainError("numerical domain expected, got %s" % f.rep.dom)
result = npolyroots(coeffs, maxsteps=maxsteps, cleanup=cleanup, error=error)
if error:
roots, error = result
else:
roots, error = result, None
roots = map(sympify, sorted(roots, key=lambda r: (r.real, r.imag)))
if error is not None:
return roots, sympify(error)
else:
return roots
def ground_roots(f):
"""
Compute roots of ``f`` by factorization in the ground domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
{0: 2, 1: 2}
"""
if f.is_multivariate:
raise MultivariatePolynomialError("can't compute ground roots of %s" % f)
roots = {}
for factor, k in f.factor_list()[1]:
if factor.is_linear:
a, b = factor.all_coeffs()
roots[-b/a] = k
return roots
def nth_power_roots_poly(f, n):
"""
Construct a polynomial with n-th powers of roots of ``f``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**4 - x**2 + 1)
>>> f.nth_power_roots_poly(2)
Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(3)
Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(4)
Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(12)
Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')
"""
if f.is_multivariate:
raise MultivariatePolynomialError("must be a univariate polynomial")
N = sympify(n)
if N.is_Integer and N >= 1:
n = int(N)
else:
raise ValueError("'n' must be an integer and n >= 1, got %s" % n)
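# Sketch of the idea used below: eliminating x between f(x) and x**n - t via
# a resultant yields, up to a constant factor, a polynomial in t whose roots
# are the n-th powers of the roots of f; t is then renamed back to x.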
x = f.gen
t = Dummy('t')
r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
return r.replace(t, x)
def cancel(f, g, include=False):
"""
Cancel common factors in a rational function ``f/g``.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
(1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
(Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
if hasattr(F, 'cancel'):
result = F.cancel(G, include=include)
else: # pragma: no cover
raise OperationNotSupported(f, 'cancel')
if not include:
if dom.has_assoc_Ring:
dom = dom.get_ring()
cp, cq, p, q = result
cp = dom.to_sympy(cp)
cq = dom.to_sympy(cq)
return cp/cq, per(p), per(q)
else:
return tuple(map(per, result))
@property
def is_zero(f):
"""
Returns ``True`` if ``f`` is a zero polynomial.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_zero
True
>>> Poly(1, x).is_zero
False
"""
return f.rep.is_zero
@property
def is_one(f):
"""
Returns ``True`` if ``f`` is a unit polynomial.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_one
False
>>> Poly(1, x).is_one
True
"""
return f.rep.is_one
@property
def is_sqf(f):
"""
Returns ``True`` if ``f`` is a square-free polynomial.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).is_sqf
False
>>> Poly(x**2 - 1, x).is_sqf
True
"""
return f.rep.is_sqf
@property
def is_monic(f):
"""
Returns ``True`` if the leading coefficient of ``f`` is one.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 2, x).is_monic
True
>>> Poly(2*x + 2, x).is_monic
False
"""
return f.rep.is_monic
@property
def is_primitive(f):
"""
Returns ``True`` if GCD of the coefficients of ``f`` is one.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 6*x + 12, x).is_primitive
False
>>> Poly(x**2 + 3*x + 6, x).is_primitive
True
"""
return f.rep.is_primitive
@property
def is_ground(f):
"""
Returns ``True`` if ``f`` is an element of the ground domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x, x).is_ground
False
>>> Poly(2, x).is_ground
True
>>> Poly(y, x).is_ground
True
"""
return f.rep.is_ground
@property
def is_linear(f):
"""
Returns ``True`` if ``f`` is linear in all its variables.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x + y + 2, x, y).is_linear
True
>>> Poly(x*y + 2, x, y).is_linear
False
"""
return f.rep.is_linear
@property
def is_quadratic(f):
"""
Returns ``True`` if ``f`` is quadratic in all its variables.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x*y + 2, x, y).is_quadratic
True
>>> Poly(x*y**2 + 2, x, y).is_quadratic
False
"""
return f.rep.is_quadratic
@property
def is_monomial(f):
"""
Returns ``True`` if ``f`` is zero or has only one term.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(3*x**2, x).is_monomial
True
>>> Poly(3*x**2 + 1, x).is_monomial
False
"""
return f.rep.is_monomial
@property
def is_homogeneous(f):
"""
Returns ``True`` if ``f`` has zero trailing coefficient.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x*y + x + y, x, y).is_homogeneous
True
>>> Poly(x*y + x + y + 1, x, y).is_homogeneous
False
"""
return f.rep.is_homogeneous
@property
def is_irreducible(f):
"""
Returns ``True`` if ``f`` has no factors over its domain.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
True
>>> Poly(x**2 + 1, x, modulus=2).is_irreducible
False
"""
return f.rep.is_irreducible
@property
def is_univariate(f):
"""
Returns ``True`` if ``f`` is a univariate polynomial.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_univariate
True
>>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
False
>>> Poly(x*y**2 + x*y + 1, x).is_univariate
True
>>> Poly(x**2 + x + 1, x, y).is_univariate
False
"""
return len(f.gens) == 1
@property
def is_multivariate(f):
"""
Returns ``True`` if ``f`` is a multivariate polynomial.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_multivariate
False
>>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
True
>>> Poly(x*y**2 + x*y + 1, x).is_multivariate
False
>>> Poly(x**2 + x + 1, x, y).is_multivariate
True
"""
return len(f.gens) != 1
@property
def is_cyclotomic(f):
"""
Returns ``True`` if ``f`` is a cyclotomic polynomial.
**Examples**
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> Poly(f).is_cyclotomic
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> Poly(g).is_cyclotomic
True
"""
return f.rep.is_cyclotomic
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
@_sympifyit('g', NotImplemented)
def __add__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() + g
return f.add(g)
@_sympifyit('g', NotImplemented)
def __radd__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g + f.as_expr()
return g.add(f)
@_sympifyit('g', NotImplemented)
def __sub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() - g
return f.sub(g)
@_sympifyit('g', NotImplemented)
def __rsub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g - f.as_expr()
return g.sub(f)
@_sympifyit('g', NotImplemented)
def __mul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr()*g
return f.mul(g)
@_sympifyit('g', NotImplemented)
def __rmul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g*f.as_expr()
return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
if n.is_Integer and n >= 0:
return f.pow(n)
else:
return f.as_expr()**n
@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.div(g)
@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.div(f)
@_sympifyit('g', NotImplemented)
def __mod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.rem(g)
@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.rem(f)
@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.quo(g)
@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
return f.as_expr()/g.as_expr()
@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
return g.as_expr()/f.as_expr()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('g', NotImplemented)
def __eq__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if f.gens != g.gens:
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
return not f.__eq__(g)
def __nonzero__(f):
return not f.is_zero
class PurePoly(Poly):
"""Class for representing pure polynomials. """
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep,)
def __hash__(self):
return super(PurePoly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial.
**Examples**
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
>>> PurePoly(x**2 + 1).free_symbols
set()
>>> PurePoly(x**2 + y).free_symbols
set()
>>> PurePoly(x**2 + y, x).free_symbols
set([y])
"""
return self.free_symbols_in_domain
@_sympifyit('g', NotImplemented)
def __eq__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if len(f.gens) != len(g.gens):
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
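# Note that, unlike Poly, equality of PurePoly instances depends only on the
# number of generators and on the coefficients, not on the generator names,
# e.g. PurePoly(x**2 + 1) == PurePoly(y**2 + 1) holds.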
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if len(f.gens) != len(g.gens):
raise UnificationFailed("can't unify %s with %s" % (f, g))
if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
gens = f.gens
lev = len(gens)-1
dom = f.rep.dom.unify(g.rep.dom, gens)
F = f.rep.convert(dom)
G = g.rep.convert(dom)
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove]+gens[remove+1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
def poly_from_expr(expr, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return _poly_from_expr(expr, opt)
def _poly_from_expr(expr, opt):
"""Construct a polynomial from an expression. """
orig, expr = expr, sympify(expr)
if not isinstance(expr, Basic):
raise PolificationFailed(opt, orig, expr)
elif expr.is_Poly:
poly = expr.__class__._from_poly(expr, opt)
opt['gens'] = poly.gens
opt['domain'] = poly.domain
if opt.polys is None:
opt['polys'] = True
return poly, opt
elif opt.expand:
expr = expr.expand()
try:
rep, opt = _dict_from_expr(expr, opt)
except GeneratorsNeeded:
raise PolificationFailed(opt, orig, expr)
monoms, coeffs = zip(*rep.items())
domain = opt.domain
if domain is None:
domain, coeffs = construct_domain(coeffs, opt=opt)
else:
coeffs = map(domain.from_sympy, coeffs)
level = len(opt.gens)-1
poly = Poly.new(DMP.from_monoms_coeffs(monoms, coeffs, level, domain), *opt.gens)
opt['domain'] = domain
if opt.polys is None:
opt['polys'] = False
return poly, opt
def parallel_poly_from_expr(exprs, *gens, **args):
"""Construct polynomials from expressions. """
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
def _parallel_poly_from_expr(exprs, opt):
"""Construct polynomials from expressions. """
if len(exprs) == 2:
f, g = exprs
if isinstance(f, Poly) and isinstance(g, Poly):
f = f.__class__._from_poly(f, opt)
g = g.__class__._from_poly(g, opt)
f, g = f.unify(g)
opt['gens'] = f.gens
opt['domain'] = f.domain
if opt.polys is None:
opt['polys'] = True
return [f, g], opt
origs, exprs = list(exprs), []
_exprs, _polys = [], []
failed = False
for i, expr in enumerate(origs):
expr = sympify(expr)
if isinstance(expr, Basic):
if expr.is_Poly:
_polys.append(i)
else:
_exprs.append(i)
if opt.expand:
expr = expr.expand()
else:
failed = True
exprs.append(expr)
if failed:
raise PolificationFailed(opt, origs, exprs, True)
if _polys:
# XXX: this is a temporary solution
for i in _polys:
exprs[i] = exprs[i].as_expr()
try:
reps, opt = _parallel_dict_from_expr(exprs, opt)
except GeneratorsNeeded:
raise PolificationFailed(opt, origs, exprs, True)
coeffs_list, lengths = [], []
all_monoms = []
all_coeffs = []
for rep in reps:
monoms, coeffs = zip(*rep.items())
coeffs_list.extend(coeffs)
all_monoms.append(monoms)
lengths.append(len(coeffs))
domain = opt.domain
if domain is None:
domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
else:
coeffs_list = map(domain.from_sympy, coeffs_list)
for k in lengths:
all_coeffs.append(coeffs_list[:k])
coeffs_list = coeffs_list[k:]
polys, level = [], len(opt.gens)-1
for monoms, coeffs in zip(all_monoms, all_coeffs):
rep = DMP.from_monoms_coeffs(monoms, coeffs, level, domain)
polys.append(Poly.new(rep, *opt.gens))
opt['domain'] = domain
if opt.polys is None:
opt['polys'] = bool(_polys)
return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
def _keep_coeff(coeff, factors):
"""Return ``coeff*factors`` unevaluated if necessary. """
if coeff == 1:
return factors
elif coeff == -1:
return -factors
elif not factors.is_Add:
return coeff*factors
else:
return Mul(coeff, factors, evaluate=False)
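# For illustration: with coeff = Integer(2) and factors = x + 1, the result
# is expected to be the unevaluated product 2*(x + 1) rather than the
# expanded sum 2*x + 2, so factored output keeps its factored form.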
def degree(f, *gens, **args):
"""
Return the degree of ``f`` in the given variable.
**Examples**
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
"""
options.allowed_flags(args, ['gen', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('degree', 1, exc)
return Integer(F.degree(opt.gen))
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
**Examples**
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
**Examples**
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
**Examples**
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('LM', 1, exc)
monom = Monomial(*F.LM(order=opt.order))
return monom.as_expr(*opt.gens)
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
**Examples**
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*Monomial(*monom).as_expr(*opt.gens)
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
**Examples**
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
**Examples**
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
**Examples**
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('pquo', 2, exc)
q = F.pquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
**Examples**
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
**Examples**
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
**Examples**
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
**Examples**
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
**Examples**
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
**Examples**
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
f, g = exc.exprs
if hasattr(f, 'half_gcdex'):
try:
return f.half_gcdex(g)
except (SympifyError, ValueError):
pass
raise ComputationFailed('half_gcdex', 2, exc)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
**Examples**
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x**2/5 - 6*x/5 + 2, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
f, g = exc.exprs
if hasattr(f, 'gcdex'):
try:
return f.gcdex(g)
except (SympifyError, ValueError):
pass
raise ComputationFailed('gcdex', 2, exc)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
**Examples**
>>> from sympy import invert
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
f, g = exc.exprs
if hasattr(f, 'invert'):
try:
return f.invert(g)
except (SympifyError, ValueError):
pass
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
**Examples**
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [ r.as_expr() for r in result ]
else:
return result
def resultant(f, g, *gens, **args):
"""
Compute resultant of ``f`` and ``g``.
**Examples**
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('resultant', 2, exc)
result = F.resultant(G)
if not opt.polys:
return result.as_expr()
else:
return result
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
**Examples**
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
**Examples**
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
f, g = exc.exprs
if hasattr(f, 'cofactors'):
try:
return f.cofactors(g)
except (SympifyError, ValueError):
pass
raise ComputationFailed('cofactors', 2, exc)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
def gcd_list(seq, *gens, **args):
"""
Compute GCD of a list of polynomials.
**Examples**
>>> from sympy import gcd_list
>>> from sympy.abc import x
>>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x - 1
"""
if not gens and not args:
if not seq:
return S.Zero
seq = sympify(seq)
if all(s.is_Number for s in seq):
result, numbers = seq[0], seq[1:]
for number in numbers:
result = result.gcd(number)
if result is S.One:
break
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('gcd_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.Zero
else:
return Poly(0, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.gcd(poly)
if result.is_one:
break
if not opt.polys:
return result.as_expr()
else:
return result
def gcd(f, g=None, *gens, **args):
"""
Compute GCD of ``f`` and ``g``.
**Examples**
>>> from sympy import gcd
>>> from sympy.abc import x
>>> gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return gcd_list(f, *gens, **args)
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
f, g = exc.exprs
if hasattr(f, 'gcd'):
try:
return f.gcd(g)
except (SympifyError, ValueError):
pass
raise ComputationFailed('gcd', 2, exc)
result = F.gcd(G)
if not opt.polys:
return result.as_expr()
else:
return result
def lcm_list(seq, *gens, **args):
"""
Compute LCM of a list of polynomials.
**Examples**
>>> from sympy import lcm_list
>>> from sympy.abc import x
>>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x**5 - x**4 - 2*x**3 - x**2 + x + 2
"""
if not gens and not args:
if not seq:
return S.One
seq = sympify(seq)
if all(s.is_Number for s in seq):
result, numbers = seq[0], seq[1:]
for number in numbers:
result = result.lcm(number)
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('lcm_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.One
else:
return Poly(1, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.lcm(poly)
if not opt.polys:
return result.as_expr()
else:
return result
def lcm(f, g=None, *gens, **args):
"""
Compute LCM of ``f`` and ``g``.
**Examples**
>>> from sympy import lcm
>>> from sympy.abc import x
>>> lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return lcm_list(f, *gens, **args)
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
f, g = exc.exprs
if hasattr(f, 'lcm'):
try:
return f.lcm(g)
except (SympifyError, ValueError):
pass
raise ComputationFailed('lcm', 2, exc)
result = F.lcm(G)
if not opt.polys:
return result.as_expr()
else:
return result
def terms_gcd(f, *gens, **args):
"""
Remove GCD of terms from ``f``.
**Examples**
>>> from sympy import terms_gcd
>>> from sympy.abc import x, y
>>> terms_gcd(x**6*y**2 + x**3*y, x, y)
x**3*y*(x**3*y + 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
return exc.expr
J, f = F.terms_gcd()
if opt.domain.has_Ring:
if opt.domain.has_Field:
denom, f = f.clear_denoms(convert=True)
coeff, f = f.primitive()
if opt.domain.has_Field:
coeff /= denom
else:
coeff = S.One
term = Mul(*[ x**j for x, j in zip(f.gens, J) ])
return _keep_coeff(coeff, term*f.as_expr())
def trunc(f, p, *gens, **args):
"""
Reduce ``f`` modulo a constant ``p``.
**Examples**
>>> from sympy import trunc
>>> from sympy.abc import x
>>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
-x**3 - x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('trunc', 1, exc)
result = F.trunc(sympify(p))
if not opt.polys:
return result.as_expr()
else:
return result
def monic(f, *gens, **args):
"""
Divide all coefficients of ``f`` by ``LC(f)``.
**Examples**
>>> from sympy import monic
>>> from sympy.abc import x
>>> monic(3*x**2 + 4*x + 2)
x**2 + 4*x/3 + 2/3
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('monic', 1, exc)
result = F.monic(auto=opt.auto)
if not opt.polys:
return result.as_expr()
else:
return result
def content(f, *gens, **args):
"""
Compute GCD of coefficients of ``f``.
**Examples**
>>> from sympy import content
>>> from sympy.abc import x
>>> content(6*x**2 + 8*x + 12)
2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('content', 1, exc)
return F.content()
def primitive(f, *gens, **args):
"""
Compute content and the primitive form of ``f``.
**Examples**
>>> from sympy import primitive
>>> from sympy.abc import x
>>> primitive(6*x**2 + 8*x + 12)
(2, 3*x**2 + 4*x + 6)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('primitive', 1, exc)
cont, result = F.primitive()
if not opt.polys:
return cont, result.as_expr()
else:
return cont, result
def compose(f, g, *gens, **args):
"""
Compute functional composition ``f(g)``.
**Examples**
>>> from sympy import compose
>>> from sympy.abc import x
>>> compose(x**2 + x, x - 1)
x**2 - x
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('compose', 2, exc)
result = F.compose(G)
if not opt.polys:
return result.as_expr()
else:
return result
def decompose(f, *gens, **args):
"""
Compute functional decomposition of ``f``.
**Examples**
>>> from sympy import decompose
>>> from sympy.abc import x
>>> decompose(x**4 + 2*x**3 - x - 1)
[x**2 - x - 1, x**2 + x]
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('decompose', 1, exc)
result = F.decompose()
if not opt.polys:
return [ r.as_expr() for r in result ]
else:
return result
def sturm(f, *gens, **args):
"""
Compute Sturm sequence of ``f``.
**Examples**
>>> from sympy import sturm
>>> from sympy.abc import x
>>> sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('sturm', 1, exc)
result = F.sturm(auto=opt.auto)
if not opt.polys:
return [ r.as_expr() for r in result ]
else:
return result
def gff_list(f, *gens, **args):
"""
Compute a list of greatest factorial factors of ``f``.
**Examples**
>>> from sympy import gff_list, ff
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> gff_list(f)
[(x, 1), (x + 2, 4)]
>>> (ff(x, 1)*ff(x + 2, 4)).expand() == f
True
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('gff_list', 1, exc)
factors = F.gff_list()
if not opt.polys:
return [ (g.as_expr(), k) for g, k in factors ]
else:
return factors
def gff(f, *gens, **args):
"""Compute greatest factorial factorization of ``f``. """
raise NotImplementedError('symbolic falling factorial')
def sqf_norm(f, *gens, **args):
"""
Compute square-free norm of ``f``.
Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` is the algebraic extension of the ground domain.
**Examples**
>>> from sympy import sqf_norm, sqrt
>>> from sympy.abc import x
>>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
(1, x**2 - 2*3**(1/2)*x + 4, x**4 - 4*x**2 + 16)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('sqf_norm', 1, exc)
s, g, r = F.sqf_norm()
if not opt.polys:
return Integer(s), g.as_expr(), r.as_expr()
else:
return Integer(s), g, r
def sqf_part(f, *gens, **args):
"""
Compute square-free part of ``f``.
**Examples**
>>> from sympy import sqf_part
>>> from sympy.abc import x
>>> sqf_part(x**3 - 3*x - 2)
x**2 - x - 2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('sqf_part', 1, exc)
result = F.sqf_part()
if not opt.polys:
return result.as_expr()
else:
return result
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), exp, rep)
return sorted(factors, key=key)
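# With method='sqf' factors are ordered primarily by their multiplicity
# (exponent); otherwise they are ordered primarily by the length of their
# dense representation, i.e. roughly by degree.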
def _factors_product(factors):
"""Multiply a list of ``(expr, exp)`` pairs. """
return Mul(*[ f.as_expr()**k for f, k in factors ])
def _symbolic_factor_list(expr, opt, method):
"""Helper function for :func:`_symbolic_factor`. """
coeff, factors = S.One, []
for arg in Mul.make_args(expr):
if arg.is_Pow:
base, exp = arg.args
else:
base, exp = arg, S.One
if base.is_Number:
coeff *= arg
else:
try:
poly, _ = _poly_from_expr(base, opt)
except PolificationFailed, exc:
coeff *= exc.expr**exp
else:
func = getattr(poly, method + '_list')
_coeff, _factors = func()
coeff *= _coeff**exp
if exp is S.One:
factors.extend(_factors)
else:
for factor, k in _factors:
factors.append((factor, k*exp))
return coeff, factors
def _symbolic_factor(expr, opt, method):
"""Helper function for :func:`_factor`. """
if isinstance(expr, Expr) and not expr.is_Relational:
coeff, factors = _symbolic_factor_list(together(expr), opt, method)
return _keep_coeff(coeff, _factors_product(factors))
elif hasattr(expr, 'args'):
return expr.func(*[ _symbolic_factor(arg, opt, method) for arg in expr.args ])
elif hasattr(expr, '__iter__'):
return expr.__class__([ _symbolic_factor(arg, opt, method) for arg in expr ])
else:
return expr
def _generic_factor_list(expr, gens, args, method):
"""Helper function for :func:`sqf_list` and :func:`factor_list`. """
options.allowed_flags(args, ['frac', 'polys'])
opt = options.build_options(gens, args)
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
numer, denom = together(expr).as_numer_denom()
cp, fp = _symbolic_factor_list(numer, opt, method)
cq, fq = _symbolic_factor_list(denom, opt, method)
if fq and not opt.frac:
raise PolynomialError("a polynomial expected, got %s" % expr)
fp = _sorted_factors(fp, method)
fq = _sorted_factors(fq, method)
if not opt.polys:
fp = [ (f.as_expr(), k) for f, k in fp ]
fq = [ (f.as_expr(), k) for f, k in fq ]
coeff = cp/cq
if not opt.frac:
return coeff, fp
else:
return coeff, fp, fq
else:
raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
"""Helper function for :func:`sqf` and :func:`factor`. """
options.allowed_flags(args, [])
opt = options.build_options(gens, args)
return _symbolic_factor(sympify(expr), opt, method)
def sqf_list(f, *gens, **args):
"""
Compute a list of square-free factors of ``f``.
**Examples**
>>> from sympy import sqf_list
>>> from sympy.abc import x
>>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
(2, [(x + 1, 2), (x + 2, 3)])
"""
return _generic_factor_list(f, gens, args, method='sqf')
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
**Examples**
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(x + 1)**2*(x + 2)**3
"""
return _generic_factor(f, gens, args, method='sqf')
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
**Examples**
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (x**2 + 1, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor')
def factor(f, *gens, **args):
"""
Compute the factorization of ``f`` into irreducibles. (Use factorint to
factor an integer.)
There are two modes implemented: symbolic and formal. If ``f`` is not an
instance of :class:`Poly` and generators are not specified, then the
former mode is used. Otherwise, the formal mode is used.
In symbolic mode, :func:`factor` will traverse the expression tree and
factor its components without any prior expansion, unless an instance
of :class:`Add` is encountered (in this case formal factorization is
used). This way :func:`factor` can handle large or symbolic exponents.
By default, the factorization is computed over the rationals. To factor
over another domain, e.g. an algebraic or finite field, use appropriate
options: ``extension``, ``modulus`` or ``domain``.
**Examples**
>>> from sympy import factor, sqrt
>>> from sympy.abc import x, y
>>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
2*(x + y)*(x**2 + 1)**2
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, gaussian=True)
(x - I)*(x + I)
>>> factor(x**2 - 2, extension=sqrt(2))
(x - 2**(1/2))*(x + 2**(1/2))
>>> factor((x**2 - 1)/(x**2 + 4*x + 4))
(x - 1)*(x + 1)/(x + 2)**2
>>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
(x + 2)**20000000*(x**2 + 1)
"""
return _generic_factor(f, gens, args, method='factor')
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
**Examples**
>>> from sympy import intervals
>>> from sympy.abc import x
>>> intervals(x**2 - 3)
[((-2, -1), 1), ((1, 2), 1)]
>>> intervals(x**2 - 3, eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if not hasattr(F, '__iter__'):
try:
F = Poly(F)
except GeneratorsNeeded:
return []
return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else:
polys, opt = parallel_poly_from_expr(F, domain='QQ')
if len(opt.gens) > 1:
raise MultivariatePolynomialError
for i, poly in enumerate(polys):
polys[i] = poly.rep.rep
if eps is not None:
eps = opt.domain.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = opt.domain.convert(inf)
if sup is not None:
sup = opt.domain.convert(sup)
intervals = dup_isolate_real_roots_list(polys, opt.domain,
eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
result = []
for (s, t), indices in intervals:
s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
result.append(((s, t), indices))
return result
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
**Examples**
>>> from sympy import refine_root
>>> from sympy.abc import x
>>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
(19/11, 26/15)
"""
try:
F = Poly(f)
except GeneratorsNeeded:
raise PolynomialError("can't refine a root of %s, not a polynomial" % f)
return F.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in ``[inf, sup]`` interval.
If one of ``inf`` or ``sup`` is complex, it will return the number of roots
in the complex rectangle with corners at ``inf`` and ``sup``.
**Examples**
>>> from sympy import count_roots, I
>>> from sympy.abc import x
>>> count_roots(x**4 - 4, -3, 3)
2
>>> count_roots(x**4 - 4, 0, 1 + 3*I)
1
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't count roots of %s, not a polynomial" % f)
return F.count_roots(inf=inf, sup=sup)
def real_roots(f, multiple=True):
"""
Return a list of real roots with multiplicities of ``f``.
**Examples**
>>> from sympy import real_roots
>>> from sympy.abc import x
>>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
[-1/2, 2, 2]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't compute real roots of %s, not a polynomial" % f)
return F.real_roots(multiple=multiple)
def nroots(f, maxsteps=50, cleanup=True, error=False):
"""
Compute numerical approximations of roots of ``f``.
**Examples**
>>> from sympy import nroots
>>> from sympy.abc import x
>>> nroots(x**2 - 3)
[-1.73205080756888, 1.73205080756888]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't compute numerical roots of %s, not a polynomial" % f)
return F.nroots(maxsteps=maxsteps, cleanup=cleanup, error=error)
def ground_roots(f, *gens, **args):
"""
Compute roots of ``f`` by factorization in the ground domain.
**Examples**
>>> from sympy import ground_roots
>>> from sympy.abc import x
>>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
{0: 2, 1: 2}
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('ground_roots', 1, exc)
return F.ground_roots()
def nth_power_roots_poly(f, n, *gens, **args):
"""
Construct a polynomial with n-th powers of roots of ``f``.
**Examples**
>>> from sympy import nth_power_roots_poly, factor, roots
>>> from sympy.abc import x
>>> f = x**4 - x**2 + 1
>>> g = factor(nth_power_roots_poly(f, 2))
>>> g
(x**2 - x + 1)**2
>>> R_f = [ (r**2).expand() for r in roots(f) ]
>>> R_g = roots(g).keys()
>>> set(R_f) == set(R_g)
True
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('nth_power_roots_poly', 1, exc)
result = F.nth_power_roots_poly(n)
if not opt.polys:
return result.as_expr()
else:
return result
def cancel(f, *gens, **args):
"""
Cancel common factors in a rational function ``f``.
**Examples**
>>> from sympy import cancel
>>> from sympy.abc import x
>>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
(2*x + 2)/(x - 1)
"""
options.allowed_flags(args, ['polys'])
f = sympify(f)
if type(f) is not tuple:
if f.is_Number:
return f
else:
p, q = f.as_numer_denom()
else:
p, q = f
try:
(F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
except PolificationFailed, exc:
if type(f) is not tuple:
return f
else:
return S.One, p, q
c, P, Q = F.cancel(G)
if type(f) is not tuple:
return c*(P.as_expr()/Q.as_expr())
else:
if not opt.polys:
return c, P.as_expr(), Q.as_expr()
else:
return c, P, Q
def reduced(f, G, *gens, **args):
"""
Reduces a polynomial ``f`` modulo a set of polynomials ``G``.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
**Examples**
>>> from sympy import reduced
>>> from sympy.abc import x, y
>>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
([2*x, 1], x**2 + y**2 + y)
"""
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('reduced', 0, exc)
for i, poly in enumerate(polys):
polys[i] = sdp_from_dict(poly.rep.to_dict(), opt.order)
level = len(opt.gens)-1
Q, r = sdp_div(polys[0], polys[1:], level, opt.order, opt.domain)
Q = [ Poly.new(DMP(dict(q), opt.domain, level), *opt.gens) for q in Q ]
r = Poly.new(DMP(dict(r), opt.domain, level), *opt.gens)
if not opt.polys:
return [ q.as_expr() for q in Q ], r.as_expr()
else:
return Q, r
def groebner(F, *gens, **args):
"""
Computes the reduced Groebner basis for a set of polynomials.
Use the ``order`` argument to set the monomial ordering that will be
used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
``grevlex``. If no order is specified, it defaults to ``lex``.
**Examples**
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> groebner([x*y - 2*y, 2*y**2 - x**2], order='lex')
[x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y]
>>> groebner([x*y - 2*y, 2*y**2 - x**2], order='grlex')
[y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
>>> groebner([x*y - 2*y, 2*y**2 - x**2], order='grevlex')
[x**3 - 2*x**2, -x**2 + 2*y**2, x*y - 2*y]
**References**
1. [Buchberger01]_
2. [Cox97]_
"""
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(F, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('groebner', len(F), exc)
domain = opt.domain
if domain.has_assoc_Field:
opt.domain = domain.get_field()
else:
raise DomainError("can't compute a Groebner basis over %s" % domain)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = sdp_from_dict(poly, opt.order)
level = len(opt.gens)-1
G = sdp_groebner(polys, level, opt.order, opt.domain)
G = [ Poly._from_dict(dict(g), opt) for g in G ]
if not domain.has_Field:
G = [ g.clear_denoms(convert=True)[1] for g in G ]
if not opt.polys:
return [ g.as_expr() for g in G ]
else:
return G
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
**Examples**
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
poly_factors.append(_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(**args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if 'expand' not in args:
args['expand'] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
|
minrk/sympy
|
sympy/polys/polytools.py
|
Python
|
bsd-3-clause
| 133,278
|
[
"Gaussian"
] |
48385c7739953a8c2a3a1aaaf50ade9214fd6664d5bfdd988dccf5f5e6c48aa3
|
#!/usr/bin/env python
# This example demonstrates the generation of a streamsurface.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Read the data and specify which scalars and vectors to read.
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
pl3d_output = pl3d.GetOutput().GetBlock(0)
# We use a rake to generate a series of streamline starting points
# scattered along a line. Each point will generate a streamline. These
# streamlines are then fed to the vtkRuledSurfaceFilter which stitches
# the lines together to form a surface.
rake = vtk.vtkLineSource()
rake.SetPoint1(15, -5, 32)
rake.SetPoint2(15, 5, 32)
rake.SetResolution(21)
rakeMapper = vtk.vtkPolyDataMapper()
rakeMapper.SetInputConnection(rake.GetOutputPort())
rakeActor = vtk.vtkActor()
rakeActor.SetMapper(rakeMapper)
integ = vtk.vtkRungeKutta4()
sl = vtk.vtkStreamLine()
sl.SetInputData(pl3d_output)
sl.SetSourceConnection(rake.GetOutputPort())
sl.SetIntegrator(integ)
sl.SetMaximumPropagationTime(0.1)
sl.SetIntegrationStepLength(0.1)
sl.SetIntegrationDirectionToBackward()
sl.SetStepLength(0.001)
# The ruled surface stitches together lines with triangle strips.
# Note the SetOnRatio method. It turns on every other strip that
# the filter generates (only when multiple lines are input).
scalarSurface = vtk.vtkRuledSurfaceFilter()
scalarSurface.SetInputConnection(sl.GetOutputPort())
scalarSurface.SetOffset(0)
scalarSurface.SetOnRatio(2)
scalarSurface.PassLinesOn()
scalarSurface.SetRuledModeToPointWalk()
scalarSurface.SetDistanceFactor(30)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(scalarSurface.GetOutputPort())
mapper.SetScalarRange(pl3d_output.GetScalarRange())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Put an outline around for context.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(pl3d_output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
# Now create the usual graphics stuff.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(rakeActor)
ren.AddActor(actor)
ren.AddActor(outlineActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(300, 300)
iren.Initialize()
renWin.Render()
iren.Start()
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Examples/VisualizationAlgorithms/Python/streamSurface.py
|
Python
|
mit
| 2,703
|
[
"VTK"
] |
2aca9bc99ac5e4bde1329fd0701d8568f734cd1820cfb7b57ee6461634eaee3e
|
# Copyright (C) 2018 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that assume the noise is Gaussian.
"""
import numpy
from pycbc import filter as pyfilter
from pycbc.waveform import get_fd_waveform
from pycbc.detector import Detector
from .gaussian_noise import BaseGaussianNoise
from .tools import DistMarg
class SingleTemplate(DistMarg, BaseGaussianNoise):
r"""Model that assumes we know all the intrinsic parameters.
This model assumes we know all the intrinsic parameters, and are only
maximizing over the extrinsic ones. We also assume a dominant-mode,
non-precessing waveform approximant.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data (assumed to be unwhitened). All data must have the
same frequency resolution.
low_frequency_cutoff : dict
A dictionary of starting frequencies, in which the keys are the
detector names and the values are the starting frequencies for the
respective detectors to be used for computing inner products.
sample_rate : int, optional
The sample rate to use. Default is 32768.
polarization_samples: int, optional
Parameter to specify how finely to marginalize over polarization angle.
If None, then polarization must be a parameter.
\**kwargs :
All other keyword arguments are passed to
:py:class:`BaseGaussianNoise`; see that class for details.
"""
name = 'single_template'
def __init__(self, variable_params, data, low_frequency_cutoff,
sample_rate=32768,
polarization_samples=None,
**kwargs):
variable_params, kwargs = self.setup_distance_marginalization(
variable_params,
marginalize_phase=True,
**kwargs)
super(SingleTemplate, self).__init__(
variable_params, data, low_frequency_cutoff, **kwargs)
# Generate template waveforms
df = data[self.detectors[0]].delta_f
p = self.static_params.copy()
if 'distance' in p:
_ = p.pop('distance')
if 'inclination' in p:
_ = p.pop('inclination')
hp, _ = get_fd_waveform(delta_f=df, distance=1, inclination=0, **p)
# Extend template to high sample rate
flen = int(int(sample_rate) / df) // 2 + 1
hp.resize(flen)
# Polarization array to marginalize over if polarization_samples is given
self.pflag = 0
if polarization_samples is not None:
self.polarization = numpy.linspace(0, 2*numpy.pi,
int(polarization_samples))
self.pflag = 1
# Calculate high sample rate SNR time series
self.sh = {}
self.hh = {}
self.det = {}
for ifo in self.data:
flow = self.kmin[ifo] * df
fhigh = self.kmax[ifo] * df
# Extend data to high sample rate
self.data[ifo].resize(flen)
self.det[ifo] = Detector(ifo)
snr, _, _ = pyfilter.matched_filter_core(
hp, self.data[ifo],
psd=self.psds[ifo],
low_frequency_cutoff=flow,
high_frequency_cutoff=fhigh)
self.sh[ifo] = 4 * df * snr
self.hh[ifo] = pyfilter.sigmasq(
hp, psd=self.psds[ifo],
low_frequency_cutoff=flow,
high_frequency_cutoff=fhigh)
self.time = None
def _loglr(self):
r"""Computes the log likelihood ratio
Returns
-------
float
The value of the log likelihood ratio.
"""
# calculate <d-h|d-h> = <h|h> - 2<h|d> + <d|d> up to a constant
p = self.current_params.copy()
p.update(self.static_params)
if self.pflag == 0:
polarization = p['polarization']
elif self.pflag == 1:
polarization = self.polarization
if self.time is None:
self.time = p['tc']
sh_total = hh_total = 0
for ifo in self.sh:
fp, fc = self.det[ifo].antenna_pattern(p['ra'], p['dec'],
polarization, self.time)
dt = self.det[ifo].time_delay_from_earth_center(p['ra'], p['dec'],
self.time)
ic = numpy.cos(p['inclination'])
ip = 0.5 * (1.0 + ic * ic)
htf = (fp * ip + 1.0j * fc * ic) / p['distance']
sh = self.sh[ifo].at_time(p['tc'] + dt) * htf
sh_total += sh
hh_total += self.hh[ifo] * abs(htf) ** 2.0
return self.marginalize_loglr(sh_total, hh_total)
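if __name__ == '__main__':
    # Hedged illustration (not part of pycbc): the per-detector projection
    # factor used in _loglr above, evaluated for made-up antenna-pattern
    # responses and source parameters.
    fp, fc = 0.3, 0.5                    # hypothetical antenna responses
    inclination, distance = 0.4, 100.0   # hypothetical source parameters
    ic = numpy.cos(inclination)
    ip = 0.5 * (1.0 + ic * ic)
    htf = (fp * ip + 1.0j * fc * ic) / distance
    print('example waveform projection factor: %s' % htf)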
|
tdent/pycbc
|
pycbc/inference/models/single_template.py
|
Python
|
gpl-3.0
| 5,678
|
[
"Gaussian"
] |
bfa1e75343bafd4aac277b9c1119417a7b7f3cbeef3edd0a9f14563ef465f923
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Convert CNR Wave Data to NetCDF DSG (CF-1.6)
# <markdowncell>
# From Davide Bonaldo at CNR-ISMAR :
# here's a time series of wave data from Jesolo.
# * Columns 1 to 6: date (y m d h m s)
# * Column 7: Significant wave height (m)
# * Column 8: Mean period (s)
# * Column 9: Mean direction (deg)
# * Column 10: Sea surface elevation (m)
# <codecell>
import numpy as np
import urllib
%matplotlib inline
# <codecell>
url='https://www.dropbox.com/s/0epy3vsjgl1h8ld/ONDE_Jesolo.txt?dl=1'
local_file = '/usgs/data2/notebook/data/ONDE_Jesolo.txt'
# <codecell>
urllib.urlretrieve(url, local_file)
# <codecell>
from datetime import datetime
import pandas as pd
def date_parser(year, month, day, hour, minute, second):
var = year, month, day, hour, minute, second
var = [int(float(x)) for x in var]
return datetime(*var)
df = pd.read_csv(local_file, header=None,
delim_whitespace=True, index_col='datetime',
parse_dates={'datetime': [0, 1, 2, 3, 4, 5]},
date_parser=date_parser)
# <codecell>
df.columns=['Hsig','Twave','Dwave','Wlevel']
# <codecell>
df[['Hsig','Wlevel']].plot()
# <codecell>
import calendar
times = [ calendar.timegm(x.timetuple()) for x in df.index ]
times=np.asarray(times, dtype=np.int64)
# <codecell>
def pd_to_secs(df):
    """
    Convert a pandas datetime index to seconds since 1970.
    """
    import calendar
    return np.asarray([calendar.timegm(x.timetuple()) for x in df.index], dtype=np.int64)
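# <codecell>
# Hedged sanity check (not in the original notebook): 1970-01-02 is exactly
# 86400 seconds after the Unix epoch, so pd_to_secs on a one-row index built
# from that date should return [86400].
_check = pd.DataFrame(index=pd.to_datetime(['1970-01-02']))
assert pd_to_secs(_check)[0] == 86400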
# <codecell>
secs = pd_to_secs(df)
# <codecell>
# z is positive down; the writer will create an ADCP-style file if z varies, and a simple time series otherwise
z = np.zeros_like(secs)
# <codecell>
values = df['Hsig'].values
# <codecell>
from pytools.netcdf.sensors import create,ncml,merge,crawl
# <codecell>
sensor_urn='urn:it.cnr.ismar.ve:sensor:wave_height'
station_urn='urn:it.cnr.ismar.ve:station:onda'
# <codecell>
attributes={'units':'m'}
# <codecell>
create.create_timeseries_file(output_directory='/usgs/data2/notebook/data',
latitude=41.5,
longitude=-69.1,
full_station_urn=station_urn,
full_sensor_urn=sensor_urn,
sensor_vertical_datum=0.0,
times=secs,
verticals=z,
values=values,
attributes=attributes,
global_attributes={},
output_filename='wave_data.nc')
# <codecell>
|
rsignell-usgs/notebook
|
wave_data_to_netcdf.py
|
Python
|
mit
| 2,702
|
[
"NetCDF"
] |
5c26896e71aef8fb01f33d992932bfd14f97a397f70982274afc87b098ba477d
|
#!/usr/bin/env python
#
# $File: reichEvolve.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
def simulate(model, N0, N1, G0, G1, spec, s, mu, k):
'''Evolve a sim.Population using given demographic model
and observe the evolution of its allelic spectrum.
model: type of demographic model.
N0, N1, G0, G1: parameters of demographic model.
spec: initial allelic spectrum, should be a list of allele
frequencies for each allele.
s: selection pressure.
mu: mutation rate.
k: k for the k-allele model
'''
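    # NOTE: demo_model() and the ne() operator used below are defined earlier in
    # the full user's guide example; this excerpt assumes they are available in
    # the current namespace.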
demo_func = demo_model(model, N0, N1, G0, G1)
pop = sim.Population(size=demo_func(0), loci=1, infoFields='fitness')
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(freq=spec, loci=0)
],
matingScheme=sim.RandomMating(subPopSize=demo_func),
postOps=[
sim.KAlleleMutator(k=k, rates=mu),
sim.MaSelector(loci=0, fitness=[1, 1, 1 - s], wildtype=0),
ne(loci=[0], step=100),
sim.PyEval(r'"%d: %.2f\t%.2f\n" % (gen, 1 - alleleFreq[0][0], ne[0])',
step=100),
],
gen = G0 + G1
)
simulate('instant', 1000, 10000, 500, 500, [0.9]+[0.02]*5, 0.01, 1e-4, 200)
|
BoPeng/simuPOP
|
docs/reichEvolve.py
|
Python
|
gpl-2.0
| 2,263
|
[
"VisIt"
] |
c653b812a46729fe7121849d27166a816d60f22968c50c88d0d3992059020657
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from string import ascii_letters, digits
from ansible.compat.six import string_types
from ansible.compat.six.moves import configparser
from ansible.errors import AnsibleOptionsError
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
from ansible.utils.path import makedirs_safe
BOOL_TRUE = frozenset([ "true", "t", "y", "1", "yes", "on" ])
def mk_boolean(value):
ret = value
if not isinstance(value, bool):
if value is None:
ret = False
ret = (str(value).lower() in BOOL_TRUE)
return ret
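# Illustrative only (not part of the original module): mk_boolean treats the
# strings in BOOL_TRUE (case-insensitively) as True and everything else,
# including None, as False.
assert mk_boolean('Yes') is True
assert mk_boolean('no') is False
assert mk_boolean(None) is False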
def shell_expand(path, expand_relative_paths=False):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
if expand_relative_paths and not path.startswith('/'):
# paths are always 'relative' to the config?
if 'CONFIG_FILE' in globals():
CFGDIR = os.path.dirname(CONFIG_FILE)
path = os.path.join(CFGDIR, path)
path = os.path.abspath(path)
return path
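# Illustrative only (not part of the original module): shell_expand applies
# os.path.expandvars followed by os.path.expanduser, so a '~'-prefixed path
# becomes an absolute path under the current user's home directory.
_example_expanded = shell_expand('~/.ansible_example')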
def get_config(p, section, key, env_var, default, value_type=None, expand_relative_paths=False):
''' return a configuration variable with casting
:arg p: A ConfigParser object to look for the configuration in
    :arg section: A section of the ini config that should be examined for this key.
:arg key: The config key to get this config from
:arg env_var: An Environment variable to check for the config var. If
this is set to None then no environment variable will be used.
:arg default: A default value to assign to the config var if nothing else sets it.
:kwarg value_type: The type of the value. This can be any of the following strings:
:boolean: sets the value to a True or False value
        :integer: Sets the value to an integer or raises a ValueError
        :float: Sets the value to a float or raises a ValueError
:list: Treats the value as a comma separated list. Split the value
and return it as a python list.
:none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
:tmp_path: Create a unique temporary directory inside of the directory
specified by value and return its path.
:pathlist: Treat the value as a typical PATH string. (On POSIX, this
means colon separated strings.) Split the value and then expand
each part for environment variables and tildes.
:kwarg expand_relative_paths: for pathlist and path types, if this is set
to True then also change any relative paths into absolute paths. The
default is False.
'''
value = _get_config(p, section, key, env_var, default)
if value_type == 'boolean':
value = mk_boolean(value)
elif value:
if value_type == 'integer':
value = int(value)
elif value_type == 'float':
value = float(value)
elif value_type == 'list':
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif value_type == 'none':
if value == "None":
value = None
elif value_type == 'path':
value = shell_expand(value, expand_relative_paths=expand_relative_paths)
elif value_type == 'tmppath':
value = shell_expand(value)
if not os.path.exists(value):
makedirs_safe(value, 0o700)
prefix = 'ansible-local-%s' % os.getpid()
value = tempfile.mkdtemp(prefix=prefix, dir=value)
elif value_type == 'pathlist':
if isinstance(value, string_types):
value = [shell_expand(x, expand_relative_paths=expand_relative_paths) \
for x in value.split(os.pathsep)]
elif isinstance(value, string_types):
value = unquote(value)
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
value = default
if p is not None:
try:
value = p.get(section, key, raw=True)
except:
pass
if env_var is not None:
env_value = os.environ.get(env_var, None)
if env_value is not None:
value = env_value
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
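# Illustrative only (not part of the original module): with no ConfigParser and
# no environment variable set, get_config falls back to the default and then
# applies the requested value_type cast.
_forks_example = get_config(None, 'defaults', 'forks', None, '5', value_type='integer')
assert _forks_example == 5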
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
try:
path1 = os.getcwd() + "/ansible.cfg"
except OSError:
path1 = None
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
p, CONFIG_FILE = load_config_file()
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
'vader-koala', 'vader', 'www',]
# sections in config file
DEFAULTS='defaults'
# FIXME: add deprecation warning when these get set
#### DEPRECATED VARS ####
# use more sanely named 'inventory'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', value_type='path')
# this is not used since 0.5 but people might still have in config
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
# If --tags or --skip-tags is given multiple times on the CLI and this is
# True, merge the lists of tags together. If False, let the last argument
# overwrite any previous ones. Behaviour is overwrite through 2.2. 2.3
# overwrites but prints deprecation. 2.4 the default is to merge.
MERGE_MULTIPLE_CLI_TAGS = get_config(p, DEFAULTS, 'merge_multiple_cli_tags', 'ANSIBLE_MERGE_MULTIPLE_CLI_TAGS', False, value_type='boolean')
#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, value_type='boolean')
DEFAULT_VERBOSITY = get_config(p, DEFAULTS, 'verbosity', 'ANSIBLE_VERBOSITY', 0, value_type='integer')
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, value_type='path')
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', value_type='pathlist', expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', value_type='tmppath')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FACT_PATH = get_config(p, DEFAULTS, 'fact_path', 'ANSIBLE_FACT_PATH', None, value_type='path')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, value_type='integer')
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_MODULE_SET_LOCALE = get_config(p, DEFAULTS, 'module_set_locale','ANSIBLE_MODULE_SET_LOCALE',False, value_type='boolean')
DEFAULT_MODULE_COMPRESSION= get_config(p, DEFAULTS, 'module_compression', None, 'ZIP_DEFLATED')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, value_type='integer')
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, value_type='integer')
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, value_type='boolean')
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, value_type='path')
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, value_type='integer')
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, value_type='boolean')
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, value_type='path')
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', 'smart')
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, value_type='boolean')
DEFAULT_SSH_TRANSFER_METHOD = get_config(p, 'ssh_connection', 'transfer_method', 'ANSIBLE_SSH_TRANSFER_METHOD', None)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, value_type='boolean')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, value_type='boolean')
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower()
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, value_type='integer')
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', value_type='path')
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, value_type='boolean')
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], value_type='list')
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, value_type='integer')
DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, value_type='float')
ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, value_type='boolean')
SHOW_CUSTOM_STATS = get_config(p, DEFAULTS, 'show_custom_stats', 'ANSIBLE_SHOW_CUSTOM_STATS', False, value_type='boolean')
# static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, value_type='boolean')
DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, value_type='boolean')
# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, value_type='boolean')
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, value_type='boolean')
ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, value_type='boolean')
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs, 9p', value_type='list')
DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, value_type='boolean')
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, value_type='boolean')
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, value_type='boolean')
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, value_type='boolean')
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, value_type='boolean')
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'doas': 'Permission denied', 'dzdo': '', 'ksu': 'Password incorrect'} #FIXME: deal with i18n
BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'doas': 'Authorization required', 'dzdo': '', 'ksu': 'No password given'} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','doas','dzdo','ksu','runas']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, value_type='boolean')
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, value_type='boolean')
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, value_type='boolean')
# PLUGINS
# Modules that can optimize with_items loops into a single call. Currently
# these modules must take a "name" or "pkg" parameter that is a list. If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, value_type='pathlist')
DEFAULT_MODULE_UTILS_PATH = get_config(p, DEFAULTS, 'module_utils', 'ANSIBLE_MODULE_UTILS', None, value_type='pathlist')
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS', '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules','NETWORK_GROUP_MODULES', ['eos', 'nxos', 'ios', 'iosxr', 'junos', 'vyos'], value_type='list')
DEFAULT_STRATEGY = get_config(p, DEFAULTS, 'strategy', 'ANSIBLE_STRATEGY', 'linear')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, value_type='integer')
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, value_type='boolean')
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, value_type='boolean')
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, value_type='boolean')
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, value_type='list')
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, value_type='boolean')
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, value_type='boolean')
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, value_type='boolean')
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, value_type='boolean')
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, value_type='boolean')
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], value_type='list')
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, value_type='boolean')
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, value_type='boolean')
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], value_type='list')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, value_type='boolean')
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, value_type='path')
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, value_type='none')
DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, value_type='boolean')
MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024*1024, value_type='integer')
# CONNECTION RELATED
USE_PERSISTENT_CONNECTIONS = get_config(p, DEFAULTS, 'use_persistent_connections', 'ANSIBLE_USE_PERSISTENT_CONNECTIONS', False, value_type='boolean')
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-C -o ControlMaster=auto -o ControlPersist=60s')
### WARNING: Someone might be tempted to switch this from percent-formatting
# to .format() in the future. be sure to read this:
# http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/ and understand
# that it may be a security risk to do so.
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', None)
ANSIBLE_SSH_CONTROL_PATH_DIR = get_config(p, 'ssh_connection', 'control_path_dir', 'ANSIBLE_SSH_CONTROL_PATH_DIR', u'~/.ansible/cp')
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, value_type='boolean')
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, value_type='integer')
ANSIBLE_SSH_EXECUTABLE = get_config(p, 'ssh_connection', 'ssh_executable', 'ANSIBLE_SSH_EXECUTABLE', 'ssh')
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, value_type='boolean')
PARAMIKO_HOST_KEY_AUTO_ADD = get_config(p, 'paramiko_connection', 'host_key_auto_add', 'ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD', False, value_type='boolean')
PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None)
PARAMIKO_LOOK_FOR_KEYS = get_config(p, 'paramiko_connection', 'look_for_keys', 'ANSIBLE_PARAMIKO_LOOK_FOR_KEYS', True, value_type='boolean')
PERSISTENT_CONNECT_TIMEOUT = get_config(p, 'persistent_connection', 'connect_timeout', 'ANSIBLE_PERSISTENT_CONNECT_TIMEOUT', 30, value_type='integer')
PERSISTENT_CONNECT_RETRIES = get_config(p, 'persistent_connection', 'connect_retries', 'ANSIBLE_PERSISTENT_CONNECT_RETRIES', 10, value_type='integer')
PERSISTENT_CONNECT_INTERVAL = get_config(p, 'persistent_connection', 'connect_interval', 'ANSIBLE_PERSISTENT_CONNECT_INTERVAL', 1, value_type='integer')
# obsolete -- will be formally removed
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, value_type='integer')
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, value_type='integer')
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, value_type='float')
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, value_type='integer')
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, value_type='boolean')
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, value_type='boolean')
# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, value_type='boolean')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', value_type='list')
GALAXY_ROLE_SKELETON = get_config(p, 'galaxy', 'role_skeleton', 'ANSIBLE_GALAXY_ROLE_SKELETON', None, value_type='path')
GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'], value_type='list')
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list' )
# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')
# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, value_type='integer')
DIFF_ALWAYS = get_config(p, 'diff', 'always', 'ANSIBLE_DIFF_ALWAYS', False, value_type='boolean')
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'win_command', 'shell', 'win_shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'win_command', 'shell', 'win_shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
# module search
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
IGNORE_FILES = ["COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES"]
INTERNAL_RESULT_KEYS = ['add_host', 'add_group']
RESTRICTED_RESULT_KEYS = ['ansible_rsync_path', 'ansible_playbook_python']
|
t0mk/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 29,560
|
[
"Galaxy",
"MOOSE"
] |
263d2ac7e769dd96b66b02f29fcb385a82249e2217d31bd34f509ae617be5fd5
|
"""\
CGBF.py Perform basic operations over contracted gaussian basis
functions. Uses the functions in PGBF.py.
References:
OHT = K. O-ohata, H. Taketa, S. Huzinaga. J. Phys. Soc. Jap. 21, 2306 (1966).
THO = Taketa, Huzinaga, O-ohata, J. Phys. Soc. Jap. 21,2313 (1966).
This program is part of the PyQuante quantum chemistry program suite
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
import PGBF
from NumWrap import zeros,array
from math import sqrt
from PyQuante.cints import overlap
#from PyQuante.chgp import contr_coulomb
from PyQuante.crys import contr_coulomb
from PyQuante.contracted_gto import ContractedGTO
class CGBF(ContractedGTO):
"Class for a contracted Gaussian basis function"
def __init__(self,origin,powers=(0,0,0),atid=0):
super(CGBF, self).__init__(origin, powers, atid)
self.origin = tuple([float(i) for i in origin])
self.powers = powers
self.norm = 1.
self.prims = []
self.pnorms = []
self.pexps = []
self.pcoefs = []
self.ang_mom = sum(powers)
        # Added by Hatem H Helal hhh23@cam.ac.uk
        # Stores the atom id number, making it easy to identify which atom
        # a particular basis function is centered on.
self.atid = atid
return
def __repr__(self):
s = "<cgbf atomid=%d origin=\"(%f,%f,%f)\" powers=\"(%d,%d,%d)\">\n" % \
(self.atid,self.origin[0],self.origin[1],self.origin[2],
self.powers[0],self.powers[1],self.powers[2])
for prim in self.prims:
s = s + prim.prim_str(self.norm)
s = s + "</cgbf>\n"
return s
def center(self,other):
# Crude estimate to where the center is. The correct form
# would use gaussian_product_center, but this has multiple
# values for each pair of primitives
xa,ya,za = self.origin
xb,yb,zb = other.origin
return 0.5*(xa+xb),0.5*(ya+yb),0.5*(za+zb)
def add_primitive(self,exponent,coefficient):
"Add a primitive BF to this contracted set"
pbf = PGBF.PGBF(exponent,self.origin,self.powers)
pbf.coef = coefficient # move into PGBF constructor
super(CGBF,self).add_primitive(pbf,coefficient)
self.pnorms.append(pbf.norm)
self.prims.append(pbf)
self.pexps.append(exponent)
self.pcoefs.append(coefficient)
return
def reset_powers(self,px,py,pz):
self.powers = (px,py,pz)
for prim in self.prims:
prim.reset_powers(px,py,pz)
return
# Normalize defined in the superclass...
def overlap(self,other):
"Overlap matrix element with another CGBF"
Sij = 0.
for ipbf in self.prims:
for jpbf in other.prims:
Sij = Sij + ipbf.coef*jpbf.coef*ipbf.overlap(jpbf)
return self.norm*other.norm*Sij
def kinetic(self,other):
"KE matrix element with another CGBF"
Tij = 0.
for ipbf in self.prims:
for jpbf in other.prims:
Tij = Tij + ipbf.coef*jpbf.coef*ipbf.kinetic(jpbf)
return self.norm*other.norm*Tij
def multipole(self,other,i,j,k):
"Overlap matrix element with another CGBF"
Mij = 0.
for ipbf in self.prims:
for jpbf in other.prims:
Mij += ipbf.coef*jpbf.coef*ipbf.multipole(jpbf,i,j,k)
return self.norm*other.norm*Mij
def nuclear(self,other,C):
"Nuclear matrix element with another CGBF and a center C"
Vij = 0.
for ipbf in self.prims:
for jpbf in other.prims:
Vij = Vij + ipbf.coef*jpbf.coef*ipbf.nuclear(jpbf,C)
return self.norm*other.norm*Vij
def amp(self,x,y,z):
"Compute the amplitude of the CGBF at point x,y,z"
val = 0.
for prim in self.prims: val+= prim.amp(x,y,z)
return self.norm*val
def move_center(self,dx,dy,dz):
"Move the basis function to another center"
self.origin = (self.origin[0]+dx,self.origin[1]+dy,self.origin[2]+dz)
for prim in self.prims: prim.move_center(dx,dy,dz)
return
def doverlap(self,other,dir):
"Overlap of func with derivative of another"
dSij = 0.
l = other.powers[dir]
ijk_plus = list(other.powers)
ijk_plus[dir] += 1
ijk_plus = tuple(ijk_plus)
for ipbf in self.prims:
for jpbf in other.prims:
dSij += 2*jpbf.exp*ipbf.coef*jpbf.coef*\
ipbf.norm*jpbf.norm*\
overlap(ipbf.exp,ipbf.powers,ipbf.origin,
jpbf.exp,ijk_plus,jpbf.origin)
if l>0:
ijk_minus = list(other.powers)
ijk_minus[dir] -= 1
ijk_minus = tuple(ijk_minus)
for ipbf in self.prims:
for jpbf in other.prims:
dSij -= l*ipbf.coef*jpbf.coef*\
ipbf.norm*jpbf.norm*\
overlap(ipbf.exp,ipbf.powers,ipbf.origin,
jpbf.exp,ijk_minus,jpbf.origin)
return self.norm*other.norm*dSij
def doverlap_num(self,other,dir):
"Overlap of func with derivative of another: numeric approximation"
dSij = 0.
delta = 0.001 # arbitrary shift amount
origin_plus = list(other.origin)
origin_plus[dir] += delta
origin_plus = tuple(origin_plus)
origin_minus = list(other.origin)
origin_minus[dir] -= delta
origin_minus = tuple(origin_minus)
for ipbf in self.prims:
for jpbf in other.prims:
dSij += 0.5*ipbf.coef*jpbf.coef*ipbf.norm*jpbf.norm*(
                    overlap(ipbf.exp,ipbf.powers,ipbf.origin,
                            jpbf.exp,jpbf.powers,origin_plus)
                    -overlap(ipbf.exp,ipbf.powers,ipbf.origin,
                             jpbf.exp,jpbf.powers,origin_minus)
)/delta
return self.norm*other.norm*dSij
def laplacian(self,pos):
"Evaluate the laplacian of the function at pos=x,y,z"
val = 0.
for prim in self.prims: val += prim.laplacian(pos)
return self.norm*val
def grad(self,x,y,z):
"Evaluate the grad of the function at pos=x,y,z"
val = zeros(3,'d')
for prim in self.prims:
val += prim.grad(x,y,z)
return self.norm*val
def coulomb(a,b,c,d):
"Coulomb interaction between 4 contracted Gaussians"
Jij = contr_coulomb(a.pexps,a.pcoefs,a.pnorms,a.origin,a.powers,
b.pexps,b.pcoefs,b.pnorms,b.origin,b.powers,
c.pexps,c.pcoefs,c.pnorms,c.origin,c.powers,
d.pexps,d.pcoefs,d.pnorms,d.origin,d.powers)
return a.norm*b.norm*c.norm*d.norm*Jij
def three_center(a,b,c):
import PGBF
sum = 0
for ac,ap in zip(a.pcoefs,a.prims):
for bc,bp in zip(b.pcoefs,b.prims):
for cc,cp in zip(c.pcoefs,c.prims):
sum += ac*bc*cc*PGBF.three_center(ap,bp,cp)
return a.norm*b.norm*c.norm*sum
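if __name__ == '__main__':
    # Hedged usage sketch (not part of PyQuante): build an STO-3G-like hydrogen
    # 1s function and check that, after normalization, its self-overlap is ~1.
    # The exponents/coefficients are the standard STO-3G hydrogen values; the
    # normalize() call is assumed to be provided by the ContractedGTO superclass.
    s_bf = CGBF((0., 0., 0.), (0, 0, 0))
    for expnt, coef in [(3.42525091, 0.15432897),
                        (0.62391373, 0.53532814),
                        (0.16885540, 0.44463454)]:
        s_bf.add_primitive(expnt, coef)
    s_bf.normalize()
    print("self-overlap: %s" % s_bf.overlap(s_bf))  # expected to be close to 1.0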
|
gabrielelanaro/pyquante
|
PyQuante/CGBF.py
|
Python
|
bsd-3-clause
| 7,318
|
[
"Gaussian"
] |
9a7be6ec6b41614a0f613aa57dd2365963293ce0e4435c146168d32b0780d83f
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
- This module is used to configure SSLProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
accepted_ciphers:
description:
            - Cipher suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Enum options - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256, tls_ecdhe_ecdsa_with_aes_256_gcm_sha384, tls_ecdhe_rsa_with_aes_128_gcm_sha256,
- tls_ecdhe_rsa_with_aes_256_gcm_sha384, tls_ecdhe_ecdsa_with_aes_128_cbc_sha256, tls_ecdhe_ecdsa_with_aes_256_cbc_sha384,
- tls_ecdhe_rsa_with_aes_128_cbc_sha256, tls_ecdhe_rsa_with_aes_256_cbc_sha384, tls_rsa_with_aes_128_gcm_sha256, tls_rsa_with_aes_256_gcm_sha384,
- tls_rsa_with_aes_128_cbc_sha256, tls_rsa_with_aes_256_cbc_sha256, tls_ecdhe_ecdsa_with_aes_128_cbc_sha, tls_ecdhe_ecdsa_with_aes_256_cbc_sha,
- tls_ecdhe_rsa_with_aes_128_cbc_sha, tls_ecdhe_rsa_with_aes_256_cbc_sha, tls_rsa_with_aes_128_cbc_sha, tls_rsa_with_aes_256_cbc_sha,
- tls_rsa_with_3des_ede_cbc_sha, tls_rsa_with_rc4_128_sha.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
- Units(SEC).
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
"""
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
accepted_ciphers=dict(type='str',),
accepted_versions=dict(type='list',),
cipher_enums=dict(type='list',),
description=dict(type='str',),
dhparam=dict(type='str',),
enable_ssl_session_reuse=dict(type='bool',),
name=dict(type='str', required=True),
prefer_client_cipher_ordering=dict(type='bool',),
send_close_notify=dict(type='bool',),
ssl_rating=dict(type='dict',),
ssl_session_timeout=dict(type='int',),
tags=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslprofile',
set([]))
if __name__ == '__main__':
main()
|
noroutine/ansible
|
lib/ansible/modules/network/avi/avi_sslprofile.py
|
Python
|
gpl-3.0
| 8,059
|
[
"VisIt"
] |
882dc13968a262b66efe35877b2e76992f0644eaac17688ad6daaf67a2e43100
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2011, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 or (at your
# option) any later version as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
|
zenoss/ZenPacks.zenoss.PostgreSQL
|
ZenPacks/zenoss/PostgreSQL/parsers/__init__.py
|
Python
|
gpl-2.0
| 560
|
[
"VisIt"
] |
7ff2ed461e41b204872862ffecf88d9ba49f8dd648044a12569b8f304c1aedb2
|
## begin license ##
#
# "CQLParser" is a parser that builds a parsetree for the given CQL and can convert this into other formats.
#
# Copyright (C) 2012-2013, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2021 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2021 SURF https://www.surf.nl
# Copyright (C) 2021 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "CQLParser"
#
# "CQLParser" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "CQLParser" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "CQLParser"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from seecrtest.timing import T
from unittest import TestCase
#from cq2utils.profileit import profile
from cqlparser import parseString, cql2string, CqlIdentityVisitor, CqlVisitor
from time import time
class SpeedTest(TestCase):
@staticmethod
def ridiculouslongquery():
with open('ridiculouslongquery.txt') as f:
return f.read().strip()
def testParser(self):
q = self.ridiculouslongquery()
def doParse():
for i in range(10):
r = parseString(q)
t0 = time()
doParse()
t1 = time()
#profile(doParse, runKCacheGrind = True)
self.assertTiming(0.0058, t1-t0, 0.065) # bugfix with AND NOT (implementation following BNF)
# asserts below are for archeological purposes only.
#self.assertTiming(0.050, t1-t0, 0.054) # side effect of optimizing visitor
#self.assertTiming(0.053, t1-t0, 0.057) # used __slots__ in CqlAbstractNode
#self.assertTiming(0.060, t1-t0, 0.064) # inlined TokenStack (way less code!)
#self.assertTiming(0.074, t1-t0, 0.078) # let re do the tokenizing
#self.assertTiming(0.101, t1-t0, 0.103) # rewrote everything to try/except
#self.assertTiming(0.115, t1-t0, 0.120) # inlined _tryTerm and some _construct
#self.assertTiming(0.132, t1-t0, 0.136) # inlined _tryTerm and some _construct
#self.assertTiming(0.141, t1-t0, 0.149) # optimized _tryTerms
#self.assertTiming(0.155, t1-t0, 0.165) # replaced Stack with []
#self.assertTiming(0.180, t1-t0, 0.190) # start
def testIdentityVisitor(self):
p = parseString(self.ridiculouslongquery())
def doVisit():
for i in range(10):
CqlIdentityVisitor(p).visit()
t0 = time()
doVisit()
t1 = time()
#profile(doVisit, runKCacheGrind = True)
self.assertTiming(0.0032, t1-t0, 0.041) # optimized identityvisitor
# asserts below are for archeological purposes only.
#self.assertTiming(0.050, t1-t0, 0.053) # made visitXYZ() optional
#self.assertTiming(0.064, t1-t0, 0.068) # replaced children() attr access and replaced tuple by list
#self.assertTiming(0.100, t1-t0, 0.110) # start
def testPartialVisitor(self):
class PartialVisitor(CqlVisitor):
def visitINDEX(self, node):
return node.visitChildren(self)
p = parseString(self.ridiculouslongquery())
def doVisit():
for i in range(10):
PartialVisitor(p).visit()
t0 = time()
doVisit()
t1 = time()
#profile(doVisit, runKCacheGrind = True)
self.assertTiming(0.0018, t1-t0, 0.024)
def assertTiming(self, t0, t, t1):
self.assertTrue(t0*T < t < t1*T, t/T)
|
seecr/cqlparser
|
test/speedtest.py
|
Python
|
gpl-2.0
| 4,059
|
[
"VisIt"
] |
fc3af1021d45f7792211ca30153f23185f1cbaf849099d5e23f627383288b608
|
"""
=============================================
SNR estimation for Diffusion-Weighted Images
=============================================
Computing the Signal-to-Noise-Ratio (SNR) of DW images is still an open
question, as SNR depends on the white matter structure of interest as well as
the gradient direction corresponding to each DWI.
In classical MRI, SNR can be defined as the ratio of the mean of the signal
divided by the standard deviation of the underlying Gaussian noise, that is
$SNR = mean(signal) / std(noise)$. The noise standard deviation can be computed
from the background in any of the DW images. How do we compute the mean of the
signal, and what signal?
The strategy here is to compute a 'worst-case' SNR for DWI. Several white
matter structures such as the corpus callosum (CC), corticospinal tract (CST),
or the superior longitudinal fasciculus (SLF) can be easily identified from the
colored-FA (CFA) map. In this example, we will use voxels from the CC, which
have the characteristic of being highly red in the CFA map since they are
mainly oriented in the left-right direction. We know that the DW image closest
to the X-direction will be the one with the most attenuated diffusion signal.
This is the strategy adopted in several recent papers (see [Descoteaux2011]_
and [Jones2013]_). It gives a good indication of the quality of the DWI data.
First, we compute the tensor model in a brain mask (see the :ref:`reconst_dti`
example for further explanations).
"""
from __future__ import division, print_function
import nibabel as nib
import numpy as np
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.segment.mask import median_otsu
from dipy.reconst.dti import TensorModel
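# A tiny, self-contained illustration of the SNR definition discussed above,
# on synthetic numbers (not part of the original example): a constant signal
# of 50 against Gaussian noise with standard deviation 5 gives SNR ~ 10.
_toy_signal = np.full(100, 50.0)
_toy_noise = np.random.normal(0.0, 5.0, size=10000)
print('toy SNR =', _toy_signal.mean() / _toy_noise.std())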
fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
data = img.get_data()
affine = img.affine
print('Computing brain mask...')
b0_mask, mask = median_otsu(data)
print('Computing tensors...')
tenmodel = TensorModel(gtab)
tensorfit = tenmodel.fit(data, mask=mask)
"""Next, we set our red-green-blue thresholds to (0.6, 1) in the x axis
and (0, 0.1) in the y and z axes respectively.
These values work well in practice to isolate the very RED voxels of the cfa map.
Then, as a safeguard, we keep only the red voxels that lie inside the CC (there
could be noisy red voxels around the brain mask and we don't want those). Unless
the brain acquisition was badly aligned, the CC is always close to the mid-sagittal slice.
The following lines perform these two operations and then saves the computed mask.
"""
print('Computing worst-case/best-case SNR using the corpus callosum...')
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box
threshold = (0.6, 1, 0, 0.1, 0, 0.1)
CC_box = np.zeros_like(data[..., 0])
mins, maxs = bounding_box(mask)
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff
CC_box[bounds_min[0]:bounds_max[0],
bounds_min[1]:bounds_max[1],
bounds_min[2]:bounds_max[2]] = 1
mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box, threshold,
return_cfa=True)
cfa_img = nib.Nifti1Image((cfa*255).astype(np.uint8), affine)
mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8), affine)
nib.save(mask_cc_part_img, 'mask_CC_part.nii.gz')
import matplotlib.pyplot as plt
region = 40
fig = plt.figure('Corpus callosum segmentation')
plt.subplot(1, 2, 1)
plt.title("Corpus callosum (CC)")
plt.axis('off')
red = cfa[..., 0]
plt.imshow(np.rot90(red[region, ...]))
plt.subplot(1, 2, 2)
plt.title("CC mask used for SNR computation")
plt.axis('off')
plt.imshow(np.rot90(mask_cc_part[region, ...]))
fig.savefig("CC_segmentation.png", bbox_inches='tight')
"""
.. figure:: CC_segmentation.png
:align: center
"""
"""Now that we are happy with our crude CC mask that selected voxels in the x-direction,
we can use all the voxels to estimate the mean signal in this region.
"""
mean_signal = np.mean(data[mask_cc_part], axis=0)
"""Now, we need a good background estimation. We will re-use the brain mask
computed before and invert it to catch the outside of the brain. This could
also be determined manually with a ROI in the background.
[Warning: Certain MR manufacturers mask out the outside of the brain with 0's.
One thus has to be careful how the noise ROI is defined].
"""
from scipy.ndimage.morphology import binary_dilation
mask_noise = binary_dilation(mask, iterations=10)
mask_noise[..., :mask_noise.shape[-1]//2] = 1
mask_noise = ~mask_noise
mask_noise_img = nib.Nifti1Image(mask_noise.astype(np.uint8), affine)
nib.save(mask_noise_img, 'mask_noise.nii.gz')
noise_std = np.std(data[mask_noise, :])
print('Noise standard deviation sigma= ', noise_std)
"""We can now compute the SNR for each DWI. For example, report SNR
for DW images with gradient direction that lies the closest to
the X, Y and Z axes.
"""
# Exclude null bvecs from the search
idx = np.sum(gtab.bvecs, axis=-1) == 0
gtab.bvecs[idx] = np.inf
axis_X = np.argmin(np.sum((gtab.bvecs-np.array([1, 0, 0]))**2, axis=-1))
axis_Y = np.argmin(np.sum((gtab.bvecs-np.array([0, 1, 0]))**2, axis=-1))
axis_Z = np.argmin(np.sum((gtab.bvecs-np.array([0, 0, 1]))**2, axis=-1))
for direction in [0, axis_X, axis_Y, axis_Z]:
SNR = mean_signal[direction]/noise_std
if direction == 0 :
print("SNR for the b=0 image is :", SNR)
else :
print("SNR for direction", direction, " ", gtab.bvecs[direction], "is :", SNR)
"""SNR for the b=0 image is : ''42.0695455758''"""
"""SNR for direction 58 [ 0.98875 0.1177 -0.09229] is : ''5.46995373635''"""
"""SNR for direction 57 [-0.05039 0.99871 0.0054406] is : ''23.9329492871''"""
"""SNR for direction 126 [-0.11825 -0.039925 0.99218 ] is : ''23.9965694823''"""
"""
Since the CC is aligned with the X axis, the lowest SNR is for that gradient
direction. In comparison, the DW images in the perpendicular Y and Z axes have a
high SNR. The b0 still exhibits the highest SNR, since there is no signal
attenuation.
Hence, we can say the Stanford diffusion data has a 'worst-case' SNR of
approximately 5, a 'best-case' SNR of approximately 24, and an SNR of 42 on the
b0 image.
"""
"""
References
----------
.. [Descoteaux2011] Descoteaux, M., Deriche, R., Le Bihan, D., Mangin, J.-F.,
and Poupon, C. Multiple q-shell diffusion propagator imaging. Medical Image
Analysis, 15(4), 603, 2011.
.. [Jones2013] Jones, D. K., Knosche, T. R., & Turner, R. White Matter
Integrity, Fiber Count, and Other Fallacies: The Dos and Don'ts of Diffusion
MRI. NeuroImage, 73, 239, 2013.
"""
|
nilgoyyou/dipy
|
doc/examples/snr_in_cc.py
|
Python
|
bsd-3-clause
| 6,592
|
[
"Gaussian"
] |
14f9873b97a13754b1d65f2d3bdfbfd91c9054fba0c095569656d43ca6101f73
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT-HetSNGP L/32.
"""
# pylint: enable=line-too-long
import ml_collections
import common_fewshot # local file import from baselines.jft.experiments
def get_config():
"""Config for training a patch-transformer on JFT."""
config = ml_collections.ConfigDict()
config.seed = 0
config.dataset = 'imagenet21k'
config.val_split = 'full[:102400]'
config.train_split = 'full[102400:]'
config.num_classes = 21843
config.init_head_bias = -10.0
config.trial = 0
config.batch_size = 4096
config.num_epochs = 90
pp_common = '|value_range(-1, 1)'
config.pp_train = 'decode_jpeg_and_inception_crop(224)|flip_lr' + pp_common
config.pp_train += f'|onehot({config.num_classes}, on=0.9999, off=0.0001)'
config.pp_eval = 'decode|resize_small(256)|central_crop(224)' + pp_common
config.pp_eval += f'|onehot({config.num_classes})'
config.shuffle_buffer_size = 250_000 # Per host, so small-ish is ok.
config.log_training_steps = 10000
config.log_eval_steps = 3003 # ~= steps_per_epoch
# NOTE: Save infrequently to prevent crowding the disk space.
config.checkpoint_steps = 17250
config.checkpoint_timeout = 10
# Model section
config.model = ml_collections.ConfigDict()
config.model.patches = ml_collections.ConfigDict()
config.model.patches.size = [32, 32]
config.model.hidden_size = 1024
config.model.transformer = ml_collections.ConfigDict()
config.model.transformer.attention_dropout_rate = 0.
config.model.transformer.dropout_rate = 0.1
config.model.transformer.mlp_dim = 4096
config.model.transformer.num_heads = 16
config.model.transformer.num_layers = 24
config.model.classifier = 'token' # Or 'gap'
config.model.representation_size = 1024
# Heteroscedastic
config.het = ml_collections.ConfigDict()
config.het.multiclass = False
config.het.temperature = 1.5
config.het.mc_samples = 1000
config.het.num_factors = 50
config.het.param_efficient = True
# Gaussian process layer section
config.gp_layer = ml_collections.ConfigDict()
# Use momentum-based (i.e., non-exact) covariance update for pre-training.
# This is because the exact covariance update can be unstable during pretraining,
# since it involves inverting a precision matrix accumulated over ~300M data points.
config.gp_layer.covmat_momentum = .999
config.gp_layer.ridge_penalty = 1.
# No need to use mean field adjustment for pretraining.
config.gp_layer.mean_field_factor = -1.
# Optimizer section
config.optim_name = 'Adam'
config.optim = ml_collections.ConfigDict()
config.optim.weight_decay = 0.03
config.grad_clip_norm = 1.0
config.optim.beta1 = 0.9
config.optim.beta2 = 0.999
# TODO(lbeyer): make a mini-language like preprocessings.
config.lr = ml_collections.ConfigDict()
# LR has to be lower for GP layer and on larger models.
config.lr.base = 6e-4 # LR has to be lower for larger models!
config.lr.warmup_steps = 10_000
config.lr.decay_type = 'linear'
config.lr.linear_end = 1e-5
# Few-shot eval section
config.fewshot = common_fewshot.get_fewshot()
config.fewshot.log_steps = 50_000
return config
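# Usage sketch (not part of the original experiment launcher; shown only for
# illustration): the config can be materialized and inspected like any other
# ml_collections.ConfigDict, and individual fields can be overridden before a run.
#
#   config = get_config()
#   print(config.model.hidden_size)   # 1024
#   config.het.temperature = 1.25     # override one of the swept hyperparameters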
def get_sweep(hyper):
return hyper.product([
hyper.sweep('config.seed', [0]),
hyper.sweep('config.het.temperature',
[1.0, 1.25, 1.5, 1.75, 2.0, 2.5])
])
|
google/uncertainty-baselines
|
baselines/jft/experiments/imagenet21k_vit_l32_hetsngp.py
|
Python
|
apache-2.0
| 3,938
|
[
"Gaussian"
] |
43aa758db9855dc00bf708bfde7b3fcd3406cd18776f46c33285fff666f1cc54
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing VASP output files.
"""
import json
import glob
import itertools
import logging
import math
import os
import re
import warnings
from pathlib import Path
import xml.etree.cElementTree as ET
from collections import defaultdict
from io import StringIO
import collections
import numpy as np
from monty.io import zopen, reverse_readfile
from monty.json import MSONable
from monty.json import jsanitize
from monty.re import regrep
from monty.os.path import zpath
from monty.dev import deprecated
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.units import unitized
from pymatgen.electronic_structure.bandstructure import BandStructure, \
BandStructureSymmLine, get_reconstructed_band_structure
from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType, Magmom
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.entries.computed_entries import \
ComputedEntry, ComputedStructureEntry
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar
from pymatgen.util.io_utils import clean_lines, micro_pyawk
from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, " + \
"Vincent L Chevrier, Ioannis Petousis, Stephen Dacek, Mark Turiansky"
__credits__ = "Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 30, 2012"
logger = logging.getLogger(__name__)
def _parse_parameters(val_type, val):
"""
Helper function to convert a Vasprun parameter into the proper type.
Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
"""
if val_type == "logical":
return val == "T"
elif val_type == "int":
return int(val)
elif val_type == "string":
return val.strip()
else:
return float(val)
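# For instance (illustrative values only):
#   _parse_parameters("logical", "T")    -> True
#   _parse_parameters("int", "4")        -> 4
#   _parse_parameters("string", " PAW ") -> "PAW"
#   _parse_parameters(None, "1.5")       -> 1.5  (any other val_type falls through to float)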
def _parse_v_parameters(val_type, val, filename, param_name):
r"""
Helper function to convert a Vasprun array-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains *** for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
"""
if val_type == "logical":
val = [i == "T" for i in val.split()]
elif val_type == "int":
try:
val = [int(i) for i in val.split()]
except ValueError:
# Work around a vasprun.xml quirk that sometimes renders
# LDAUL/J as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
elif val_type == "string":
val = val.split()
else:
try:
val = [float(i) for i in val.split()]
except ValueError:
# Work around a vasprun.xml quirk that sometimes renders
# MAGMOM as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
return val
def _parse_varray(elem):
if elem.get("type", None) == 'logical':
m = [[True if i == 'T' else False for i in v.text.split()] for v in elem]
else:
m = [[_vasprun_float(i) for i in v.text.split()] for v in elem]
return m
def _parse_from_incar(filename, key):
"""
Helper function to parse a parameter from the INCAR.
"""
dirname = os.path.dirname(filename)
for f in os.listdir(dirname):
if re.search(r"INCAR", f):
warnings.warn("INCAR found. Using " + key + " from INCAR.")
incar = Incar.from_file(os.path.join(dirname, f))
if key in incar:
return incar[key]
else:
return None
return None
def _vasprun_float(f):
"""
Large numbers are often represented as ********* in the vasprun.
This function parses these values as np.nan
"""
try:
return float(f)
except ValueError as e:
f = f.strip()
if f == '*' * len(f):
warnings.warn('Float overflow (*******) encountered in vasprun')
return np.nan
raise e
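# Quick illustration of the overflow handling above (hypothetical inputs, not taken
# from any particular vasprun.xml):
#   _vasprun_float("0.5")      -> 0.5
#   _vasprun_float("********") -> nan (a warning is emitted)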
class Vasprun(MSONable):
"""
Vastly improved cElementTree-based parser for vasprun.xml files. Uses
iterparse to support incremental parsing of large files.
Speedup over DOM parsing is at least 2x for smallish files (~1 MB) and up to
orders of magnitude for larger files (~10 MB).
**Vasp results**
.. attribute:: ionic_steps
All ionic steps in the run as a list of
{"structure": structure at end of run,
"electronic_steps": {All electronic step data in vasprun file},
"stresses": stress matrix}
.. attribute:: tdos
Total dos calculated at the end of run.
.. attribute:: idos
Integrated dos calculated at the end of run.
.. attribute:: pdos
List of list of PDos objects. Access as pdos[atomindex][orbitalindex]
.. attribute:: efermi
Fermi energy
.. attribute:: eigenvalues
Available only if parse_eigen=True. Final eigenvalues as a dict of
{(spin, kpoint index):[[eigenvalue, occu]]}.
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
.. attribute:: projected_eigenvalues
Final projected eigenvalues as a dict of {spin: nd-array}. To access
a particular value, you need to do
Vasprun.projected_eigenvalues[spin][kpoint index][band index][atom index][orbital_index]
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint, band and atom indices are 0-based (unlike the 1-based indexing
in VASP).
.. attribute:: other_dielectric
Dictionary, with the tag comment as key, containing other variants of
the real and imaginary part of the dielectric constant (e.g., computed
by RPA) as a function of the energy (frequency). Optical properties (e.g.,
the absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values: the energies, the real part
tensors, and the imaginary part tensors
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
.. attribute:: nionic_steps
The total number of ionic steps. This number is always equal
to the total number of steps in the actual run even if
ionic_step_skip is used.
.. attribute:: force_constants
Force constants computed in a phonon DFPT run (IBRION = 8).
The data is a 4D numpy array of shape (natoms, natoms, 3, 3).
.. attribute:: normalmode_eigenvals
Normal mode frequencies.
1D numpy array of size 3*natoms.
.. attribute:: normalmode_eigenvecs
Normal mode eigen vectors.
3D numpy array of shape (3*natoms, natoms, 3).
**Vasp inputs**
.. attribute:: incar
Incar object for parameters specified in INCAR file.
.. attribute:: parameters
Incar object with parameters that vasp actually used, including all
defaults.
.. attribute:: kpoints
Kpoints object for KPOINTS specified in run.
.. attribute:: actual_kpoints
List of actual kpoints, e.g.,
[[0.25, 0.125, 0.08333333], [-0.25, 0.125, 0.08333333],
[0.25, 0.375, 0.08333333], ....]
.. attribute:: actual_kpoints_weights
List of kpoint weights, E.g.,
[0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667, ....]
.. attribute:: atomic_symbols
List of atomic symbols, e.g., ["Li", "Fe", "Fe", "P", "P", "P"]
.. attribute:: potcar_symbols
List of POTCAR symbols. e.g.,
["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000", ..]
Author: Shyue Ping Ong
"""
def __init__(self, filename, ionic_step_skip=None,
ionic_step_offset=0, parse_dos=True,
parse_eigen=True, parse_projected_eigen=False,
parse_potcar_file=True, occu_tol=1e-8,
exception_on_bad_xml=True):
"""
Args:
filename (str): Filename to parse
ionic_step_skip (int): If ionic_step_skip is a number > 1,
only every ionic_step_skip ionic steps will be read for
structure and energies. This is very useful if you are parsing
very large vasprun.xml files and you are not interested in every
single ionic step. Note that the final energies may not be the
actual final energy in the vasprun.
ionic_step_offset (int): Used together with ionic_step_skip. If set,
the first ionic step read will be offset by the amount of
ionic_step_offset. For example, if you want to start reading
every 10th structure but only from the 3rd structure onwards,
set ionic_step_skip to 10 and ionic_step_offset to 3. Main use
case is when doing statistical structure analysis with
extremely long time scale multiple VASP calculations of
varying numbers of steps.
parse_dos (bool): Whether to parse the dos. Defaults to True. Set
to False to shave off significant time from the parsing if you
are not interested in getting those data.
parse_eigen (bool): Whether to parse the eigenvalues. Defaults to
True. Set to False to shave off significant time from the
parsing if you are not interested in getting those data.
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues. Defaults to False. Set to True to obtain projected
eigenvalues. **Note that this can take an extreme amount of time
and memory.** So use this wisely.
parse_potcar_file (bool/str): Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to True. If
set to False, no hashes will be determined and the potcar_spec
dictionaries will read {"symbol": ElSymbol, "hash": None}. By default,
the POTCAR is looked for in the same directory as the vasprun.xml. If a
string is provided, that filepath is used instead.
occu_tol (float): Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
exception_on_bad_xml (bool): Whether to throw a ParseException if a
malformed XML is detected. Default to True, which ensures only
proper vasprun.xml are parsed. You can set to False if you want
partial results (e.g., if you are monitoring a calculation during a
run), but use the results with care. A warning is issued.
"""
self.filename = filename
self.ionic_step_skip = ionic_step_skip
self.ionic_step_offset = ionic_step_offset
self.occu_tol = occu_tol
self.exception_on_bad_xml = exception_on_bad_xml
with zopen(filename, "rt") as f:
if ionic_step_skip or ionic_step_offset:
# remove parts of the xml file and parse the string
run = f.read()
steps = run.split("<calculation>")
# The text before the first <calculation> is the preamble!
preamble = steps.pop(0)
self.nionic_steps = len(steps)
new_steps = steps[ionic_step_offset::int(ionic_step_skip)]
# add the trailing information from the last step of the run
to_parse = "<calculation>".join(new_steps)
if steps[-1] != new_steps[-1]:
to_parse = "{}<calculation>{}{}".format(
preamble, to_parse,
steps[-1].split("</calculation>")[-1])
else:
to_parse = "{}<calculation>{}".format(preamble, to_parse)
self._parse(StringIO(to_parse), parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen)
else:
self._parse(f, parse_dos=parse_dos, parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen)
self.nionic_steps = len(self.ionic_steps)
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
self.update_charge_from_potcar(parse_potcar_file)
if self.incar.get("ALGO", "") != "BSE" and (not self.converged):
msg = "%s is an unconverged VASP run.\n" % filename
msg += "Electronic convergence reached: %s.\n" % \
self.converged_electronic
msg += "Ionic convergence reached: %s." % self.converged_ionic
warnings.warn(msg, UnconvergedVASPWarning)
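# Usage sketch (illustrative only, assuming a vasprun.xml in the working
# directory): parse every 10th ionic step starting from the 3rd, as described
# in the docstring above, then inspect a few convergence-related attributes.
#
#   vr = Vasprun("vasprun.xml", ionic_step_skip=10, ionic_step_offset=3)
#   vr.converged, vr.final_energy, vr.nionic_steps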
def _parse(self, stream, parse_dos, parse_eigen, parse_projected_eigen):
self.efermi = None
self.eigenvalues = None
self.projected_eigenvalues = None
self.dielectric_data = {}
self.other_dielectric = {}
ionic_steps = []
parsed_header = False
try:
for event, elem in ET.iterparse(stream):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
if not hasattr(self, 'kpoints'):
self.kpoints, self.actual_kpoints, self.actual_kpoints_weights = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "structure" and elem.attrib.get("name") == "initialpos":
self.initial_structure = self._parse_structure(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p,
"hash": None} for
p in self.potcar_symbols]
if tag == "calculation":
parsed_header = True
if not self.parameters.get("LCHIMAG", False):
ionic_steps.append(self._parse_calculation(elem))
else:
ionic_steps.extend(self._parse_chemical_shielding_calculation(elem))
elif parse_dos and tag == "dos":
try:
self.tdos, self.idos, self.pdos = self._parse_dos(elem)
self.efermi = self.tdos.efermi
self.dos_has_errors = False
except Exception:
self.dos_has_errors = True
elif parse_eigen and tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
self.projected_eigenvalues = self._parse_projected_eigen(
elem)
elif tag == "dielectricfunction":
if ("comment" not in elem.attrib or
elem.attrib["comment"] ==
"INVERSE MACROSCOPIC DIELECTRIC TENSOR (including "
"local field effects in RPA (Hartree))"):
if 'density' not in self.dielectric_data:
self.dielectric_data['density'] = self._parse_diel(
elem)
elif 'velocity' not in self.dielectric_data:
# "velocity-velocity" is also named
# "current-current" in OUTCAR
self.dielectric_data['velocity'] = self._parse_diel(
elem)
else:
raise NotImplementedError(
'This vasprun.xml has >2 unlabelled dielectric '
'functions')
else:
comment = elem.attrib["comment"]
# VASP 6+ has labels for the density and current
# derived dielectric constants
if comment == "density-density":
self.dielectric_data["density"] = self._parse_diel(
elem)
elif comment == "current-current":
self.dielectric_data["velocity"] = self._parse_diel(
elem)
else:
self.other_dielectric[comment] = self._parse_diel(
elem)
elif tag == "varray" and elem.attrib.get("name") == 'opticaltransitions':
self.optical_transition = np.array(_parse_varray(elem))
elif tag == "structure" and elem.attrib.get("name") == \
"finalpos":
self.final_structure = self._parse_structure(elem)
elif tag == "dynmat":
hessian, eigenvalues, eigenvectors = self._parse_dynmat(elem)
natoms = len(self.atomic_symbols)
hessian = np.array(hessian)
self.force_constants = np.zeros((natoms, natoms, 3, 3), dtype='double')
for i in range(natoms):
for j in range(natoms):
self.force_constants[i, j] = hessian[i * 3:(i + 1) * 3, j * 3:(j + 1) * 3]
phonon_eigenvectors = []
for ev in eigenvectors:
phonon_eigenvectors.append(np.array(ev).reshape(natoms, 3))
self.normalmode_eigenvals = np.array(eigenvalues)
self.normalmode_eigenvecs = np.array(phonon_eigenvectors)
except ET.ParseError as ex:
if self.exception_on_bad_xml:
raise ex
else:
warnings.warn(
"XML is malformed. Parsing has stopped but partial data"
"is available.", UserWarning)
self.ionic_steps = ionic_steps
self.vasp_version = self.generator["version"]
@property
def structures(self):
"""
Returns:
List of Structure objects for the structure at each ionic step.
"""
return [step["structure"] for step in self.ionic_steps]
@property
def epsilon_static(self):
"""
Property only available for DFPT calculations.
Returns:
The static part of the dielectric constant. Present when it's a DFPT run
(LEPSILON=TRUE)
"""
return self.ionic_steps[-1].get("epsilon", [])
@property
def epsilon_static_wolfe(self):
"""
Property only available for DFPT calculations.
Returns:
The static part of the dielectric constant without any local field
effects. Present when it's a DFPT run (LEPSILON=TRUE)
"""
return self.ionic_steps[-1].get("epsilon_rpa", [])
@property
def epsilon_ionic(self):
"""
Property only available for DFPT calculations and when IBRION=5, 6, 7 or 8.
Returns:
The ionic part of the static dielectric constant. Present when it's a
DFPT run (LEPSILON=TRUE) and IBRION=5, 6, 7 or 8
"""
return self.ionic_steps[-1].get("epsilon_ion", [])
@property
def dielectric(self):
"""
Returns:
The real and imaginary part of the dielectric constant (e.g., computed
by RPA) as a function of the energy (frequency). Optical properties (e.g.,
the absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values: the energies, the real part
tensors, and the imaginary part tensors
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
"""
return self.dielectric_data['density']
@property
def optical_absorption_coeff(self):
"""
Calculate the optical absorption coefficient
from the dielectric constants. Note that this method is only
implemented for optical properties calculated with GGA and BSE.
Returns:
optical absorption coefficient in list
"""
if self.dielectric_data["density"]:
real_avg = [sum(self.dielectric_data["density"][1][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))]
imag_avg = [sum(self.dielectric_data["density"][2][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))]
def f(freq, real, imag):
"""
The optical absorption coefficient at a given frequency, calculated
from the averaged real and imaginary parts of the dielectric function.
"""
hbar = 6.582119514e-16 # reduced Planck constant in eV*s
coeff = np.sqrt(np.sqrt(real ** 2 + imag ** 2) - real) * np.sqrt(2) / hbar * freq
return coeff
absorption_coeff = [f(freq, real, imag) for freq, real, imag in
zip(self.dielectric_data["density"][0], real_avg, imag_avg)]
return absorption_coeff
@property
def converged_electronic(self):
"""
Returns:
True if electronic step convergence has been reached in the final
ionic step
"""
final_esteps = self.ionic_steps[-1]["electronic_steps"]
if 'LEPSILON' in self.incar and self.incar['LEPSILON']:
i = 1
to_check = set(['e_wo_entrp', 'e_fr_energy', 'e_0_energy'])
while set(final_esteps[i].keys()) == to_check:
i += 1
return i + 1 != self.parameters["NELM"]
return len(final_esteps) < self.parameters["NELM"]
@property
def converged_ionic(self):
"""
Returns:
True if ionic step convergence has been reached, i.e. that vasp
exited before reaching the max ionic steps for a relaxation run
"""
nsw = self.parameters.get("NSW", 0)
return nsw <= 1 or len(self.ionic_steps) < nsw
@property
def converged(self):
"""
Returns:
True if a relaxation run is converged both ionically and
electronically.
"""
return self.converged_electronic and self.converged_ionic
@property # type: ignore
@unitized("eV")
def final_energy(self):
"""
Final energy from the vasp run.
"""
try:
final_istep = self.ionic_steps[-1]
if final_istep["e_wo_entrp"] != final_istep['electronic_steps'][-1]["e_0_energy"]:
warnings.warn("Final e_wo_entrp differs from the final "
"electronic step. VASP may have included some "
"corrections, e.g., vdw. Vasprun will return "
"the final e_wo_entrp, i.e., including "
"corrections in such instances.")
return final_istep["e_wo_entrp"]
return final_istep['electronic_steps'][-1]["e_0_energy"]
except (IndexError, KeyError):
warnings.warn("Calculation does not have a total energy. "
"Possibly a GW or similar kind of run. A value of "
"infinity is returned.")
return float('inf')
@property
def complete_dos(self):
"""
A complete dos object which incorporates the total dos and all
projected dos.
"""
final_struct = self.final_structure
pdoss = {final_struct[i]: pdos for i, pdos in enumerate(self.pdos)}
return CompleteDos(self.final_structure, self.tdos, pdoss)
@property
def hubbards(self):
"""
Hubbard U values used if a vasprun is a GGA+U run. {} otherwise.
"""
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
if not self.incar.get("LDAU", False):
return {}
us = self.incar.get("LDAUU", self.parameters.get("LDAUU"))
js = self.incar.get("LDAUJ", self.parameters.get("LDAUJ"))
if len(js) != len(us):
js = [0] * len(us)
if len(us) == len(symbols):
return {symbols[i]: us[i] - js[i] for i in range(len(symbols))}
elif sum(us) == 0 and sum(js) == 0:
return {}
else:
raise VaspParserError("Length of U value parameters and atomic "
"symbols are mismatched")
@property
def run_type(self):
"""
Returns the run type. Currently supports LDA, GGA, vdW-DF and HF calcs.
TODO: Fix for other functional types like PW91, other vdW types, etc.
"""
GGA_TYPES = {"RE": "revPBE", "PE": "PBE", "PS": "PBESol", "RP": "RevPBE+PADE", "AM": "AM05", "OR": "optPBE",
"BO": "optB88", "MK": "optB86b", "--": "GGA"}
METAGGA_TYPES = {"TPSS": "TPSS", "RTPSS": "revTPSS", "M06L": "M06-L", "MBJ": "modified Becke-Johnson",
"SCAN": "SCAN", "MS0": "MadeSimple0", "MS1": "MadeSimple1", "MS2": "MadeSimple2"}
if self.parameters.get("AEXX", 1.00) == 1.00:
rt = "HF"
elif self.parameters.get("HFSCREEN", 0.30) == 0.30:
rt = "HSE03"
elif self.parameters.get("HFSCREEN", 0.20) == 0.20:
rt = "HSE06"
elif self.parameters.get("AEXX", 0.20) == 0.20:
rt = "B3LYP"
elif self.parameters.get("LHFCALC", True):
rt = "PBEO or other Hybrid Functional"
elif self.parameters.get("LUSE_VDW", False):
if self.incar.get("METAGGA", "").strip().upper() in METAGGA_TYPES:
rt = METAGGA_TYPES[self.incar.get("METAGGA", "").strip().upper()] + "+rVV10"
else:
rt = GGA_TYPES[self.parameters.get("GGA", "").strip().upper()] + "+rVV10"
elif self.incar.get("METAGGA", "").strip().upper() in METAGGA_TYPES:
rt = METAGGA_TYPES[self.incar.get("METAGGA", "").strip().upper()]
if self.is_hubbard or self.parameters.get("LDAU", True):
rt += "+U"
elif self.potcar_symbols[0].split()[0] == 'PAW':
rt = "LDA"
elif self.parameters.get("GGA", "").strip().upper() in GGA_TYPES:
rt = GGA_TYPES[self.parameters.get("GGA", "").strip().upper()]
if self.is_hubbard or self.parameters.get("LDAU", True):
rt += "+U"
return rt
@property
def is_hubbard(self):
"""
True if run is a DFT+U run.
"""
if len(self.hubbards) == 0:
return False
return sum(self.hubbards.values()) > 1e-8
@property
def is_spin(self):
"""
True if run is spin-polarized.
"""
return self.parameters.get("ISPIN", 1) == 2
def get_computed_entry(self, inc_structure=True, parameters=None,
data=None):
"""
Returns a ComputedStructureEntry from the vasprun.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the Vasprun object. If
parameters is None, a default set of parameters that are
necessary for typical post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
Returns:
ComputedStructureEntry/ComputedEntry
"""
param_names = {"is_hubbard", "hubbards", "potcar_symbols",
"potcar_spec", "run_type"}
if parameters:
param_names.update(parameters)
params = {p: getattr(self, p) for p in param_names}
data = {p: getattr(self, p) for p in data} if data is not None else {}
if inc_structure:
return ComputedStructureEntry(self.final_structure,
self.final_energy, parameters=params,
data=data)
else:
return ComputedEntry(self.final_structure.composition,
self.final_energy, parameters=params,
data=data)
def get_band_structure(self, kpoints_filename=None, efermi=None,
line_mode=False, force_hybrid_mode=False):
"""
Returns the band structure as a BandStructure object
Args:
kpoints_filename (str): Full path of the KPOINTS file from which
the band structure is generated.
If none is provided, the code will try to intelligently
determine the appropriate KPOINTS file by substituting the
filename of the vasprun.xml with KPOINTS.
The latter is the default behavior.
efermi (float): If you want to specify manually the fermi energy
this is where you should do it. By default, the None value
means the code will get it from the vasprun.
line_mode (bool): Force the band structure to be considered as
a run along symmetry lines.
force_hybrid_mode (bool): Makes it possible to read in self-consistent band structure calculations for
every type of functional
Returns:
a BandStructure object (or more specifically a
BandStructureSymmLine object if the run is detected to be a run
along symmetry lines)
Two types of runs along symmetry lines are accepted: non-sc with
Line-Mode in the KPOINT file or hybrid, self-consistent with a
uniform grid+a few kpoints along symmetry lines (explicit KPOINTS
file) (it's not possible to run a non-sc band structure with hybrid
functionals). The explicit KPOINTS file needs to have data on the
kpoint label as commentary.
"""
if not kpoints_filename:
kpoints_filename = zpath(
os.path.join(os.path.dirname(self.filename), 'KPOINTS'))
if not os.path.exists(kpoints_filename) and line_mode is True:
raise VaspParserError('KPOINTS needed to obtain band structure '
'along symmetry lines.')
if efermi is None:
efermi = self.efermi
kpoint_file = None
if os.path.exists(kpoints_filename):
kpoint_file = Kpoints.from_file(kpoints_filename)
lattice_new = Lattice(self.final_structure.lattice.reciprocal_lattice.matrix)
kpoints = [np.array(self.actual_kpoints[i])
for i in range(len(self.actual_kpoints))]
p_eigenvals = defaultdict(list)
eigenvals = defaultdict(list)
nkpts = len(kpoints)
for spin, v in self.eigenvalues.items():
v = np.swapaxes(v, 0, 1)
eigenvals[spin] = v[:, :, 0]
if self.projected_eigenvalues:
peigen = self.projected_eigenvalues[spin]
# Original axes for self.projected_eigenvalues are kpoints,
# band, ion, orb.
# For BS input, we need band, kpoints, orb, ion.
peigen = np.swapaxes(peigen, 0, 1) # Swap kpoint and band axes
peigen = np.swapaxes(peigen, 2, 3) # Swap ion and orb axes
p_eigenvals[spin] = peigen
# for b in range(min_eigenvalues):
# p_eigenvals[spin].append(
# [{Orbital(orb): v for orb, v in enumerate(peigen[b, k])}
# for k in range(nkpts)])
# check if we have a hybrid band structure computation;
# for this we look at the presence of the LHFCALC tag
hybrid_band = False
if self.parameters.get('LHFCALC', False) or \
0. in self.actual_kpoints_weights:
hybrid_band = True
if kpoint_file is not None:
if kpoint_file.style == Kpoints.supported_modes.Line_mode:
line_mode = True
if line_mode:
labels_dict = {}
if hybrid_band or force_hybrid_mode:
start_bs_index = 0
for i in range(len(self.actual_kpoints)):
if self.actual_kpoints_weights[i] == 0.0:
start_bs_index = i
break
for i in range(start_bs_index, len(kpoint_file.kpts)):
if kpoint_file.labels[i] is not None:
labels_dict[kpoint_file.labels[i]] = \
kpoint_file.kpts[i]
# remake the data only considering line band structure k-points
# (weight = 0.0 kpoints)
nbands = len(eigenvals[Spin.up])
kpoints = kpoints[start_bs_index:nkpts]
up_eigen = [eigenvals[Spin.up][i][start_bs_index:nkpts]
for i in range(nbands)]
if self.projected_eigenvalues:
p_eigenvals[Spin.up] = [p_eigenvals[Spin.up][i][
start_bs_index:nkpts]
for i in range(nbands)]
if self.is_spin:
down_eigen = [eigenvals[Spin.down][i][start_bs_index:nkpts]
for i in range(nbands)]
eigenvals = {Spin.up: up_eigen, Spin.down: down_eigen}
if self.projected_eigenvalues:
p_eigenvals[Spin.down] = [p_eigenvals[Spin.down][i][
start_bs_index:nkpts]
for i in range(nbands)]
else:
eigenvals = {Spin.up: up_eigen}
else:
if '' in kpoint_file.labels:
raise Exception("A band structure along symmetry lines "
"requires a label for each kpoint. "
"Check your KPOINTS file")
labels_dict = dict(zip(kpoint_file.labels, kpoint_file.kpts))
labels_dict.pop(None, None)
return BandStructureSymmLine(kpoints, eigenvals, lattice_new,
efermi, labels_dict,
structure=self.final_structure,
projections=p_eigenvals)
else:
return BandStructure(kpoints, eigenvals, lattice_new, efermi,
structure=self.final_structure,
projections=p_eigenvals)
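# Illustrative call pattern (assumes a line-mode band-structure run with
# vasprun.xml and its KPOINTS file in the same directory):
#
#   vr = Vasprun("vasprun.xml", parse_projected_eigen=True)
#   bs = vr.get_band_structure(line_mode=True)
#   bs.get_band_gap()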
@property
def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct).
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
for spin, d in self.eigenvalues.items():
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
def get_potcars(self, path):
"""
:param path: Path to search for POTCARs
:return: Potcar from path.
"""
def get_potcar_in_path(p):
for fn in os.listdir(os.path.abspath(p)):
if fn.startswith('POTCAR'):
pc = Potcar.from_file(os.path.join(p, fn))
if {d.header for d in pc} == \
{sym for sym in self.potcar_symbols}:
return pc
warnings.warn("No POTCAR file with matching TITEL fields"
" was found in {}".format(os.path.abspath(p)))
if isinstance(path, (str, Path)):
path = str(path)
if "POTCAR" in path:
potcar = Potcar.from_file(path)
if {d.TITEL for d in potcar} != \
{sym for sym in self.potcar_symbols}:
raise ValueError("Potcar TITELs do not match Vasprun")
else:
potcar = get_potcar_in_path(path)
elif isinstance(path, bool) and path:
potcar = get_potcar_in_path(os.path.split(self.filename)[0])
else:
potcar = None
return potcar
def update_potcar_spec(self, path):
"""
:param path: Path to search for POTCARs
:return: Potcar spec from path.
"""
potcar = self.get_potcars(path)
if potcar:
self.potcar_spec = [{"titel": sym, "hash": ps.get_potcar_hash()}
for sym in self.potcar_symbols
for ps in potcar if
ps.symbol == sym.split()[1]]
def update_charge_from_potcar(self, path):
"""
Sets the charge of a structure based on the POTCARs found.
:param path: Path to search for POTCARs
"""
potcar = self.get_potcars(path)
if potcar and self.incar.get("ALGO", "") not in ["GW0", "G0W0", "GW", "BSE"]:
nelect = self.parameters["NELECT"]
if len(potcar) == len(self.initial_structure.composition.element_composition):
potcar_nelect = sum([
self.initial_structure.composition.element_composition[ps.element] * ps.ZVAL
for ps in potcar])
else:
nums = [len(list(g)) for _, g in
itertools.groupby(self.atomic_symbols)]
potcar_nelect = sum(ps.ZVAL * num for ps, num in
zip(potcar, nums))
charge = nelect - potcar_nelect
if charge:
for s in self.structures:
s._charge = charge
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {"vasp_version": self.vasp_version,
"has_vasp_completed": self.converged,
"nsites": len(self.final_structure)}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {"incar": {k: v for k, v in self.incar.items()},
"crystal": self.initial_structure.as_dict(),
"kpoints": self.kpoints.as_dict()}
actual_kpts = [{"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i]}
for i in range(len(self.actual_kpoints))]
vin["kpoints"]["actual_points"] = actual_kpts
vin["nkpoints"] = len(actual_kpts)
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = {k: v for k, v in self.parameters.items()}
vin["lattice_rec"] = self.final_structure.lattice.reciprocal_lattice.as_dict()
d["input"] = vin
nsites = len(self.final_structure)
try:
vout = {"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
except (ArithmeticError, TypeError):
vout = {"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": None,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
if self.eigenvalues:
eigen = {str(spin): v.tolist()
for spin, v in self.eigenvalues.items()}
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
is_gap_direct=is_direct))
if self.projected_eigenvalues:
vout['projected_eigenvalues'] = {
str(spin): v.tolist()
for spin, v in self.projected_eigenvalues.items()}
vout['epsilon_static'] = self.epsilon_static
vout['epsilon_static_wolfe'] = self.epsilon_static_wolfe
vout['epsilon_ionic'] = self.epsilon_ionic
d['output'] = vout
return jsanitize(d, strict=True)
def _parse_params(self, elem):
params = {}
for c in elem:
name = c.attrib.get("name")
if c.tag not in ("i", "v"):
p = self._parse_params(c)
if name == "response functions":
# Delete duplicate fields from "response functions",
# which overrides the values in the root params.
p = {k: v for k, v in p.items() if k not in params}
params.update(p)
else:
ptype = c.attrib.get("type")
val = c.text.strip() if c.text else ""
if c.tag == "i":
params[name] = _parse_parameters(ptype, val)
else:
params[name] = _parse_v_parameters(ptype, val,
self.filename, name)
elem.clear()
return Incar(params)
def _parse_atominfo(self, elem):
for a in elem.findall("array"):
if a.attrib["name"] == "atoms":
atomic_symbols = [rc.find("c").text.strip()
for rc in a.find("set")]
elif a.attrib["name"] == "atomtypes":
potcar_symbols = [rc.findall("c")[4].text.strip()
for rc in a.find("set")]
# ensure atomic symbols are valid elements
def parse_atomic_symbol(symbol):
try:
return str(Element(symbol))
# vasprun.xml uses X instead of Xe for xenon
except ValueError as e:
if symbol == "X":
return "Xe"
elif symbol == "r":
return "Zr"
raise e
elem.clear()
return [parse_atomic_symbol(sym) for
sym in atomic_symbols], potcar_symbols
def _parse_kpoints(self, elem):
e = elem
if elem.find("generation"):
e = elem.find("generation")
k = Kpoints("Kpoints from vasprun.xml")
k.style = Kpoints.supported_modes.from_string(
e.attrib["param"] if "param" in e.attrib else "Reciprocal")
for v in e.findall("v"):
name = v.attrib.get("name")
toks = v.text.split()
if name == "divisions":
k.kpts = [[int(i) for i in toks]]
elif name == "usershift":
k.kpts_shift = [float(i) for i in toks]
elif name in {"genvec1", "genvec2", "genvec3", "shift"}:
setattr(k, name, [float(i) for i in toks])
for va in elem.findall("varray"):
name = va.attrib["name"]
if name == "kpointlist":
actual_kpoints = _parse_varray(va)
elif name == "weights":
weights = [i[0] for i in _parse_varray(va)]
elem.clear()
if k.style == Kpoints.supported_modes.Reciprocal:
k = Kpoints(comment="Kpoints from vasprun.xml",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(k.kpts),
kpts=actual_kpoints, kpts_weights=weights)
return k, actual_kpoints, weights
def _parse_structure(self, elem):
latt = _parse_varray(elem.find("crystal").find("varray"))
pos = _parse_varray(elem.find("varray"))
struct = Structure(latt, self.atomic_symbols, pos)
sdyn = elem.find("varray/[@name='selective']")
if sdyn:
struct.add_site_property('selective_dynamics',
_parse_varray(sdyn))
return struct
def _parse_diel(self, elem):
imag = [[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("imag").find("array").find("set").findall("r")]
real = [[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("real").find("array").find("set").findall("r")]
elem.clear()
return [e[0] for e in imag], \
[e[1:] for e in real], [e[1:] for e in imag]
def _parse_optical_transition(self, elem):
for va in elem.findall("varray"):
if va.attrib.get("name") == "opticaltransitions":
# opticaltransitions array contains oscillator strength and probability of transition
oscillator_strength = np.array(_parse_varray(va))[0:, ]
probability_transition = np.array(_parse_varray(va))[0:, 1]
return oscillator_strength, probability_transition
def _parse_chemical_shielding_calculation(self, elem):
calculation = []
istep = {}
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
pass
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["structure"] = s
istep["electronic_steps"] = []
calculation.append(istep)
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text)
for i in scstep.find("energy").findall("i")}
cur_ene = d['e_fr_energy']
min_steps = 1 if len(calculation) >= 1 else self.parameters.get("NELMIN", 5)
if len(calculation[-1]["electronic_steps"]) <= min_steps:
calculation[-1]["electronic_steps"].append(d)
else:
last_ene = calculation[-1]["electronic_steps"][-1]["e_fr_energy"]
if abs(cur_ene - last_ene) < 1.0:
calculation[-1]["electronic_steps"].append(d)
else:
calculation.append({"electronic_steps": [d]})
except AttributeError: # not all calculations have an energy
pass
calculation[-1].update(calculation[-1]["electronic_steps"][-1])
return calculation
def _parse_calculation(self, elem):
try:
istep = {i.attrib["name"]: float(i.text)
for i in elem.find("energy").findall("i")}
except AttributeError: # not all calculations have an energy
istep = {}
pass
esteps = []
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text)
for i in scstep.find("energy").findall("i")}
esteps.append(d)
except AttributeError: # not all calculations have an energy
pass
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
pass
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["electronic_steps"] = esteps
istep["structure"] = s
elem.clear()
return istep
def _parse_dos(self, elem):
efermi = float(elem.find("i").text)
energies = None
tdensities = {}
idensities = {}
for s in elem.find("total").find("array").find("set").findall("set"):
data = np.array(_parse_varray(s))
energies = data[:, 0]
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
tdensities[spin] = data[:, 1]
idensities[spin] = data[:, 2]
pdoss = []
partial = elem.find("partial")
if partial is not None:
orbs = [ss.text for ss in partial.find("array").findall("field")]
orbs.pop(0)
lm = any(["x" in s for s in orbs])
for s in partial.find("array").find("set").findall("set"):
pdos = defaultdict(dict)
for ss in s.findall("set"):
spin = Spin.up if ss.attrib["comment"] == "spin 1" else \
Spin.down
data = np.array(_parse_varray(ss))
nrow, ncol = data.shape
for j in range(1, ncol):
if lm:
orb = Orbital(j - 1)
else:
orb = OrbitalType(j - 1)
pdos[orb][spin] = data[:, j]
pdoss.append(pdos)
elem.clear()
return Dos(efermi, energies, tdensities), Dos(efermi, energies, idensities), pdoss
def _parse_eigen(self, elem):
eigenvalues = defaultdict(list)
for s in elem.find("array").find("set").findall("set"):
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
for ss in s.findall("set"):
eigenvalues[spin].append(_parse_varray(ss))
eigenvalues = {spin: np.array(v) for spin, v in eigenvalues.items()}
elem.clear()
return eigenvalues
def _parse_projected_eigen(self, elem):
root = elem.find("array").find("set")
proj_eigen = defaultdict(list)
for s in root.findall("set"):
spin = int(re.match(r"spin(\d+)", s.attrib["comment"]).group(1))
# Force spin to be +1 or -1
spin = Spin.up if spin == 1 else Spin.down
for kpt, ss in enumerate(s.findall("set")):
dk = []
for band, sss in enumerate(ss.findall("set")):
db = _parse_varray(sss)
dk.append(db)
proj_eigen[spin].append(dk)
proj_eigen = {spin: np.array(v) for spin, v in proj_eigen.items()}
elem.clear()
return proj_eigen
def _parse_dynmat(self, elem):
hessian = []
eigenvalues = []
eigenvectors = []
for v in elem.findall("v"):
if v.attrib["name"] == "eigenvalues":
eigenvalues = [float(i) for i in v.text.split()]
for va in elem.findall("varray"):
if va.attrib["name"] == "hessian":
for v in va.findall("v"):
hessian.append([float(i) for i in v.text.split()])
elif va.attrib["name"] == "eigenvectors":
for v in va.findall("v"):
eigenvectors.append([float(i) for i in v.text.split()])
return hessian, eigenvalues, eigenvectors
class BSVasprun(Vasprun):
"""
A highly optimized version of Vasprun that parses only eigenvalues for
bandstructures. All other properties like structures, parameters,
etc. are ignored.
"""
def __init__(self, filename, parse_projected_eigen=False,
parse_potcar_file=False, occu_tol=1e-8):
"""
Args:
filename (str): Filename to parse
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues. Defaults to False. Set to True to obtain projected
eigenvalues. **Note that this can take an extreme amount of time
and memory.** So use this wisely.
parse_potcar_file (bool/str): Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to False,
in which case no hashes will be determined and the potcar_spec
dictionaries will read {"symbol": ElSymbol, "hash": None}. If True,
the POTCAR is looked for in the same directory as the vasprun.xml. If a
string is provided, that filepath is used instead.
occu_tol (float): Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
"""
self.filename = filename
self.occu_tol = occu_tol
with zopen(filename, "rt") as f:
self.efermi = None
parsed_header = False
self.eigenvalues = None
self.projected_eigenvalues = None
for event, elem in ET.iterparse(f):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
self.kpoints, self.actual_kpoints, self.actual_kpoints_weights = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p, "hash": None} for p in self.potcar_symbols]
parsed_header = True
elif tag == "i" and elem.attrib.get("name") == "efermi":
self.efermi = float(elem.text)
elif tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
self.projected_eigenvalues = self._parse_projected_eigen(
elem)
elif tag == "structure" and elem.attrib.get("name") == \
"finalpos":
self.final_structure = self._parse_structure(elem)
self.vasp_version = self.generator["version"]
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {"vasp_version": self.vasp_version,
"has_vasp_completed": True,
"nsites": len(self.final_structure)}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {"incar": {k: v for k, v in self.incar.items()},
"crystal": self.final_structure.as_dict(),
"kpoints": self.kpoints.as_dict()}
actual_kpts = [{"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i]}
for i in range(len(self.actual_kpoints))]
vin["kpoints"]["actual_points"] = actual_kpts
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = {k: v for k, v in self.parameters.items()}
vin["lattice_rec"] = self.final_structure.lattice.reciprocal_lattice.as_dict()
d["input"] = vin
vout = {"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
if self.eigenvalues:
eigen = defaultdict(dict)
for spin, values in self.eigenvalues.items():
for i, v in enumerate(values):
eigen[i][str(spin)] = v
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
is_gap_direct=is_direct))
if self.projected_eigenvalues:
peigen = []
for i in range(len(eigen)):
peigen.append({})
for spin, v in self.projected_eigenvalues.items():
for kpoint_index, vv in enumerate(v):
if str(spin) not in peigen[kpoint_index]:
peigen[kpoint_index][str(spin)] = vv
vout['projected_eigenvalues'] = peigen
d['output'] = vout
return jsanitize(d, strict=True)
class Outcar:
"""
Parser for data in OUTCAR that is not available in Vasprun.xml
Note, this class works a bit differently than most of the other
VaspObjects, since the OUTCAR can be very different depending on which
"type of run" performed.
Creating the OUTCAR class with a filename reads "regular parameters" that
are always present.
.. attribute:: magnetization
Magnetization on each ion as a tuple of dict, e.g.,
({"d": 0.0, "p": 0.003, "s": 0.002, "tot": 0.005}, ... )
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: chemical_shielding
chemical shielding on each ion as a dictionary with core and valence contributions
.. attribute:: unsym_cs_tensor
Unsymmetrized chemical shielding tensor matrices on each ion as a list.
e.g.,
[[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]],
...
[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]]]
.. attribute:: cs_g0_contribution
G=0 contribution to chemical shielding. 2D rank 3 matrix
.. attribute:: cs_core_contribution
Core contribution to chemical shielding. dict. e.g.,
{'Mg': -412.8, 'C': -200.5, 'O': -271.1}
.. attribute:: efg
Electric Field Gradient (EFG) tensor on each ion as a tuple of dict, e.g.,
({"cq": 0.1, "eta", 0.2, "nuclear_quadrupole_moment": 0.3},
{"cq": 0.7, "eta", 0.8, "nuclear_quadrupole_moment": 0.9},
...)
.. attribute:: charge
Charge on each ion as a tuple of dict, e.g.,
({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: is_stopped
True if OUTCAR is from a stopped run (using STOPCAR, see Vasp Manual).
.. attribute:: run_stats
Various useful run stats as a dict including "System time (sec)",
"Total CPU time used (sec)", "Elapsed time (sec)",
"Maximum memory used (kb)", "Average memory used (kb)",
"User time (sec)".
.. attribute:: elastic_tensor
Total elastic moduli (in kBar), given as a 6x6 matrix.
.. attribute:: drift
Total drift for each step in eV/Atom
.. attribute:: ngf
Dimensions of the augmentation grid
.. attribute:: sampling_radii
Size of the sampling radii used in VASP for the test charges for the
electrostatic potential at each atom. The total array size is the number
of elements present in the calculation.
.. attribute:: electrostatic_potential
Average electrostatic potential at each atomic position, in the order
of the atoms in POSCAR.
.. attribute:: final_energy_contribs
Individual contributions to the total final energy as a dictionary.
Includes contributions from keys, e.g.:
{'DENC': -505778.5184347, 'EATOM': 15561.06492564, 'EBANDS': -804.53201231,
'EENTRO': -0.08932659, 'EXHF': 0.0, 'Ediel_sol': 0.0,
'PAW double counting': 664.6726974100002, 'PSCENC': 742.48691646,
'TEWEN': 489742.86847338, 'XCENC': -169.64189814}
One can then call a specific reader depending on the type of run being
performed. These are currently: read_igpar(), read_lepsilon(),
read_lcalcpol(), read_core_state_eigen() and read_avg_core_pot().
See the documentation of those methods for more details.
Authors: Rickard Armiento, Shyue Ping Ong
"""
def __init__(self, filename):
"""
Args:
filename (str): OUTCAR filename to parse.
"""
self.filename = filename
self.is_stopped = False
# data from end of OUTCAR
charge = []
mag_x = []
mag_y = []
mag_z = []
header = []
run_stats = {}
total_mag = None
nelect = None
efermi = None
total_energy = None
time_patt = re.compile(r"\((sec|kb)\)")
efermi_patt = re.compile(r"E-fermi\s*:\s*(\S+)")
nelect_patt = re.compile(r"number of electron\s+(\S+)\s+magnetization")
mag_patt = re.compile(r"number of electron\s+\S+\s+magnetization\s+("
r"\S+)")
toten_pattern = re.compile(r"free energy TOTEN\s+=\s+([\d\-\.]+)")
all_lines = []
for line in reverse_readfile(self.filename):
clean = line.strip()
all_lines.append(clean)
if clean.find("soft stop encountered! aborting job") != -1:
self.is_stopped = True
else:
if time_patt.search(line):
tok = line.strip().split(":")
run_stats[tok[0].strip()] = float(tok[1].strip())
continue
m = efermi_patt.search(clean)
if m:
try:
# try-catch because VASP sometimes prints
# 'E-fermi: ******** XC(G=0): -6.1327
# alpha+bet : -1.8238'
efermi = float(m.group(1))
continue
except ValueError:
efermi = None
continue
m = nelect_patt.search(clean)
if m:
nelect = float(m.group(1))
m = mag_patt.search(clean)
if m:
total_mag = float(m.group(1))
if total_energy is None:
m = toten_pattern.search(clean)
if m:
total_energy = float(m.group(1))
if all([nelect, total_mag is not None, efermi is not None,
run_stats]):
break
# For single atom systems, VASP doesn't print a total line, so
# reverse parsing is very difficult
read_charge = False
read_mag_x = False
read_mag_y = False # for SOC calculations only
read_mag_z = False
all_lines.reverse()
for clean in all_lines:
if read_charge or read_mag_x or read_mag_y or read_mag_z:
if clean.startswith("# of ion"):
header = re.split(r"\s{2,}", clean.strip())
header.pop(0)
else:
m = re.match(r"\s*(\d+)\s+(([\d\.\-]+)\s+)+", clean)
if m:
toks = [float(i)
for i in re.findall(r"[\d\.\-]+", clean)]
toks.pop(0)
if read_charge:
charge.append(dict(zip(header, toks)))
elif read_mag_x:
mag_x.append(dict(zip(header, toks)))
elif read_mag_y:
mag_y.append(dict(zip(header, toks)))
elif read_mag_z:
mag_z.append(dict(zip(header, toks)))
elif clean.startswith('tot'):
read_charge = False
read_mag_x = False
read_mag_y = False
read_mag_z = False
if clean == "total charge":
charge = []
read_charge = True
read_mag_x, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (x)":
mag_x = []
read_mag_x = True
read_charge, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (y)":
mag_y = []
read_mag_y = True
read_charge, read_mag_x, read_mag_z = False, False, False
elif clean == "magnetization (z)":
mag_z = []
read_mag_z = True
read_charge, read_mag_x, read_mag_y = False, False, False
elif re.search("electrostatic", clean):
read_charge, read_mag_x, read_mag_y, read_mag_z = False, False, False, False
# merge x, y and z components of magmoms if present (SOC calculation)
if mag_y and mag_z:
# TODO: detect spin axis
mag = []
for idx in range(len(mag_x)):
mag.append({
key: Magmom([mag_x[idx][key], mag_y[idx][key], mag_z[idx][key]])
for key in mag_x[0].keys()
})
else:
mag = mag_x
# data from beginning of OUTCAR
run_stats['cores'] = 0
with zopen(filename, "rt") as f:
for line in f:
if "running" in line:
run_stats['cores'] = line.split()[2]
break
self.run_stats = run_stats
self.magnetization = tuple(mag)
self.charge = tuple(charge)
self.efermi = efermi
self.nelect = nelect
self.total_mag = total_mag
self.final_energy = total_energy
self.data = {}
# Read the drift:
self.read_pattern({
"drift": r"total drift:\s+([\.\-\d]+)\s+([\.\-\d]+)\s+([\.\-\d]+)"},
terminate_on_match=False,
postprocess=float)
self.drift = self.data.get('drift', [])
# Check if calculation is spin polarized
self.spin = False
self.read_pattern({'spin': 'ISPIN = 2'})
if self.data.get('spin', []):
self.spin = True
# Check if calculation is noncollinear
self.noncollinear = False
self.read_pattern({'noncollinear': 'LNONCOLLINEAR = T'})
if self.data.get('noncollinear', []):
self.noncollinear = True  # LNONCOLLINEAR = T was found, so flag the run as noncollinear
# Check if the calculation type is DFPT
self.dfpt = False
self.read_pattern({'ibrion': r"IBRION =\s+([\-\d]+)"},
terminate_on_match=True,
postprocess=int)
if self.data.get("ibrion", [[0]])[0][0] > 6:
self.dfpt = True
self.read_internal_strain_tensor()
# Check to see if LEPSILON is true and read piezo data if so
self.lepsilon = False
self.read_pattern({'epsilon': 'LEPSILON= T'})
if self.data.get('epsilon', []):
self.lepsilon = True
self.read_lepsilon()
# only read ionic contribution if DFPT is turned on
if self.dfpt:
self.read_lepsilon_ionic()
# Check to see if LCALCPOL is true and read polarization data if so
self.lcalcpol = False
self.read_pattern({'calcpol': 'LCALCPOL = T'})
if self.data.get('calcpol', []):
self.lcalcpol = True
self.read_lcalcpol()
self.read_pseudo_zval()
# Read electrostatic potential
self.read_pattern({
'electrostatic': r"average \(electrostatic\) potential at core"})
if self.data.get('electrostatic', []):
self.read_electrostatic_potential()
self.nmr_cs = False
self.read_pattern({"nmr_cs": r"LCHIMAG = (T)"})
if self.data.get("nmr_cs", None):
self.nmr_cs = True
self.read_chemical_shielding()
self.read_cs_g0_contribution()
self.read_cs_core_contribution()
self.read_cs_raw_symmetrized_tensors()
self.nmr_efg = False
self.read_pattern({"nmr_efg": r"NMR quadrupolar parameters"})
if self.data.get("nmr_efg", None):
self.nmr_efg = True
self.read_nmr_efg()
self.read_nmr_efg_tensor()
self.has_onsite_density_matrices = False
self.read_pattern({"has_onsite_density_matrices": r"onsite density matrix"},
terminate_on_match=True)
if "has_onsite_density_matrices" in self.data:
self.has_onsite_density_matrices = True
self.read_onsite_density_matrices()
# Store the individual contributions to the final total energy
final_energy_contribs = {}
for k in ["PSCENC", "TEWEN", "DENC", "EXHF", "XCENC", "PAW double counting",
"EENTRO", "EBANDS", "EATOM", "Ediel_sol"]:
if k == "PAW double counting":
self.read_pattern({k: r"%s\s+=\s+([\.\-\d]+)\s+([\.\-\d]+)" % (k)})
else:
self.read_pattern({k: r"%s\s+=\s+([\d\-\.]+)" % (k)})
if not self.data[k]:
continue
final_energy_contribs[k] = sum([float(f) for f in self.data[k][-1]])
self.final_energy_contribs = final_energy_contribs
def read_pattern(self, patterns, reverse=False, terminate_on_match=False,
postprocess=str):
r"""
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to False. Useful for
large files, especially OUTCARs, when used together with
terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], i.e., the
regex matches after postprocess has been applied. Note that the returned
values are lists of lists, because you can grep multiple items on one line.
"""
matches = regrep(self.filename, patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
def read_table_pattern(self, header_pattern, row_pattern, footer_pattern,
postprocess=str, attribute_name=None,
last_one_only=True):
r"""
Parse table-like data. A table is composed of three parts: header,
main body, and footer. All data matching "row_pattern" in the main body
will be returned.
Args:
header_pattern (str): The regular expression pattern that matches the
table header. This pattern should match all the text
immediately before the main body of the table. For a table with
multiple sections, match the text up to the section of
interest. The MULTILINE and DOTALL options are enforced; as a
result, the "." meta-character will also match "\n" in this
section.
row_pattern (str): The regular expression that matches a single line in
the table. Capture the fields of interest using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
attribute_name (str): Name of this table. If present, the parsed data
will be attached to "data", e.g. self.data["efg"] = [...]
last_one_only (bool): All tables are parsed, but if this option is
set to True, only the last table is returned and the enclosing
list is removed, i.e. only a single table is returned.
Defaults to True.
Returns:
List of tables. 1) A table is a list of rows. 2) A row is either a list of
attribute values, if the capturing groups in row_pattern are unnamed,
or a dict, if named capturing groups are defined in row_pattern.
"""
with zopen(self.filename, 'rt') as f:
text = f.read()
table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + row_pattern + r")+)\s+" + footer_pattern
table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
tables = []
for mt in table_pattern.finditer(text):
table_body_text = mt.group("table_body")
table_contents = []
for line in table_body_text.split("\n"):
ml = rp.search(line)
# skip empty lines
if not ml:
continue
d = ml.groupdict()
if len(d) > 0:
processed_line = {k: postprocess(v) for k, v in d.items()}
else:
processed_line = [postprocess(v) for v in ml.groups()]
table_contents.append(processed_line)
tables.append(table_contents)
if last_one_only:
retained_data = tables[-1]
else:
retained_data = tables
if attribute_name is not None:
self.data[attribute_name] = retained_data
return retained_data
def read_electrostatic_potential(self):
"""
Parses the electrostatic potential for the last ionic step
"""
pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"}
self.read_pattern(pattern, postprocess=int)
self.ngf = self.data.get("ngf", [[]])[0]
pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"}
self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str)
self.sampling_radii = [float(f) for f in self.data["radii"][0][0].split()]
header_pattern = r"\(the norm of the test charge is\s+[\.\-\d]+\)"
table_pattern = r"((?:\s+\d+\s*[\.\-\d]+)+)"
footer_pattern = r"\s+E-fermi :"
pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
pots = "".join(itertools.chain.from_iterable(pots))
pots = re.findall(r"\s+\d+\s*([\.\-\d]+)+", pots)
self.electrostatic_potential = [float(f) for f in pots]
def read_freq_dielectric(self):
"""
Parses the frequency dependent dielectric function (obtained with
LOPTICS). Frequencies (in eV) are in self.frequencies, and dielectric
tensor function is given as self.dielectric_tensor_function.
"""
plasma_pattern = r"plasma frequency squared.*"
dielectric_pattern = r"frequency dependent\s+IMAGINARY " \
r"DIELECTRIC FUNCTION \(independent particle, " \
r"no local field effects\)(\sdensity-density)*$"
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 3)
plasma_frequencies = collections.defaultdict(list)
read_plasma = False
read_dielectric = False
energies = []
data = {"REAL": [], "IMAGINARY": []}
count = 0
component = "IMAGINARY"
with zopen(self.filename, "rt") as f:
for l in f:
l = l.strip()
if re.match(plasma_pattern, l):
read_plasma = "intraband" if "intraband" in l else "interband"
elif re.match(dielectric_pattern, l):
read_plasma = False
read_dielectric = True
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 7)
if read_plasma and re.match(row_pattern, l):
plasma_frequencies[read_plasma].append(
[float(t) for t in l.strip().split()])
elif read_dielectric:
if re.match(row_pattern, l.strip()):
toks = l.strip().split()
if component == "IMAGINARY":
energies.append(float(toks[0]))
xx, yy, zz, xy, yz, xz = [float(t) for t in toks[1:]]
matrix = [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]]
data[component].append(matrix)
elif re.match(r"\s*-+\s*", l):
count += 1
if count == 2:
component = "REAL"
elif count == 3:
break
self.plasma_frequencies = {k: np.array(v[:3])
for k, v in plasma_frequencies.items()}
self.dielectric_energies = np.array(energies)
self.dielectric_tensor_function = np.array(data["REAL"]) + 1j * np.array(data["IMAGINARY"])
@property # type: ignore
@deprecated(message="frequencies has been renamed to dielectric_energies.")
def frequencies(self):
"""
Renamed to dielectric energies.
"""
return self.dielectric_energies
def read_chemical_shielding(self):
"""
Parse the NMR chemical shielding data. Only the second part, "absolute, valence and core",
is parsed, and only the three rightmost fields (ISO_SHIELDING, SPAN, SKEW) are retrieved.
Returns:
List of chemical shieldings in the order of atoms from the OUTCAR. Maryland notation is adopted.
"""
header_pattern = r"\s+CSA tensor \(J\. Mason, Solid State Nucl\. Magn\. Reson\. 2, " \
r"285 \(1993\)\)\s+" \
r"\s+-{50,}\s+" \
r"\s+EXCLUDING G=0 CONTRIBUTION\s+INCLUDING G=0 CONTRIBUTION\s+" \
r"\s+-{20,}\s+-{20,}\s+" \
r"\s+ATOM\s+ISO_SHIFT\s+SPAN\s+SKEW\s+ISO_SHIFT\s+SPAN\s+SKEW\s+" \
r"-{50,}\s*$"
first_part_pattern = r"\s+\(absolute, valence only\)\s+$"
swallon_valence_body_pattern = r".+?\(absolute, valence and core\)\s+$"
row_pattern = r"\d+(?:\s+[-]?\d+\.\d+){3}\s+" + r'\s+'.join(
[r"([-]?\d+\.\d+)"] * 3)
footer_pattern = r"-{50,}\s*$"
h1 = header_pattern + first_part_pattern
cs_valence_only = self.read_table_pattern(
h1, row_pattern, footer_pattern, postprocess=float,
last_one_only=True)
h2 = header_pattern + swallon_valence_body_pattern
cs_valence_and_core = self.read_table_pattern(
h2, row_pattern, footer_pattern, postprocess=float,
last_one_only=True)
all_cs = {}
for name, cs_table in [["valence_only", cs_valence_only],
["valence_and_core", cs_valence_and_core]]:
all_cs[name] = cs_table
self.data["chemical_shielding"] = all_cs
def read_cs_g0_contribution(self):
"""
Parse the G0 contribution of NMR chemical shielding.
Returns:
G0 contribution matrix as list of list.
"""
header_pattern = r'^\s+G\=0 CONTRIBUTION TO CHEMICAL SHIFT \(field along BDIR\)\s+$\n' \
r'^\s+-{50,}$\n' \
r'^\s+BDIR\s+X\s+Y\s+Z\s*$\n' \
r'^\s+-{50,}\s*$\n'
row_pattern = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 3)
footer_pattern = r'\s+-{50,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,
last_one_only=True, attribute_name="cs_g0_contribution")
def read_cs_core_contribution(self):
"""
Parse the core contribution of NMR chemical shielding.
Returns:
Core contributions as a dict of {element: core shift}.
"""
header_pattern = r'^\s+Core NMR properties\s*$\n' \
r'\n' \
r'^\s+typ\s+El\s+Core shift \(ppm\)\s*$\n' \
r'^\s+-{20,}$\n'
row_pattern = r'\d+\s+(?P<element>[A-Z][a-z]?\w?)\s+(?P<shift>[-]?\d+\.\d+)'
footer_pattern = r'\s+-{20,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=str,
last_one_only=True, attribute_name="cs_core_contribution")
core_contrib = {d['element']: float(d['shift'])
for d in self.data["cs_core_contribution"]}
self.data["cs_core_contribution"] = core_contrib
def read_cs_raw_symmetrized_tensors(self):
"""
Parse the matrix form of the NMR tensors before they are corrected and tabulated.
Returns:
Unsymmetrized tensors as a list in the order of atoms.
"""
header_pattern = r"\s+-{50,}\s+" \
r"\s+Absolute Chemical Shift tensors\s+" \
r"\s+-{50,}$"
first_part_pattern = r"\s+UNSYMMETRIZED TENSORS\s+$"
row_pattern = r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
unsym_footer_pattern = r"^\s+SYMMETRIZED TENSORS\s+$"
with zopen(self.filename, 'rt') as f:
text = f.read()
unsym_table_pattern_text = header_pattern + first_part_pattern + r"(?P<table_body>.+)" + unsym_footer_pattern
table_pattern = re.compile(unsym_table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
m = table_pattern.search(text)
if m:
table_text = m.group("table_body")
micro_header_pattern = r"ion\s+\d+"
micro_table_pattern_text = micro_header_pattern + r"\s*^(?P<table_body>(?:\s*" + row_pattern + r")+)\s+"
micro_table_pattern = re.compile(micro_table_pattern_text,
re.MULTILINE | re.DOTALL)
unsym_tensors = []
for mt in micro_table_pattern.finditer(table_text):
table_body_text = mt.group("table_body")
tensor_matrix = []
for line in table_body_text.rstrip().split("\n"):
ml = rp.search(line)
processed_line = [float(v) for v in ml.groups()]
tensor_matrix.append(processed_line)
unsym_tensors.append(tensor_matrix)
self.data["unsym_cs_tensor"] = unsym_tensors
else:
raise ValueError("NMR UNSYMMETRIZED TENSORS is not found")
def read_nmr_efg_tensor(self):
"""
Parses the NMR Electric Field Gradient Raw Tensors
Returns:
A list of Electric Field Gradient Tensors in the order of Atoms from OUTCAR
"""
header_pattern = r'Electric field gradients \(V/A\^2\)\n' \
r'-*\n' \
r' ion\s+V_xx\s+V_yy\s+V_zz\s+V_xy\s+V_xz\s+V_yz\n' \
r'-*\n'
row_pattern = r'\d+\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)'
footer_pattern = r'-*\n'
data = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
tensors = [make_symmetric_matrix_from_upper_tri(d) for d in data]
self.data["unsym_efg_tensor"] = tensors
return tensors
def read_nmr_efg(self):
"""
Parse the NMR Electric Field Gradient interpreted values.
Returns:
Electric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR.
Each dict key/value pair corresponds to a component of the tensors.
"""
header_pattern = r'^\s+NMR quadrupolar parameters\s+$\n' \
r'^\s+Cq : quadrupolar parameter\s+Cq=e[*]Q[*]V_zz/h$\n' \
r'^\s+eta: asymmetry parameters\s+\(V_yy - V_xx\)/ V_zz$\n' \
r'^\s+Q : nuclear electric quadrupole moment in mb \(millibarn\)$\n' \
r'^-{50,}$\n' \
r'^\s+ion\s+Cq\(MHz\)\s+eta\s+Q \(mb\)\s+$\n' \
r'^-{50,}\s*$\n'
row_pattern = r'\d+\s+(?P<cq>[-]?\d+\.\d+)\s+(?P<eta>[-]?\d+\.\d+)\s+' \
r'(?P<nuclear_quadrupole_moment>[-]?\d+\.\d+)'
footer_pattern = r'-{50,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,
last_one_only=True, attribute_name="efg")
def read_elastic_tensor(self):
"""
Parse the elastic tensor data.
Returns:
6x6 array corresponding to the elastic tensor from the OUTCAR.
"""
header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+" \
r"Direction\s+([X-Z][X-Z]\s+)+" \
r"\-+"
row_pattern = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"\-+"
et_table = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=float)
self.data["elastic_tensor"] = et_table
def read_piezo_tensor(self):
"""
Parse the piezo tensor data
"""
header_pattern = r"PIEZOELECTRIC TENSOR for field in x, y, " \
r"z\s+\(C/m\^2\)\s+([X-Z][X-Z]\s+)+\-+"
row_pattern = r"[x-z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"BORN EFFECTIVE"
pt_table = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=float)
self.data["piezo_tensor"] = pt_table
def read_onsite_density_matrices(self):
"""
Parse the onsite density matrices, returns list with index corresponding
to atom index in Structure.
"""
# matrix size will vary depending on if d or f orbitals are present
# therefore regex assumes f, but filter out None values if d
header_pattern = r"spin component 1\n"
row_pattern = r'[^\S\r\n]*(?:([\d.-]+))' + r'(?:[^\S\r\n]*(-?[\d.]+)[^\S\r\n]*)?' * 6 + r'.*?'
footer_pattern = r"\nspin component 2"
spin1_component = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=lambda x: float(x) if x else None,
last_one_only=False)
# filter out None values
spin1_component = [[[e for e in row if e is not None] for row in matrix] for matrix in spin1_component]
# and repeat for Spin.down
header_pattern = r"spin component 2\n"
row_pattern = r'[^\S\r\n]*(?:([\d.-]+))' + r'(?:[^\S\r\n]*(-?[\d.]+)[^\S\r\n]*)?' * 6 + r'.*?'
footer_pattern = r"\n occupancies and eigenvectors"
spin2_component = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=lambda x: float(x) if x else None,
last_one_only=False)
spin2_component = [[[e for e in row if e is not None] for row in matrix] for matrix in spin2_component]
self.data["onsite_density_matrices"] = [
{
Spin.up: spin1_component[idx],
Spin.down: spin2_component[idx]
}
for idx in range(len(spin1_component))
]
def read_corrections(self, reverse=True, terminate_on_match=True):
"""
Reads the dipole-quadrupole corrections into
Outcar.data["dipol_quadrupol_correction"].
:param reverse: Whether to start from end of OUTCAR.
:param terminate_on_match: Whether to terminate once match is found.
"""
patterns = {
"dipol_quadrupol_correction": r"dipol\+quadrupol energy "
r"correction\s+([\d\-\.]+)"
}
self.read_pattern(patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=float)
self.data["dipol_quadrupol_correction"] = self.data["dipol_quadrupol_correction"][0][0]
def read_neb(self, reverse=True, terminate_on_match=True):
"""
Reads NEB data. This works with OUTCARs from either normal
VASP NEB calculations or the CI-NEB method implemented by
Henkelman et al.
Args:
reverse (bool): Read files in reverse. Useful for large files,
especially OUTCARs, when used together with terminate_on_match.
Defaults to True here since we usually
want only the final value.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern. Defaults to True here
since we usually want only the final value.
Renders accessible:
tangent_force - Final tangent force.
energy - Final energy.
These can be accessed under Outcar.data[key]
"""
patterns = {
"energy": r"energy\(sigma->0\)\s+=\s+([\d\-\.]+)",
"tangent_force": r"(NEB: projections on to tangent \(spring, REAL\)\s+\S+|tangential force \(eV/A\))\s+"
r"([\d\-\.]+)"
}
self.read_pattern(patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=str)
self.data["energy"] = float(self.data["energy"][0][0])
if self.data.get("tangent_force"):
self.data["tangent_force"] = float(
self.data["tangent_force"][0][1])
def read_igpar(self):
"""
Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elec = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
"""
# variables to be filled
self.er_ev = {} # will be dict (Spin.up/down) of array(3*float)
self.er_bp = {} # will be dict (Spin.up/down) of array(3*float)
self.er_ev_tot = None # will be array(3*float)
self.er_bp_tot = None # will be array(3*float)
self.p_elec = None
self.p_ion = None
try:
search = []
# Nonspin cases
def er_ev(results, match):
results.er_ev[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)]) / 2
results.er_ev[Spin.down] = results.er_ev[Spin.up]
results.context = 2
search.append([r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, er_ev])
def er_bp(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)]) / 2
results.er_bp[Spin.down] = results.er_bp[Spin.up]
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == 2, er_bp])
# Spin cases
def er_ev_up(results, match):
results.er_ev[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)])
results.context = Spin.up
search.append([r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_up])
def er_bp_up(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == Spin.up, er_bp_up])
def er_ev_dn(results, match):
results.er_ev[Spin.down] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
results.context = Spin.down
search.append([r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_dn])
def er_bp_dn(results, match):
results.er_bp[Spin.down] = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == Spin.down, er_bp_dn])
# Always present spin/non-spin
def p_elc(results, match):
results.p_elc = np.array([float(match.group(i)) for i in range(1, 4)])
search.append([r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_elc])
def p_ion(results, match):
results.p_ion = np.array([float(match.group(i)) for i in range(1, 4)])
search.append([r"^.*ionic dipole moment: "
r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_ion])
self.context = None
self.er_ev = {Spin.up: None, Spin.down: None}
self.er_bp = {Spin.up: None, Spin.down: None}
micro_pyawk(self.filename, search, self)
if self.er_ev[Spin.up] is not None and \
self.er_ev[Spin.down] is not None:
self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]
if self.er_bp[Spin.up] is not None and \
self.er_bp[Spin.down] is not None:
self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]
except Exception:
self.er_ev_tot = None
self.er_bp_tot = None
raise Exception("IGPAR OUTCAR could not be parsed.")
def read_internal_strain_tensor(self):
"""
Reads the internal strain tensor and populates self.internal_strain_tensor with an array of voigt notation
tensors for each site.
"""
search = []
def internal_strain_start(results, match):
results.internal_strain_ion = int(match.group(1)) - 1
results.internal_strain_tensor.append(np.zeros((3, 6)))
search.append([r"INTERNAL STRAIN TENSOR FOR ION\s+(\d+)\s+for displacements in x,y,z \(eV/Angst\):",
None, internal_strain_start])
def internal_strain_data(results, match):
if match.group(1).lower() == "x":
index = 0
elif match.group(1).lower() == "y":
index = 1
elif match.group(1).lower() == "z":
index = 2
else:
raise Exception(
"Couldn't parse row index from symbol for internal strain tensor: {}".format(match.group(1)))
results.internal_strain_tensor[results.internal_strain_ion][index] = np.array([float(match.group(i))
for i in range(2, 8)])
if index == 2:
results.internal_strain_ion = None
search.append([r"^\s+([x,y,z])\s+" + r"([-]?\d+\.\d+)\s+" * 6,
lambda results, line: results.internal_strain_ion is not None,
internal_strain_data])
self.internal_strain_ion = None
self.internal_strain_tensor = []
micro_pyawk(self.filename, search, self)
def read_lepsilon(self):
"""
Reads an LEPSILON run.
# TODO: Document the actual variables.
"""
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_index = -1
search.append([r"MACROSCOPIC STATIC DIELECTRIC TENSOR \(", None,
dielectric_section_start])
def dielectric_section_start2(results, match):
results.dielectric_index = 0
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_index == -1,
dielectric_section_start2])
def dielectric_data(results, match):
results.dielectric_tensor[results.dielectric_index, :] = \
np.array([float(match.group(i)) for i in range(1, 4)])
results.dielectric_index += 1
search.append(
[r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_index >= 0
if results.dielectric_index is not None
else None,
dielectric_data])
def dielectric_section_stop(results, match):
results.dielectric_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_index >= 1
if results.dielectric_index is not None
else None,
dielectric_section_stop])
self.dielectric_index = None
self.dielectric_tensor = np.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_index = 0
search.append([r"PIEZOELECTRIC TENSOR for field in x, y, z "
r"\(C/m\^2\)",
None, piezo_section_start])
def piezo_data(results, match):
results.piezo_tensor[results.piezo_index, :] = \
np.array([float(match.group(i)) for i in range(1, 7)])
results.piezo_index += 1
search.append(
[r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_index >= 0
if results.piezo_index is not None
else None,
piezo_data])
def piezo_section_stop(results, match):
results.piezo_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.piezo_index >= 1
if results.piezo_index is not None
else None,
piezo_section_stop])
self.piezo_index = None
self.piezo_tensor = np.zeros((3, 6))
def born_section_start(results, match):
results.born_ion = -1
search.append([r"BORN EFFECTIVE CHARGES ",
None, born_section_start])
def born_ion(results, match):
results.born_ion = int(match.group(1)) - 1
results.born.append(np.zeros((3, 3)))
search.append([r"ion +([0-9]+)", lambda results, line: results.born_ion is not None, born_ion])
def born_data(results, match):
results.born[results.born_ion][int(match.group(1)) - 1, :] = \
np.array([float(match.group(i)) for i in range(2, 5)])
search.append(
[r"^ *([1-3]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+)$",
lambda results, line: results.born_ion >= 0
if results.born_ion is not None
else results.born_ion,
born_data])
def born_section_stop(results, match):
results.born_ion = None
search.append(
[r"-------------------------------------",
lambda results, line: results.born_ion >= 1
if results.born_ion is not None
else results.born_ion,
born_section_stop])
self.born_ion = None
self.born = []
micro_pyawk(self.filename, search, self)
self.born = np.array(self.born)
self.dielectric_tensor = self.dielectric_tensor.tolist()
self.piezo_tensor = self.piezo_tensor.tolist()
except Exception:
raise Exception("LEPSILON OUTCAR could not be parsed.")
def read_lepsilon_ionic(self):
"""
Reads an LEPSILON run, the ionic component.
# TODO: Document the actual variables.
"""
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_ionic_index = -1
search.append([r"MACROSCOPIC STATIC DIELECTRIC TENSOR IONIC", None,
dielectric_section_start])
def dielectric_section_start2(results, match):
results.dielectric_ionic_index = 0
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index == -1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_start2])
def dielectric_data(results, match):
results.dielectric_ionic_tensor[results.dielectric_ionic_index, :] = \
np.array([float(match.group(i)) for i in range(1, 4)])
results.dielectric_ionic_index += 1
search.append(
[r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_ionic_index >= 0
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_data])
def dielectric_section_stop(results, match):
results.dielectric_ionic_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index >= 1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_stop])
self.dielectric_ionic_index = None
self.dielectric_ionic_tensor = np.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_ionic_index = 0
search.append([r"PIEZOELECTRIC TENSOR IONIC CONTR for field in "
r"x, y, z ",
None, piezo_section_start])
def piezo_data(results, match):
results.piezo_ionic_tensor[results.piezo_ionic_index, :] = \
np.array([float(match.group(i)) for i in range(1, 7)])
results.piezo_ionic_index += 1
search.append(
[r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_ionic_index >= 0
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_data])
def piezo_section_stop(results, match):
results.piezo_ionic_index = None
search.append(
["-------------------------------------",
lambda results, line: results.piezo_ionic_index >= 1
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_section_stop])
self.piezo_ionic_index = None
self.piezo_ionic_tensor = np.zeros((3, 6))
micro_pyawk(self.filename, search, self)
self.dielectric_ionic_tensor = self.dielectric_ionic_tensor.tolist()
self.piezo_ionic_tensor = self.piezo_ionic_tensor.tolist()
except Exception:
raise Exception(
"ionic part of LEPSILON OUTCAR could not be parsed.")
def read_lcalcpol(self):
"""
Reads the LCALCPOL output.
# TODO: Document the actual variables.
"""
self.p_elec = None
self.p_sp1 = None
self.p_sp2 = None
self.p_ion = None
try:
search = []
# Always present spin/non-spin
def p_elec(results, match):
results.p_elec = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_elec])
# If spin-polarized (and not noncollinear)
# save spin-polarized electronic values
if self.spin and not self.noncollinear:
def p_sp1(results, match):
results.p_sp1 = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*p\[sp1\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_sp1])
def p_sp2(results, match):
results.p_sp2 = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*p\[sp2\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_sp2])
def p_ion(results, match):
results.p_ion = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*Ionic dipole moment: *p\[ion\]="
r"\( *([-0-9.Ee+]*)"
r" *([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, p_ion])
micro_pyawk(self.filename, search, self)
except Exception:
raise Exception("LCALCPOL OUTCAR could not be parsed.")
def read_pseudo_zval(self):
"""
Create pseudopotential ZVAL dictionary.
"""
try:
def atom_symbols(results, match):
element_symbol = match.group(1)
if not hasattr(results, 'atom_symbols'):
results.atom_symbols = []
results.atom_symbols.append(element_symbol.strip())
def zvals(results, match):
zvals = match.group(1)
results.zvals = map(float, re.findall(r'-?\d+\.\d*', zvals))
search = []
search.append([r'(?<=VRHFIN =)(.*)(?=:)', None, atom_symbols])
search.append([r'^\s+ZVAL.*=(.*)', None, zvals])
micro_pyawk(self.filename, search, self)
zval_dict = {}
for x, y in zip(self.atom_symbols, self.zvals):
zval_dict.update({x: y})
self.zval_dict = zval_dict
# Clean-up
del (self.atom_symbols)
del (self.zvals)
except Exception:
raise Exception("ZVAL dict could not be parsed.")
def read_core_state_eigen(self):
"""
Read the core state eigenenergies at each ionic step.
Returns:
A list of dicts, one per atom, of the form [{"AO": [core state eig]}].
The core state eigenenergy list for each AO spans all ionic
steps.
Example:
The core state eigenenergy of the 2s AO of the 6th atom of the
structure at the last ionic step is [5]["2s"][-1]
"""
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
while line != "":
line = foutcar.readline()
if "NIONS =" in line:
natom = int(line.split("NIONS =")[1])
cl = [defaultdict(list) for i in range(natom)]
if "the core state eigen" in line:
iat = -1
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
break
data = line.split()
# data will contain odd number of elements if it is
# the start of a new entry, or even number of elements
# if it continues the previous entry
if len(data) % 2 == 1:
iat += 1 # started parsing a new ion
data = data[1:] # remove element with ion number
for i in range(0, len(data), 2):
cl[iat][data[i]].append(float(data[i + 1]))
return cl
def read_avg_core_poten(self):
"""
Read the core potential at each ionic step.
Returns:
A list for each ionic step containing a list of the average core
potentials for each atom: [[avg core pot]].
Example:
The average core potential of the 2nd atom of the structure at the
last ionic step is: [-1][1]
"""
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a = iter(iterable)
return zip(a, a)
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
aps = []
while line != "":
line = foutcar.readline()
if "the norm of the test charge is" in line:
ap = []
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
aps.append(ap)
break
data = line.split()
# the average core potentials of up to 5 elements are
# given per line
for i, pot in pairwise(data):
ap.append(float(pot))
return aps
def as_dict(self):
"""
:return: MSONAble dict.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__, "efermi": self.efermi,
"run_stats": self.run_stats, "magnetization": self.magnetization,
"charge": self.charge, "total_magnetization": self.total_mag,
"nelect": self.nelect, "is_stopped": self.is_stopped,
"drift": self.drift, "ngf": self.ngf,
"sampling_radii": self.sampling_radii,
"electrostatic_potential": self.electrostatic_potential}
if self.lepsilon:
d.update({"piezo_tensor": self.piezo_tensor,
"dielectric_tensor": self.dielectric_tensor,
"born": self.born})
if self.dfpt:
d.update({"internal_strain_tensor": self.internal_strain_tensor})
if self.dfpt and self.lepsilon:
d.update({"piezo_ionic_tensor": self.piezo_ionic_tensor,
"dielectric_ionic_tensor": self.dielectric_ionic_tensor})
if self.lcalcpol:
d.update({'p_elec': self.p_elec,
'p_ion': self.p_ion})
if self.spin and not self.noncollinear:
d.update({'p_sp1': self.p_sp1,
'p_sp2': self.p_sp2})
d.update({'zval_dict': self.zval_dict})
if self.nmr_cs:
d.update({"nmr_cs": {"valence and core": self.data["chemical_shielding"]["valence_and_core"],
"valence_only": self.data["chemical_shielding"]["valence_only"],
"g0": self.data["cs_g0_contribution"],
"core": self.data["cs_core_contribution"],
"raw": self.data["unsym_cs_tensor"]}})
if self.nmr_efg:
d.update({"nmr_efg": {"raw": self.data["unsym_efg_tensor"],
"parameters": self.data["efg"]}})
if self.has_onsite_density_matrices:
# cast Spin to str for consistency with electronic_structure
# TODO: improve handling of Enum (de)serialization in monty
onsite_density_matrices = [{str(k): v for k, v in d.items()}
for d in self.data["onsite_density_matrices"]]
d.update({"onsite_density_matrices": onsite_density_matrices})
return d
def read_fermi_contact_shift(self):
"""
output example:
Fermi contact (isotropic) hyperfine coupling parameter (MHz)
-------------------------------------------------------------
ion A_pw A_1PS A_1AE A_1c A_tot
-------------------------------------------------------------
1 -0.002 -0.002 -0.051 0.000 -0.052
2 -0.002 -0.002 -0.051 0.000 -0.052
3 0.056 0.056 0.321 -0.048 0.321
-------------------------------------------------------------
, which corresponds to
[[-0.002, -0.002, -0.051, 0.0, -0.052],
[-0.002, -0.002, -0.051, 0.0, -0.052],
[0.056, 0.056, 0.321, -0.048, 0.321]] from 'fch' data
"""
# Fermi contact (isotropic) hyperfine coupling parameter (MHz)
header_pattern1 = r"\s*Fermi contact \(isotropic\) hyperfine coupling parameter \(MHz\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_pw\s+A_1PS\s+A_1AE\s+A_1c\s+A_tot\s+" \
r"\s*\-+"
row_pattern1 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 5)
footer_pattern = r"\-+"
fch_table = self.read_table_pattern(header_pattern1, row_pattern1,
footer_pattern, postprocess=float,
last_one_only=True)
# Dipolar hyperfine coupling parameters (MHz)
header_pattern2 = r"\s*Dipolar hyperfine coupling parameters \(MHz\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+A_xy\s+A_xz\s+A_yz\s+" \
r"\s*\-+"
row_pattern2 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 6)
dh_table = self.read_table_pattern(header_pattern2, row_pattern2,
footer_pattern, postprocess=float,
last_one_only=True)
# Total hyperfine coupling parameters after diagonalization (MHz)
header_pattern3 = r"\s*Total hyperfine coupling parameters after diagonalization \(MHz\)\s+" \
r"\s*\(convention: \|A_zz\| > \|A_xx\| > \|A_yy\|\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+asymmetry \(A_yy - A_xx\)/ A_zz\s+" \
r"\s*\-+"
row_pattern3 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 4)
th_table = self.read_table_pattern(header_pattern3, row_pattern3,
footer_pattern, postprocess=float,
last_one_only=True)
fc_shift_table = {'fch': fch_table, 'dh': dh_table, 'th': th_table}
self.data["fermi_contact_shift"] = fc_shift_table
class VolumetricData(MSONable):
"""
Simple volumetric object for reading LOCPOT and CHGCAR type files.
.. attribute:: structure
Structure associated with the Volumetric Data object
.. attribute:: is_spin_polarized
True if run is spin polarized
.. attribute:: dim
Tuple of dimensions of volumetric grid in each direction (nx, ny, nz).
.. attribute:: data
Actual data as a dict of {string: np.array}. The strings are "total"
and "diff", in accordance with the output format of VASP LOCPOT and
CHGCAR files, where the total spin density is written first, followed
by the difference spin density.
.. attribute:: ngridpts
Total number of grid points in volumetric data.
"""
def __init__(self, structure, data, distance_matrix=None, data_aug=None):
"""
Typically, this constructor is not used directly and the static
from_file constructor is used. This constructor is designed to allow
summation and other operations between VolumetricData objects.
Args:
structure: Structure associated with the volumetric data
data: Actual volumetric data.
data_aug: Any extra information associated with volumetric data
(typically augmentation charges)
distance_matrix: A pre-computed distance matrix if available.
Useful for passing distance_matrices between sums,
short-circuiting an otherwise expensive operation.
"""
self.structure = structure
self.is_spin_polarized = len(data) >= 2
self.is_soc = len(data) >= 4
self.dim = data["total"].shape
self.data = data
self.data_aug = data_aug if data_aug else {}
self.ngridpts = self.dim[0] * self.dim[1] * self.dim[2]
# lazy init the spin data since this is not always needed.
self._spin_data = {}
self._distance_matrix = {} if not distance_matrix else distance_matrix
@property
def spin_data(self):
"""
The data decomposed into actual spin data as {spin: data}.
Essentially, this provides the actual Spin.up and Spin.down data
instead of the total and diff. Note that by definition, a
non-spin-polarized run would have Spin.up data == Spin.down data.
"""
if not self._spin_data:
spin_data = dict()
spin_data[Spin.up] = 0.5 * (self.data["total"] +
self.data.get("diff", 0))
spin_data[Spin.down] = 0.5 * (self.data["total"] -
self.data.get("diff", 0))
self._spin_data = spin_data
return self._spin_data
def get_axis_grid(self, ind):
"""
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.dim
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
def __add__(self, other):
return self.linear_add(other, 1.0)
def __sub__(self, other):
return self.linear_add(other, -1.0)
def copy(self):
"""
:return: Copy of Volumetric object
"""
return VolumetricData(
self.structure,
{k: v.copy() for k, v in self.data.items()},
distance_matrix=self._distance_matrix,
data_aug=self.data_aug
)
def linear_add(self, other, scale_factor=1.0):
"""
Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
"""
if self.structure != other.structure:
warnings.warn("Structures are different. Make sure you know what "
"you are doing...")
if self.data.keys() != other.data.keys():
raise ValueError("Data have different keys! Maybe one is spin-"
"polarized and the other is not?")
# To add checks
data = {}
for k in self.data.keys():
data[k] = self.data[k] + scale_factor * other.data[k]
return VolumetricData(self.structure, data, self._distance_matrix)
@staticmethod
def parse_file(filename):
"""
Convenience method to parse a generic volumetric data file in the vasp
like format. Used by subclasses for parsing file.
Args:
filename (str): Path of file to parse
Returns:
(poscar, data, data_aug)
"""
poscar_read = False
poscar_string = []
dataset = []
all_dataset = []
# for holding any strings in input that are not Poscar
# or VolumetricData (typically augmentation charges)
all_dataset_aug = {}
dim = None
dimline = None
read_dataset = False
ngrid_pts = 0
data_count = 0
poscar = None
with zopen(filename, "rt") as f:
for line in f:
original_line = line
line = line.strip()
if read_dataset:
toks = line.split()
for tok in toks:
if data_count < ngrid_pts:
# This complicated procedure is necessary because
# vasp outputs x as the fastest index, followed by y
# then z.
x = data_count % dim[0]
y = int(math.floor(data_count / dim[0])) % dim[1]
z = int(math.floor(data_count / dim[0] / dim[1]))
dataset[x, y, z] = float(tok)
data_count += 1
if data_count >= ngrid_pts:
read_dataset = False
data_count = 0
all_dataset.append(dataset)
elif not poscar_read:
if line != "" or len(poscar_string) == 0:
poscar_string.append(line)
elif line == "":
poscar = Poscar.from_string("\n".join(poscar_string))
poscar_read = True
elif not dim:
dim = [int(i) for i in line.split()]
ngrid_pts = dim[0] * dim[1] * dim[2]
dimline = line
read_dataset = True
dataset = np.zeros(dim)
elif line == dimline:
# when line == dimline, expect volumetric data to follow
# so set read_dataset to True
read_dataset = True
dataset = np.zeros(dim)
else:
# store any extra lines that were not part of the
# volumetric data so we know which set of data the extra
# lines are associated with
key = len(all_dataset) - 1
if key not in all_dataset_aug:
all_dataset_aug[key] = []
all_dataset_aug[key].append(original_line)
if len(all_dataset) == 4:
data = {"total": all_dataset[0], "diff_x": all_dataset[1],
"diff_y": all_dataset[2], "diff_z": all_dataset[3]}
data_aug = {"total": all_dataset_aug.get(0, None),
"diff_x": all_dataset_aug.get(1, None),
"diff_y": all_dataset_aug.get(2, None),
"diff_z": all_dataset_aug.get(3, None)}
# construct a "diff" dict for scalar-like magnetization density,
# referenced to an arbitrary direction (using same method as
# pymatgen.electronic_structure.core.Magmom, see
# Magmom documentation for justification for this)
# TODO: re-examine this, and also similar behavior in
# Magmom - @mkhorton
# TODO: does CHGCAR change with different SAXIS?
diff_xyz = np.array([data["diff_x"], data["diff_y"],
data["diff_z"]])
diff_xyz = diff_xyz.reshape((3, dim[0] * dim[1] * dim[2]))
ref_direction = np.array([1.01, 1.02, 1.03])
ref_sign = np.sign(np.dot(ref_direction, diff_xyz))
diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign)
data["diff"] = diff.reshape((dim[0], dim[1], dim[2]))
elif len(all_dataset) == 2:
data = {"total": all_dataset[0], "diff": all_dataset[1]}
data_aug = {"total": all_dataset_aug.get(0, None),
"diff": all_dataset_aug.get(1, None)}
else:
data = {"total": all_dataset[0]}
data_aug = {"total": all_dataset_aug.get(0, None)}
return poscar, data, data_aug
def write_file(self, file_name, vasp4_compatible=False):
"""
Write the VolumetricData object to a vasp compatible file.
Args:
file_name (str): Path to a file
vasp4_compatible (bool): True if the format is vasp4 compatible
"""
def _print_fortran_float(f):
"""
Fortran codes print floats with a leading zero in scientific
notation. When writing CHGCAR files, we adopt this convention
to ensure written CHGCAR files are byte-to-byte identical to
their input files as far as possible.
:param f: float
:return: str
"""
s = "{:.10E}".format(f)
if f >= 0:
return "0." + s[0] + s[2:12] + 'E' + "{:+03}".format(int(s[13:]) + 1)
else:
return "-." + s[1] + s[3:13] + 'E' + "{:+03}".format(int(s[14:]) + 1)
with zopen(file_name, "wt") as f:
p = Poscar(self.structure)
# use original name if it's been set (e.g. from Chgcar)
comment = getattr(self, 'name', p.comment)
lines = comment + "\n"
lines += " 1.00000000000000\n"
latt = self.structure.lattice.matrix
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :])
if not vasp4_compatible:
lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n"
lines += "".join(["%6d" % x for x in p.natoms]) + "\n"
lines += "Direct\n"
for site in self.structure:
lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords)
lines += " \n"
f.write(lines)
a = self.dim
def write_spin(data_type):
lines = []
count = 0
f.write(" {} {} {}\n".format(a[0], a[1], a[2]))
for (k, j, i) in itertools.product(list(range(a[2])),
list(range(a[1])),
list(range(a[0]))):
lines.append(_print_fortran_float(self.data[data_type][i, j, k]))
count += 1
if count % 5 == 0:
f.write(" " + "".join(lines) + "\n")
lines = []
else:
lines.append(" ")
if count % 5 != 0:
f.write(" " + "".join(lines) + " \n")
f.write("".join(self.data_aug.get(data_type, [])))
write_spin("total")
if self.is_spin_polarized and self.is_soc:
write_spin("diff_x")
write_spin("diff_y")
write_spin("diff_z")
elif self.is_spin_polarized:
write_spin("diff")
def get_integrated_diff(self, ind, radius, nbins=1):
"""
Get integrated difference of atom index ind up to radius. This can be
an extremely computationally intensive process, depending on how many
grid points are in the VolumetricData.
Args:
ind (int): Index of atom.
radius (float): Radius of integration.
nbins (int): Number of bins. Defaults to 1. Setting nbins > 1
returns the cumulative charge integration values at the radii
[radius/nbins, 2 * radius/nbins, ...].
Returns:
Differential integrated charge as a np array of [[radius, value],
...]. Format is for ease of plotting. E.g., plt.plot(data[:,0],
data[:,1])
"""
# For non-spin-polarized runs, this is zero by definition.
if not self.is_spin_polarized:
radii = [radius / nbins * (i + 1) for i in range(nbins)]
data = np.zeros((nbins, 2))
data[:, 0] = radii
return data
struct = self.structure
a = self.dim
if ind not in self._distance_matrix or \
self._distance_matrix[ind]["max_radius"] < radius:
coords = []
for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):
coords.append([x / a[0], y / a[1], z / a[2]])
sites_dist = struct.lattice.get_points_in_sphere(
coords, struct[ind].coords, radius)
self._distance_matrix[ind] = {"max_radius": radius,
"data": np.array(sites_dist)}
data = self._distance_matrix[ind]["data"]
# Use boolean indexing to find all charges within the desired distance.
inds = data[:, 1] <= radius
dists = data[inds, 1]
data_inds = np.rint(np.mod(list(data[inds, 0]), 1) *
np.tile(a, (len(dists), 1))).astype(int)
vals = [self.data["diff"][x, y, z] for x, y, z in data_inds]
hist, edges = np.histogram(dists, bins=nbins,
range=[0, radius],
weights=vals)
data = np.zeros((nbins, 2))
data[:, 0] = edges[1:]
data[:, 1] = [sum(hist[0:i + 1]) / self.ngridpts
for i in range(nbins)]
return data
def get_average_along_axis(self, ind):
"""
Get the average of the total volumetric data along a certain axis direction.
For example, useful for visualizing Hartree Potentials from a LOCPOT
file.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
"""
m = self.data["total"]
ng = self.dim
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
def to_hdf5(self, filename):
"""
Writes the VolumetricData to an HDF5 file, which is a highly optimized
format for reading and storing large data. The mapping of the VolumetricData
to this file format is as follows:
VolumetricData.data -> f["vdata"]
VolumetricData.structure ->
f["Z"]: Sequence of atomic numbers
f["fcoords"]: Fractional coords
f["lattice"]: Lattice in the pymatgen.core.lattice.Lattice matrix
format
f.attrs["structure_json"]: String of json representation
Args:
filename (str): Filename to output to.
"""
import h5py
with h5py.File(filename, "w") as f:
ds = f.create_dataset("lattice", (3, 3), dtype='float')
ds[...] = self.structure.lattice.matrix
ds = f.create_dataset("Z", (len(self.structure.species),),
dtype="i")
ds[...] = np.array([sp.Z for sp in self.structure.species])
ds = f.create_dataset("fcoords", self.structure.frac_coords.shape,
dtype='float')
ds[...] = self.structure.frac_coords
dt = h5py.special_dtype(vlen=str)
ds = f.create_dataset("species", (len(self.structure.species),),
dtype=dt)
ds[...] = [str(sp) for sp in self.structure.species]
grp = f.create_group("vdata")
for k, v in self.data.items():
ds = grp.create_dataset(k, self.data[k].shape, dtype='float')
ds[...] = self.data[k]
f.attrs["name"] = self.name
f.attrs["structure_json"] = json.dumps(self.structure.as_dict())
@classmethod
def from_hdf5(cls, filename):
"""
Reads VolumetricData from HDF5 file.
:param filename: Filename
:return: VolumetricData
"""
import h5py
with h5py.File(filename, "r") as f:
data = {k: np.array(v) for k, v in f["vdata"].items()}
structure = Structure.from_dict(json.loads(f.attrs["structure_json"]))
return cls(structure=structure, data=data)
class Locpot(VolumetricData):
"""
Simple object for reading a LOCPOT file.
"""
def __init__(self, poscar, data):
"""
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
super().__init__(poscar.structure, data)
self.name = poscar.comment
@staticmethod
def from_file(filename):
"""
Reads a LOCPOT file.
:param filename: Filename
:return: Locpot
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return Locpot(poscar, data)
class Chgcar(VolumetricData):
"""
Simple object for reading a CHGCAR file.
"""
def __init__(self, poscar, data, data_aug=None):
"""
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
super().__init__(poscar.structure, data, data_aug=data_aug)
self.poscar = poscar
self.name = poscar.comment
self._distance_matrix = {}
@staticmethod
def from_file(filename):
"""
Reads a CHGCAR file.
:param filename: Filename
:return: Chgcar
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return Chgcar(poscar, data, data_aug=data_aug)
@property
def net_magnetization(self):
"""
:return: Net magnetization from Chgcar
"""
if self.is_spin_polarized:
return np.sum(self.data['diff'])
else:
return None
class Elfcar(VolumetricData):
"""
Read an ELFCAR file which contains the Electron Localization Function (ELF)
as calculated by VASP.
For ELF, "total" key refers to Spin.up, and "diff" refers to Spin.down.
This also contains information on the kinetic energy density.
"""
def __init__(self, poscar, data):
"""
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
super().__init__(poscar.structure, data)
# TODO: modify VolumetricData so that the correct keys can be used.
# for ELF, instead of "total" and "diff" keys we have
# "Spin.up" and "Spin.down" keys
# I believe this is correct, but there's not much documentation -mkhorton
self.data = data
@classmethod
def from_file(cls, filename):
"""
Reads a ELFCAR file.
:param filename: Filename
:return: Elfcar
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return cls(poscar, data)
def get_alpha(self):
"""
Get the parameter alpha where ELF = 1/(1+alpha^2).
"""
alpha_data = {}
for k, v in self.data.items():
alpha = 1 / v
alpha = alpha - 1
alpha = np.sqrt(alpha)
alpha_data[k] = alpha
return VolumetricData(self.structure, alpha_data)
class Procar:
"""
Object for reading a PROCAR file.
.. attribute:: data
The PROCAR data in the form below. It should be noted that VASP uses 1-based
indexing, but all indices are converted to 0-based here.::
{
spin: nd.array accessed with (k-point index, band index,
ion index, orbital index)
}
.. attribute:: weights
The weights associated with each k-point as an nd.array of length
nkpoints.
.. attribute:: phase_factors
Phase factors, where present (e.g. LORBIT = 12). A dict of the form:
{
spin: complex nd.array accessed with (k-point index, band index,
ion index, orbital index)
}
.. attribute:: nbands
Number of bands
.. attribute:: nkpoints
Number of k-points
.. attribute:: nions
Number of ions
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing PROCAR.
"""
headers = None
with zopen(filename, "rt") as f:
preambleexpr = re.compile(
r"# of k-points:\s*(\d+)\s+# of bands:\s*(\d+)\s+# of "
r"ions:\s*(\d+)")
kpointexpr = re.compile(r"^k-point\s+(\d+).*weight = ([0-9\.]+)")
bandexpr = re.compile(r"^band\s+(\d+)")
ionexpr = re.compile(r"^ion.*")
expr = re.compile(r"^([0-9]+)\s+")
current_kpoint = 0
current_band = 0
done = False
spin = Spin.down
weights = None
for l in f:
l = l.strip()
if bandexpr.match(l):
m = bandexpr.match(l)
current_band = int(m.group(1)) - 1
done = False
elif kpointexpr.match(l):
m = kpointexpr.match(l)
current_kpoint = int(m.group(1)) - 1
weights[current_kpoint] = float(m.group(2))
if current_kpoint == 0:
spin = Spin.up if spin == Spin.down else Spin.down
done = False
elif headers is None and ionexpr.match(l):
headers = l.split()
headers.pop(0)
headers.pop(-1)
def f():
return np.zeros((nkpoints, nbands, nions, len(headers)))
data = defaultdict(f)
def f2():
return np.full((nkpoints, nbands, nions, len(headers)),
np.NaN, dtype=np.complex128)
phase_factors = defaultdict(f2)
elif expr.match(l):
toks = l.split()
index = int(toks.pop(0)) - 1
num_data = np.array([float(t) for t in toks[:len(headers)]])
if not done:
data[spin][current_kpoint, current_band, index, :] = num_data
else:
if len(toks) > len(headers):
# new format of PROCAR (vasp 5.4.4)
num_data = np.array([float(t)
for t in toks[:2 * len(headers)]])
for orb in range(len(headers)):
phase_factors[spin][current_kpoint, current_band,
index, orb] = complex(num_data[2 * orb], num_data[2 * orb + 1])
else:
# old format of PROCAR (vasp 5.4.1 and before)
if np.isnan(phase_factors[spin][current_kpoint, current_band, index, 0]):
phase_factors[spin][current_kpoint, current_band, index, :] = num_data
else:
phase_factors[spin][current_kpoint, current_band, index, :] += 1j * num_data
elif l.startswith("tot"):
done = True
elif preambleexpr.match(l):
m = preambleexpr.match(l)
nkpoints = int(m.group(1))
nbands = int(m.group(2))
nions = int(m.group(3))
weights = np.zeros(nkpoints)
self.nkpoints = nkpoints
self.nbands = nbands
self.nions = nions
self.weights = weights
self.orbitals = headers
self.data = data
self.phase_factors = phase_factors
def get_projection_on_elements(self, structure):
"""
Method returning a dictionary of projections on elements.
Args:
structure (Structure): Input structure.
Returns:
a dictionary of projections of the form {Spin.up: [band index][k-point index]{Element: value}}
"""
dico = {}
for spin in self.data.keys():
dico[spin] = [[defaultdict(float)
for i in range(self.nkpoints)]
for j in range(self.nbands)]
for iat in range(self.nions):
name = structure.species[iat].symbol
for spin, d in self.data.items():
for k, b in itertools.product(range(self.nkpoints),
range(self.nbands)):
dico[spin][b][k][name] = np.sum(d[k, b, iat, :])
return dico
def get_occupation(self, atom_index, orbital):
"""
Returns the occupation for a particular orbital of a particular atom.
Args:
atom_index (int): Index of atom in the PROCAR. It should be noted
that VASP uses 1-based indexing for atoms, but this is
converted to 0-based indexing in this parser to be
consistent with the representation of structures in pymatgen.
orbital (str): An orbital. If it is a single character, e.g., s,
p, d or f, the sum of all s-type, p-type, d-type or f-type
orbitals occupations are returned respectively. If it is a
specific orbital, e.g., px, dxy, etc., only the occupation
of that orbital is returned.
Returns:
Sum occupation of orbital of atom.
"""
orbital_index = self.orbitals.index(orbital)
return {spin: np.sum(d[:, :, atom_index, orbital_index] * self.weights[:, None])
for spin, d in self.data.items()}
class Oszicar:
"""
A basic parser for an OSZICAR output from VASP. In general, while the
OSZICAR is useful for a quick look at the output from a VASP run, we
recommend that you use the Vasprun parser instead, which gives far richer
information about a run.
.. attribute:: electronic_steps
All electronic steps as a list of list of dict. e.g.,
[[{"rms": 160.0, "E": 4507.24605593, "dE": 4507.2, "N": 1,
"deps": -17777.0, "ncg": 16576}, ...], [....]
where electronic_steps[index] refers the list of electronic steps
in one ionic_step, electronic_steps[index][subindex] refers to a
particular electronic step at subindex in ionic step at index. The
dict of properties depends on the type of VASP run, but in general,
"E", "dE" and "rms" should be present in almost all runs.
.. attribute:: ionic_steps:
All ionic_steps as a list of dict, e.g.,
[{"dE": -526.36, "E0": -526.36024, "mag": 0.0, "F": -526.36024},
...]
This is the typical output from VASP at the end of each ionic step.
"""
def __init__(self, filename):
"""
Args:
filename (str): Filename of file to parse
"""
electronic_steps = []
ionic_steps = []
ionic_pattern = re.compile(r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"d\s*E\s*=\s*([\d\-\.E\+]+)$")
ionic_mag_pattern = re.compile(r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"d\s*E\s*=\s*([\d\-\.E\+]+)\s+"
r"mag=\s*([\d\-\.E\+]+)")
ionic_MD_pattern = re.compile(r"(\d+)\s+T=\s*([\d\-\.E\+]+)\s+"
r"E=\s*([\d\-\.E\+]+)\s+"
r"F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"EK=\s*([\d\-\.E\+]+)\s+"
r"SP=\s*([\d\-\.E\+]+)\s+"
r"SK=\s*([\d\-\.E\+]+)")
electronic_pattern = re.compile(r"\s*\w+\s*:(.*)")
def smart_convert(header, num):
try:
if header == "N" or header == "ncg":
v = int(num)
return v
v = float(num)
return v
except ValueError:
return "--"
header = []
with zopen(filename, "rt") as fid:
for line in fid:
line = line.strip()
m = electronic_pattern.match(line)
if m:
toks = m.group(1).split()
data = {header[i]: smart_convert(header[i], toks[i])
for i in range(len(toks))}
if toks[0] == "1":
electronic_steps.append([data])
else:
electronic_steps[-1].append(data)
elif ionic_pattern.match(line.strip()):
m = ionic_pattern.match(line.strip())
ionic_steps.append({"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4))})
elif ionic_mag_pattern.match(line.strip()):
m = ionic_mag_pattern.match(line.strip())
ionic_steps.append({"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4)),
"mag": float(m.group(5))})
elif ionic_MD_pattern.match(line.strip()):
m = ionic_MD_pattern.match(line.strip())
ionic_steps.append({"T": float(m.group(2)),
"E": float(m.group(3)),
"F": float(m.group(4)),
"E0": float(m.group(5)),
"EK": float(m.group(6)),
"SP": float(m.group(7)),
"SK": float(m.group(8))})
elif re.match(r"^\s*N\s+E\s*", line):
header = line.strip().replace("d eps", "deps").split()
self.electronic_steps = electronic_steps
self.ionic_steps = ionic_steps
@property
def all_energies(self):
"""
Compilation of all energies from all electronic steps and ionic steps
as a tuple of list of energies, e.g.,
((4507.24605593, 143.824705755, -512.073149912, ...), ...)
"""
all_energies = []
for i in range(len(self.electronic_steps)):
energies = [step["E"] for step in self.electronic_steps[i]]
energies.append(self.ionic_steps[i]["F"])
all_energies.append(tuple(energies))
return tuple(all_energies)
@property # type: ignore
@unitized("eV")
def final_energy(self):
"""
Final energy from run.
"""
return self.ionic_steps[-1]["E0"]
def as_dict(self):
"""
:return: MSONable dict
"""
return {"electronic_steps": self.electronic_steps,
"ionic_steps": self.ionic_steps}
class VaspParserError(Exception):
"""
Exception class for VASP parsing.
"""
pass
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
The method returns None is there"s a parsing error
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
# TODO: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
# get all branch dir names
branch_dir_names = [os.path.abspath(d)
for d in glob.glob("{i}/branch_*"
.format(i=dir_name))
if os.path.isdir(d)]
# sort by the directory name (e.g, branch_10)
sorted_branch_dir_names = sorted(branch_dir_names, key=lambda x: int(x.split("_")[-1]))
# populate branches with Bandstructure instances
branches = []
for dir_name in sorted_branch_dir_names:
xml_file = os.path.join(dir_name, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.append(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn("Skipping {}. Unable to find {}".format(dir_name, xml_file))
return get_reconstructed_band_structure(branches, efermi)
else:
xml_file = os.path.join(dir_name, "vasprun.xml")
# Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections) \
.get_band_structure(kpoints_filename=None, efermi=efermi)
else:
return None
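# A minimal usage sketch for the branch-aware band structure helper above. The
# directory layout (either branch_0, branch_1, ... sub-folders or a single
# vasprun.xml) is an illustrative assumption.
def _example_multi_branch_band_structure(run_dir="bs_run"):
    """Sketch: assemble a band structure from a (possibly branched) run."""
    bs = get_band_structure_from_vasp_multiple_branches(run_dir, projections=False)
    if bs is None:
        return None
    return bs.is_metal()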
class Xdatcar:
"""
Class representing an XDATCAR file. Only tested with VASP 5.x files.
.. attribute:: structures
List of structures parsed from XDATCAR.
.. attribute:: comment
Optional comment string.
Authors: Ram Balachandran
"""
def __init__(self, filename, ionicstep_start=1,
ionicstep_end=None, comment=None):
"""
Init a Xdatcar.
Args:
filename (str): Filename of input XDATCAR file.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
"""
preamble = None
coords_str = []
structures = []
preamble_done = False
if (ionicstep_start < 1):
raise Exception('Start ionic step cannot be less than 1')
        if (ionicstep_end is not None and
                ionicstep_end < 1):
            raise Exception('End ionic step cannot be less than 1')
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
if ionicstep_cnt >= ionicstep_end:
break
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
self.comment = comment or self.structures[0].formula
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Xdatcar. Similar to 6th line in
vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def concatenate(self, filename, ionicstep_start=1,
ionicstep_end=None):
"""
Concatenate structures in file to Xdatcar.
Args:
filename (str): Filename of XDATCAR file to be concatenated.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
        TODO(rambalachandran):
            Add a check to ensure that the file being concatenated has the
            same lattice and atoms as the existing Xdatcar.
"""
preamble = None
coords_str = []
structures = self.structures
preamble_done = False
if ionicstep_start < 1:
raise Exception('Start ionic step cannot be less than 1')
        if (ionicstep_end is not None and
                ionicstep_end < 1):
            raise Exception('End ionic step cannot be less than 1')
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
def get_string(self, ionicstep_start=1,
ionicstep_end=None,
significant_figures=8):
"""
Write Xdatcar class to a string.
Args:
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
significant_figures (int): Number of significant figures.
"""
if ionicstep_start < 1:
raise Exception('Start ionic step cannot be less than 1')
if ionicstep_end is not None and ionicstep_end < 1:
raise Exception('End ionic step cannot be less than 1')
latt = self.structures[0].lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
lines = [self.comment, "1.0", str(latt)]
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
format_str = "{{:.{0}f}}".format(significant_figures)
ionicstep_cnt = 1
output_cnt = 1
for cnt, structure in enumerate(self.structures):
ionicstep_cnt = cnt + 1
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
lines.append("Direct configuration=" +
' ' * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
lines.append("Direct configuration=" +
' ' * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
return "\n".join(lines) + "\n"
def write_file(self, filename, **kwargs):
"""
Write Xdatcar class into a file.
Args:
filename (str): Filename of output XDATCAR file.
The supported kwargs are the same as those for the
Xdatcar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def __str__(self):
return self.get_string()
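# A minimal usage sketch for Xdatcar. The file names and the ionic-step window
# are illustrative assumptions; ionic steps are counted from 1 and the end
# bound is exclusive, as in the parsing loop above.
def _example_xdatcar_slice(path="XDATCAR", out_path="XDATCAR_part"):
    """Sketch: keep ionic steps 10-19 of an XDATCAR and write them back out."""
    xd = Xdatcar(path, ionicstep_start=10, ionicstep_end=20)
    xd.write_file(out_path)
    return len(xd.structures)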
class Dynmat:
"""
Object for reading a DYNMAT file.
.. attribute:: data
A nested dict containing the DYNMAT data of the form::
[atom <int>][disp <int>]['dispvec'] =
displacement vector (part of first line in dynmat block, e.g. "0.01 0 0")
[atom <int>][disp <int>]['dynmat'] =
<list> list of dynmat lines for this atom and this displacement
Authors: Patrick Huck
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing DYNMAT
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
self._nspecs, self._natoms, self._ndisps = map(int, lines[
0].split())
self._masses = map(float, lines[1].split())
self.data = defaultdict(dict)
atom, disp = None, None
for i, l in enumerate(lines[2:]):
v = list(map(float, l.split()))
if not i % (self._natoms + 1):
atom, disp = map(int, v[:2])
if atom not in self.data:
self.data[atom] = {}
if disp not in self.data[atom]:
self.data[atom][disp] = {}
self.data[atom][disp]['dispvec'] = v[2:]
else:
if 'dynmat' not in self.data[atom][disp]:
self.data[atom][disp]['dynmat'] = []
self.data[atom][disp]['dynmat'].append(v)
def get_phonon_frequencies(self):
"""calculate phonon frequencies"""
# TODO: the following is most likely not correct or suboptimal
# hence for demonstration purposes only
frequencies = []
        for k, v0 in self.data.items():
            for v1 in v0.values():
vec = map(abs, v1['dynmat'][k - 1])
frequency = math.sqrt(sum(vec)) * 2. * math.pi * 15.633302 # THz
frequencies.append(frequency)
return frequencies
@property
def nspecs(self):
"""returns the number of species"""
return self._nspecs
@property
def natoms(self):
"""returns the number of atoms"""
return self._natoms
@property
def ndisps(self):
"""returns the number of displacements"""
return self._ndisps
@property
def masses(self):
"""returns the list of atomic masses"""
return list(self._masses)
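# A minimal usage sketch for Dynmat. The DYNMAT path is an illustrative
# assumption; the nested dict layout follows the class docstring above.
def _example_dynmat_summary(path="DYNMAT"):
    """Sketch: inspect the first displacement block parsed from a DYNMAT."""
    dm = Dynmat(path)
    first_atom = sorted(dm.data)[0]
    first_disp = sorted(dm.data[first_atom])[0]
    block = dm.data[first_atom][first_disp]
    return dm.natoms, dm.ndisps, block['dispvec'], len(block['dynmat'])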
def get_adjusted_fermi_level(efermi, cbm, band_structure):
"""
    When running band structure computations, the Fermi level needs to be
    taken from the static run that gave the charge density used for the
    non-self-consistent band structure run. Sometimes this Fermi level is
    a little too low because of the mismatch between the uniform grid used in
    the static run and the band structure k-points (e.g., the VBM is at Gamma
    and the Gamma point is not in the uniform mesh). Here we use a procedure
    that scans energy levels higher than the static Fermi level (but lower
    than the LUMO); if any of these levels makes the band structure appear
    insulating rather than metallic, we keep this adjusted Fermi level. This
    procedure has been shown to correctly detect most insulators.
Args:
efermi (float): Fermi energy of the static run
cbm (float): Conduction band minimum of the static run
        band_structure: a BandStructureSymmLine object
Returns:
a new adjusted fermi level
"""
# make a working copy of band_structure
bs_working = BandStructureSymmLine.from_dict(band_structure.as_dict())
if bs_working.is_metal():
e = efermi
while e < cbm:
e += 0.01
bs_working._efermi = e
if not bs_working.is_metal():
return e
return efermi
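# A minimal usage sketch for get_adjusted_fermi_level. The vasprun.xml paths
# are illustrative assumptions; the band-structure run is assumed to have its
# KPOINTS file alongside vasprun.xml so that line_mode parsing works.
def _example_adjust_fermi_level(static_xml="static/vasprun.xml",
                                bands_xml="bands/vasprun.xml"):
    """Sketch: re-anchor a line-mode band structure on the static-run CBM."""
    static = Vasprun(static_xml)
    cbm = static.eigenvalue_band_properties[1]  # (gap, cbm, vbm, direct)
    bs = Vasprun(bands_xml).get_band_structure(line_mode=True)
    return get_adjusted_fermi_level(static.efermi, cbm, bs)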
class Wavecar:
"""
This is a class that contains the (pseudo-) wavefunctions from VASP.
Coefficients are read from the given WAVECAR file and the corresponding
G-vectors are generated using the algorithm developed in WaveTrans (see
acknowledgments below). To understand how the wavefunctions are evaluated,
please see the evaluate_wavefunc docstring.
It should be noted that the pseudopotential augmentation is not included in
the WAVECAR file. As a result, some caution should be exercised when
    deriving values from this information.
    This class is useful for performing projections or band-unfolding-style
    manipulations of the wavefunction. An example of this can
be seen in the work of Shen et al. 2017
(https://doi.org/10.1103/PhysRevMaterials.1.065001).
.. attribute:: filename
String of the input file (usually WAVECAR)
.. attribute:: nk
Number of k-points from the WAVECAR
.. attribute:: nb
Number of bands per k-point
.. attribute:: encut
Energy cutoff (used to define G_{cut})
.. attribute:: efermi
Fermi energy
.. attribute:: a
Primitive lattice vectors of the cell (e.g. a_1 = self.a[0, :])
.. attribute:: b
Reciprocal lattice vectors of the cell (e.g. b_1 = self.b[0, :])
.. attribute:: vol
The volume of the unit cell in real space
.. attribute:: kpoints
The list of k-points read from the WAVECAR file
.. attribute:: band_energy
The list of band eigenenergies (and corresponding occupancies) for
each kpoint, where the first index corresponds to the index of the
k-point (e.g. self.band_energy[kp])
.. attribute:: Gpoints
The list of generated G-points for each k-point (a double list), which
are used with the coefficients for each k-point and band to recreate
the wavefunction (e.g. self.Gpoints[kp] is the list of G-points for
k-point kp). The G-points depend on the k-point and reciprocal lattice
and therefore are identical for each band at the same k-point. Each
G-point is represented by integer multipliers (e.g. assuming
Gpoints[kp][n] == [n_1, n_2, n_3], then
G_n = n_1*b_1 + n_2*b_2 + n_3*b_3)
.. attribute:: coeffs
The list of coefficients for each k-point and band for reconstructing
the wavefunction. For non-spin-polarized, the first index corresponds
to the kpoint and the second corresponds to the band (e.g.
self.coeffs[kp][b] corresponds to k-point kp and band b). For
spin-polarized calculations, the first index is for the spin.
Acknowledgments:
This code is based upon the Fortran program, WaveTrans, written by
R. M. Feenstra and M. Widom from the Dept. of Physics at Carnegie
Mellon University. To see the original work, please visit:
https://www.andrew.cmu.edu/user/feenstra/wavetrans/
Author: Mark Turiansky
"""
def __init__(self, filename='WAVECAR', verbose=False, precision='normal'):
"""
Information is extracted from the given WAVECAR
Args:
filename (str): input file (default: WAVECAR)
verbose (bool): determines whether processing information is shown
precision (str): determines how fine the fft mesh is (normal or
accurate), only the first letter matters
"""
self.filename = filename
# c = 0.26246582250210965422
# 2m/hbar^2 in agreement with VASP
self._C = 0.262465831
with open(self.filename, 'rb') as f:
# read the header information
recl, spin, rtag = np.fromfile(f, dtype=np.float64, count=3) \
.astype(np.int)
if verbose:
print('recl={}, spin={}, rtag={}'.format(recl, spin, rtag))
recl8 = int(recl / 8)
self.spin = spin
# check that ISPIN wasn't set to 2
# if spin == 2:
# raise ValueError('spin polarization not currently supported')
# check to make sure we have precision correct
if rtag != 45200 and rtag != 45210:
raise ValueError('invalid rtag of {}'.format(rtag))
# padding
np.fromfile(f, dtype=np.float64, count=(recl8 - 3))
# extract kpoint, bands, energy, and lattice information
self.nk, self.nb, self.encut = np.fromfile(f, dtype=np.float64,
count=3).astype(np.int)
self.a = np.fromfile(f, dtype=np.float64, count=9).reshape((3, 3))
self.efermi = np.fromfile(f, dtype=np.float64, count=1)[0]
if verbose:
print('kpoints = {}, bands = {}, energy cutoff = {}, fermi '
'energy= {:.04f}\n'.format(self.nk, self.nb, self.encut,
self.efermi))
print('primitive lattice vectors = \n{}'.format(self.a))
self.vol = np.dot(self.a[0, :],
np.cross(self.a[1, :], self.a[2, :]))
if verbose:
print('volume = {}\n'.format(self.vol))
# calculate reciprocal lattice
b = np.array([np.cross(self.a[1, :], self.a[2, :]),
np.cross(self.a[2, :], self.a[0, :]),
np.cross(self.a[0, :], self.a[1, :])])
b = 2 * np.pi * b / self.vol
self.b = b
if verbose:
print('reciprocal lattice vectors = \n{}'.format(b))
print('reciprocal lattice vector magnitudes = \n{}\n'
.format(np.linalg.norm(b, axis=1)))
# calculate maximum number of b vectors in each direction
self._generate_nbmax()
if verbose:
print('max number of G values = {}\n\n'.format(self._nbmax))
self.ng = self._nbmax * 3 if precision.lower()[0] == 'n' else \
self._nbmax * 4
# padding
np.fromfile(f, dtype=np.float64, count=recl8 - 13)
# reading records
# np.set_printoptions(precision=7, suppress=True)
self.Gpoints = [None for _ in range(self.nk)]
self.kpoints = []
if spin == 2:
self.coeffs = [[[None for i in range(self.nb)]
for j in range(self.nk)] for _ in range(spin)]
self.band_energy = [[] for _ in range(spin)]
else:
self.coeffs = [[None for i in range(self.nb)]
for j in range(self.nk)]
self.band_energy = []
for ispin in range(spin):
if verbose:
print('reading spin {}'.format(ispin))
for ink in range(self.nk):
# information for this kpoint
nplane = int(np.fromfile(f, dtype=np.float64, count=1)[0])
kpoint = np.fromfile(f, dtype=np.float64, count=3)
if ispin == 0:
self.kpoints.append(kpoint)
else:
assert np.allclose(self.kpoints[ink], kpoint)
if verbose:
print('kpoint {: 4} with {: 5} plane waves at {}'
.format(ink, nplane, kpoint))
# energy and occupation information
enocc = np.fromfile(f, dtype=np.float64,
count=3 * self.nb).reshape((self.nb, 3))
if spin == 2:
self.band_energy[ispin].append(enocc)
else:
self.band_energy.append(enocc)
if verbose:
print(enocc[:, [0, 2]])
# padding
np.fromfile(f, dtype=np.float64, count=(recl8 - 4 - 3 * self.nb))
# generate G integers
self.Gpoints[ink] = self._generate_G_points(kpoint)
if len(self.Gpoints[ink]) != nplane:
raise ValueError('failed to generate the correct '
'number of G points')
# extract coefficients
for inb in range(self.nb):
if rtag == 45200:
data = np.fromfile(f, dtype=np.complex64, count=nplane)
np.fromfile(f, dtype=np.float64, count=recl8 - nplane)
elif rtag == 45210:
# this should handle double precision coefficients
# but I don't have a WAVECAR to test it with
data = np.fromfile(f, dtype=np.complex128, count=nplane)
np.fromfile(f, dtype=np.float64, count=recl8 - 2 * nplane)
if spin == 2:
self.coeffs[ispin][ink][inb] = data
else:
self.coeffs[ink][inb] = data
def _generate_nbmax(self):
"""
Helper function that determines maximum number of b vectors for
each direction.
This algorithm is adapted from WaveTrans (see Class docstring). There
should be no reason for this function to be called outside of
initialization.
"""
bmag = np.linalg.norm(self.b, axis=1)
b = self.b
# calculate maximum integers in each direction for G
phi12 = np.arccos(np.dot(b[0, :], b[1, :]) / (bmag[0] * bmag[1]))
sphi123 = np.dot(b[2, :], np.cross(b[0, :], b[1, :])) / (bmag[2] * np.linalg.norm(np.cross(b[0, :], b[1, :])))
nbmaxA = np.sqrt(self.encut * self._C) / bmag
nbmaxA[0] /= np.abs(np.sin(phi12))
nbmaxA[1] /= np.abs(np.sin(phi12))
nbmaxA[2] /= np.abs(sphi123)
nbmaxA += 1
phi13 = np.arccos(np.dot(b[0, :], b[2, :]) / (bmag[0] * bmag[2]))
sphi123 = np.dot(b[1, :], np.cross(b[0, :], b[2, :])) / (bmag[1] * np.linalg.norm(np.cross(b[0, :], b[2, :])))
nbmaxB = np.sqrt(self.encut * self._C) / bmag
nbmaxB[0] /= np.abs(np.sin(phi13))
nbmaxB[1] /= np.abs(sphi123)
nbmaxB[2] /= np.abs(np.sin(phi13))
nbmaxB += 1
phi23 = np.arccos(np.dot(b[1, :], b[2, :]) / (bmag[1] * bmag[2]))
sphi123 = np.dot(b[0, :], np.cross(b[1, :], b[2, :])) / (bmag[0] * np.linalg.norm(np.cross(b[1, :], b[2, :])))
nbmaxC = np.sqrt(self.encut * self._C) / bmag
nbmaxC[0] /= np.abs(sphi123)
nbmaxC[1] /= np.abs(np.sin(phi23))
nbmaxC[2] /= np.abs(np.sin(phi23))
nbmaxC += 1
self._nbmax = np.max([nbmaxA, nbmaxB, nbmaxC], axis=0).astype(np.int)
def _generate_G_points(self, kpoint):
"""
Helper function to generate G-points based on nbmax.
This function iterates over possible G-point values and determines
if the energy is less than G_{cut}. Valid values are appended to
the output array. This function should not be called outside of
initialization.
Args:
kpoint (np.array): the array containing the current k-point value
Returns:
a list containing valid G-points
"""
gpoints = []
for i in range(2 * self._nbmax[2] + 1):
i3 = i - 2 * self._nbmax[2] - 1 if i > self._nbmax[2] else i
for j in range(2 * self._nbmax[1] + 1):
j2 = j - 2 * self._nbmax[1] - 1 if j > self._nbmax[1] else j
for k in range(2 * self._nbmax[0] + 1):
k1 = k - 2 * self._nbmax[0] - 1 if k > self._nbmax[0] else k
G = np.array([k1, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, self.b))
E = g ** 2 / self._C
if E < self.encut:
gpoints.append(G)
return np.array(gpoints, dtype=np.float64)
def evaluate_wavefunc(self, kpoint, band, r, spin=0):
r"""
Evaluates the wavefunction for a given position, r.
The wavefunction is given by the k-point and band. It is evaluated
at the given position by summing over the components. Formally,
\psi_n^k (r) = \sum_{i=1}^N c_i^{n,k} \exp (i (k + G_i^{n,k}) \cdot r)
where \psi_n^k is the wavefunction for the nth band at k-point k, N is
the number of plane waves, c_i^{n,k} is the ith coefficient that
corresponds to the nth band and k-point k, and G_i^{n,k} is the ith
G-point corresponding to k-point k.
NOTE: This function is very slow; a discrete fourier transform is the
preferred method of evaluation (see Wavecar.fft_mesh).
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
r (np.array): the position where the wavefunction will be evaluated
spin (int): spin index for the desired wavefunction (only for
ISPIN = 2, default = 0)
Returns:
a complex value corresponding to the evaluation of the wavefunction
"""
v = self.Gpoints[kpoint] + self.kpoints[kpoint]
u = np.dot(np.dot(v, self.b), r)
c = self.coeffs[spin][kpoint][band] if self.spin == 2 else \
self.coeffs[kpoint][band]
return np.sum(np.dot(c, np.exp(1j * u, dtype=np.complex64))) / np.sqrt(self.vol)
def fft_mesh(self, kpoint, band, spin=0, shift=True):
"""
Places the coefficients of a wavefunction onto an fft mesh.
Once the mesh has been obtained, a discrete fourier transform can be
used to obtain real-space evaluation of the wavefunction. The output
of this function can be passed directly to numpy's fft function. For
example:
mesh = Wavecar('WAVECAR').fft_mesh(kpoint, band)
evals = np.fft.ifftn(mesh)
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
spin (int): the spin of the wavefunction for the desired
wavefunction (only for ISPIN = 2, default = 0)
shift (bool): determines if the zero frequency coefficient is
placed at index (0, 0, 0) or centered
Returns:
a numpy ndarray representing the 3D mesh of coefficients
"""
mesh = np.zeros(tuple(self.ng), dtype=np.complex)
tcoeffs = self.coeffs[spin][kpoint][band] if self.spin == 2 else \
self.coeffs[kpoint][band]
for gp, coeff in zip(self.Gpoints[kpoint], tcoeffs):
t = tuple(gp.astype(np.int) + (self.ng / 2).astype(np.int))
mesh[t] = coeff
if shift:
return np.fft.ifftshift(mesh)
else:
return mesh
def get_parchg(self, poscar, kpoint, band, spin=None, phase=False,
scale=2):
"""
Generates a Chgcar object, which is the charge density of the specified
wavefunction.
This function generates a Chgcar object with the charge density of the
wavefunction specified by band and kpoint (and spin, if the WAVECAR
corresponds to a spin-polarized calculation). The phase tag is a
feature that is not present in VASP. For a real wavefunction, the phase
tag being turned on means that the charge density is multiplied by the
sign of the wavefunction at that point in space. A warning is generated
if the phase tag is on and the chosen kpoint is not Gamma.
Note: Augmentation from the PAWs is NOT included in this function. The
maximal charge density will differ from the PARCHG from VASP, but the
qualitative shape of the charge density will match.
Args:
poscar (pymatgen.io.vasp.inputs.Poscar): Poscar object that has the
structure associated with the WAVECAR file
kpoint (int): the index of the kpoint for the wavefunction
band (int): the index of the band for the wavefunction
spin (int): optional argument to specify the spin. If the
Wavecar has ISPIN = 2, spin is None generates a
Chgcar with total spin and magnetization, and
spin == {0, 1} specifies just the spin up or
down component.
phase (bool): flag to determine if the charge density is
multiplied by the sign of the wavefunction.
Only valid for real wavefunctions.
scale (int): scaling for the FFT grid. The default value of 2 is
at least as fine as the VASP default.
Returns:
a pymatgen.io.vasp.outputs.Chgcar object
"""
if phase and not np.all(self.kpoints[kpoint] == 0.):
warnings.warn('phase == True should only be used for the Gamma '
'kpoint! I hope you know what you\'re doing!')
# scaling of ng for the fft grid, need to restore value at the end
temp_ng = self.ng
self.ng = self.ng * scale
N = np.prod(self.ng)
data = {}
if self.spin == 2:
if spin is not None:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=spin)) * N
den = np.abs(np.conj(wfr) * wfr)
if phase:
den = np.sign(np.real(wfr)) * den
data['total'] = den
else:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=0)) * N
denup = np.abs(np.conj(wfr) * wfr)
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=1)) * N
dendn = np.abs(np.conj(wfr) * wfr)
data['total'] = denup + dendn
data['diff'] = denup - dendn
else:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band)) * N
den = np.abs(np.conj(wfr) * wfr)
if phase:
den = np.sign(np.real(wfr)) * den
data['total'] = den
self.ng = temp_ng
return Chgcar(poscar, data)
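# A minimal usage sketch for Wavecar. The WAVECAR/POSCAR paths and the
# band/k-point indices are illustrative assumptions; see fft_mesh and
# get_parchg above for the underlying machinery.
def _example_wavecar_density(wavecar_path="WAVECAR", poscar_path="POSCAR",
                             kpoint=0, band=0):
    """Sketch: real-space pseudo-wavefunction and a PARCHG-like density."""
    wc = Wavecar(wavecar_path)
    mesh = wc.fft_mesh(kpoint, band)   # coefficients placed on the FFT grid
    psi_r = np.fft.ifftn(mesh)         # real-space (pseudo-)wavefunction
    poscar = Poscar.from_file(poscar_path)
    chgcar = wc.get_parchg(poscar, kpoint, band)
    return psi_r, chgcar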
class Eigenval:
"""
Object for reading EIGENVAL file.
.. attribute:: filename
string containing input filename
.. attribute:: occu_tol
tolerance for determining occupation in band properties
.. attribute:: ispin
spin polarization tag (int)
.. attribute:: nelect
number of electrons
.. attribute:: nkpt
number of kpoints
.. attribute:: nbands
number of bands
.. attribute:: kpoints
list of kpoints
.. attribute:: kpoints_weights
weights of each kpoint in the BZ, should sum to 1.
.. attribute:: eigenvalues
Eigenvalues as a dict of {(spin): np.ndarray(shape=(nkpt, nbands, 2))}.
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
"""
def __init__(self, filename, occu_tol=1e-8):
"""
Reads input from filename to construct Eigenval object
Args:
filename (str): filename of EIGENVAL to read in
occu_tol (float): tolerance for determining band gap
Returns:
a pymatgen.io.vasp.outputs.Eigenval object
"""
self.filename = filename
self.occu_tol = occu_tol
with zopen(filename, 'r') as f:
self.ispin = int(f.readline().split()[-1])
# useless header information
for _ in range(4):
f.readline()
self.nelect, self.nkpt, self.nbands = \
list(map(int, f.readline().split()))
self.kpoints = []
self.kpoints_weights = []
if self.ispin == 2:
self.eigenvalues = \
{Spin.up: np.zeros((self.nkpt, self.nbands, 2)),
Spin.down: np.zeros((self.nkpt, self.nbands, 2))}
else:
self.eigenvalues = \
{Spin.up: np.zeros((self.nkpt, self.nbands, 2))}
ikpt = -1
for line in f:
if re.search(r'(\s+[\-+0-9eE.]+){4}', str(line)):
ikpt += 1
kpt = list(map(float, line.split()))
self.kpoints.append(kpt[:-1])
self.kpoints_weights.append(kpt[-1])
for i in range(self.nbands):
sl = list(map(float, f.readline().split()))
if len(sl) == 3:
self.eigenvalues[Spin.up][ikpt, i, 0] = sl[1]
self.eigenvalues[Spin.up][ikpt, i, 1] = sl[2]
elif len(sl) == 5:
self.eigenvalues[Spin.up][ikpt, i, 0] = sl[1]
self.eigenvalues[Spin.up][ikpt, i, 1] = sl[3]
self.eigenvalues[Spin.down][ikpt, i, 0] = sl[2]
self.eigenvalues[Spin.down][ikpt, i, 1] = sl[4]
@property
def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct).
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
for spin, d in self.eigenvalues.items():
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
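# A minimal usage sketch for Eigenval. The EIGENVAL path is an illustrative
# assumption.
def _example_eigenval_gap(path="EIGENVAL"):
    """Sketch: band gap summary derived from an EIGENVAL file."""
    eig = Eigenval(path)
    gap, cbm, vbm, is_direct = eig.eigenvalue_band_properties
    return {"gap (eV)": gap, "cbm": cbm, "vbm": vbm, "direct": is_direct}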
class Wavederf:
"""
Object for reading a WAVEDERF file.
Note: This file is only produced when LOPTICS is true AND vasp has been
recompiled after uncommenting the line that calls
WRT_CDER_BETWEEN_STATES_FORMATTED in linear_optics.F
.. attribute:: data
A numpy array containing the WAVEDERF data of the form below. It should
be noted that VASP uses 1-based indexing for bands, but this is
converted to 0-based numpy array indexing.
For each kpoint (in the same order as in IBZKPT), and for each pair of
bands:
[ #kpoint index
[ #band 1 index
[ #band 2 index
[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
]
]
]
This structure follows the file format. Numpy array methods can be used
to fetch data in a more useful way (e.g., get matrix elements between
    two specific bands at each kpoint, fetch x/y/z components,
    real/imaginary parts, abs/phase, etc.)
Author: Miguel Dias Costa
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing WAVEDERF.
"""
with zopen(filename, "rt") as f:
header = f.readline().split()
nb_kpoints = int(header[1])
nb_bands = int(header[2])
data = np.zeros((nb_kpoints, nb_bands, nb_bands, 6))
for ik in range(nb_kpoints):
for ib1 in range(nb_bands):
for ib2 in range(nb_bands):
                        # each line in the file contains, besides the two band
                        # indexes (which are redundant) and each band's energy
                        # and occupation (already available elsewhere), the six
                        # matrix elements, so we store only the values that
                        # follow these 6 redundant entries
data[ik][ib1][ib2] = [float(element)
for element in f.readline().split()[6:]]
self.data = data
self._nb_kpoints = nb_kpoints
self._nb_bands = nb_bands
@property
def nb_bands(self):
"""
returns the number of bands in the band structure
"""
return self._nb_bands
@property
def nb_kpoints(self):
"""
Returns the number of k-points in the band structure calculation
"""
return self._nb_kpoints
def get_elements_between_bands(self, band_i, band_j):
"""
Method returning a numpy array with elements
[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
between bands band_i and band_j (vasp 1-based indexing) for all kpoints.
Args:
band_i (Integer): Index of band i
band_j (Integer): Index of band j
Returns:
a numpy list of elements for each kpoint
"""
if band_i < 1 or band_i > self.nb_bands or band_j < 1 or band_j > self.nb_bands:
raise ValueError("Band index out of bounds")
return self.data[:, band_i - 1, band_j - 1, :]
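# A minimal usage sketch for Wavederf. The file name and band indices are
# illustrative assumptions; note the 1-based band indexing of the method above.
def _example_wavederf_elements(path="WAVEDERF", band_i=1, band_j=2):
    """Sketch: the six cdum components between two bands at every k-point."""
    wdf = Wavederf(path)
    # shape (nb_kpoints, 6): x/y/z real and imaginary parts
    return wdf.get_elements_between_bands(band_i, band_j)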
class Waveder:
"""
Class for reading a WAVEDER file.
The LOPTICS tag produces a WAVEDER file.
The WAVEDER contains the derivative of the orbitals with respect to k.
Author: Kamal Choudhary, NIST
"""
def __init__(self, filename, gamma_only=False):
"""
Args:
filename: Name of file containing WAVEDER.
"""
with open(filename, 'rb') as fp:
def readData(dtype):
""" Read records from Fortran binary file and convert to
np.array of given dtype. """
data = b''
while 1:
prefix = np.fromfile(fp, dtype=np.int32, count=1)[0]
data += fp.read(abs(prefix))
suffix = np.fromfile(fp, dtype=np.int32, count=1)[0]
if abs(prefix) - abs(suffix):
raise RuntimeError("Read wrong amount of bytes.\n"
"Expected: %d, read: %d, suffix: %d." % (prefix, len(data), suffix))
if prefix > 0:
break
return np.frombuffer(data, dtype=dtype)
nbands, nelect, nk, ispin = readData(np.int32)
_ = readData(np.float) # nodes_in_dielectric_function
_ = readData(np.float) # wplasmon
if gamma_only:
cder = readData(np.float)
else:
cder = readData(np.complex64)
cder_data = cder.reshape((3, ispin, nk, nelect, nbands)).T
self._cder_data = cder_data
self._nkpoints = nk
self._ispin = ispin
self._nelect = nelect
self._nbands = nbands
@property
def cder_data(self):
"""
Returns the orbital derivative between states
"""
return self._cder_data
@property
def nbands(self):
"""
Returns the number of bands in the calculation
"""
return self._nbands
@property
def nkpoints(self):
"""
Returns the number of k-points in the calculation
"""
return self._nkpoints
@property
def nelect(self):
"""
Returns the number of electrons in the calculation
"""
return self._nelect
def get_orbital_derivative_between_states(self, band_i, band_j, kpoint, spin, cart_dir):
"""
        Method returning the orbital derivative
        between bands band_i and band_j for the given k-point index, spin channel and cartesian direction.
Args:
band_i (Integer): Index of band i
band_j (Integer): Index of band j
kpoint (Integer): Index of k-point
spin (Integer): Index of spin-channel (0 or 1)
cart_dir (Integer): Index of cartesian direction (0,1,2)
Returns:
a float value
"""
if band_i < 0 or band_i > self.nbands - 1 or band_j < 0 or band_j > self.nelect - 1:
raise ValueError("Band index out of bounds")
if kpoint > self.nkpoints:
raise ValueError("K-point index out of bounds")
if cart_dir > 2 or cart_dir < 0:
raise ValueError("cart_dir index out of bounds")
return self._cder_data[band_i, band_j, kpoint, spin, cart_dir]
class UnconvergedVASPWarning(Warning):
"""
Warning for unconverged vasp run.
"""
pass
|
fraricci/pymatgen
|
pymatgen/io/vasp/outputs.py
|
Python
|
mit
| 199,296
|
[
"CRYSTAL",
"VASP",
"VisIt",
"pymatgen"
] |
71686ee9422ba96a77e38f0af25438a3a0a23435225c26b5b8f6eb5f2b9e3430
|
#
# Log-likelihood functions
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import pints
import numpy as np
import scipy.special
class AR1LogLikelihood(pints.ProblemLogLikelihood):
r"""
Calculates a log-likelihood assuming AR(1) (autoregressive order 1) errors.
In this error model, the ith error term
:math:`\epsilon_i = x_i - f_i(\theta)` is assumed to obey the following
relationship.
.. math::
\epsilon_i = \rho \epsilon_{i-1} + \nu_i
where :math:`\nu_i` is IID Gaussian white noise with variance
:math:`\sigma^2 (1-\rho^2)`. Therefore, this likelihood is appropriate when
error terms are autocorrelated, and the parameter :math:`\rho`
determines the level of autocorrelation.
This model is parameterised as such because it leads to a simple marginal
distribution :math:`\epsilon_i \sim N(0, \sigma)`.
This class treats the error at the first time point (i=1) as fixed, which
simplifies the calculations. For sufficiently long time-series, this
conditioning on the first observation has at most a small effect on the
likelihood. Further details as well as the alternative unconditional
likelihood are available in [1]_ , chapter 5.2.
Noting that
.. math::
\nu_i = \epsilon_i - \rho \epsilon_{i-1} \sim N(0, \sigma^2 (1-\rho^2))
    we thus calculate the log-likelihood as the sum of normal log-likelihoods
    from :math:`i=2,...,N`, for a time series with N time points.
    .. math::
        \log L(\theta, \sigma, \rho|\boldsymbol{x}) =
-\frac{N-1}{2} \log(2\pi)
- (N-1) \log(\sigma')
- \frac{1}{2\sigma'^2} \sum_{i=2}^N (\epsilon_i
- \rho \epsilon_{i-1})^2
for :math:`\sigma' = \sigma \sqrt{1-\rho^2}`.
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
problem
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`. For a
single-output problem two parameters are added (rho, sigma),
for a multi-output problem 2 * ``n_outputs`` parameters are added.
References
----------
.. [1] Hamilton, James D. Time series analysis. Vol. 2. New Jersey:
Princeton, 1994.
"""
def __init__(self, problem):
super(AR1LogLikelihood, self).__init__(problem)
# Get number of times, number of outputs
self._nt = len(self._times) - 1
self._no = problem.n_outputs()
# Add parameters to problem
self._n_parameters = problem.n_parameters() + 2 * self._no
# Pre-calculate parts
self._logn = 0.5 * (self._nt) * np.log(2 * np.pi)
def __call__(self, x):
m = 2 * self._no
parameters = x[-m:]
rho = np.asarray(parameters[0::2])
sigma = np.asarray(parameters[1::2])
if any(sigma <= 0):
return -np.inf
sigma = np.asarray(sigma) * np.sqrt(1 - rho**2)
error = self._values - self._problem.evaluate(x[:-2 * self._no])
autocorr_error = error[1:] - rho * error[:-1]
return np.sum(- self._logn - self._nt * np.log(sigma)
- np.sum(autocorr_error**2, axis=0) / (2 * sigma**2))
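# A minimal numpy sketch of the conditional AR(1) log-likelihood above,
# evaluated directly from a residual vector epsilon = x - f(theta). The helper
# name and its use of plain residuals (rather than a pints problem) are
# illustrative assumptions, not part of the class API.
def _example_ar1_loglik(epsilon, rho, sigma):
    """Sum of N(0, sigma') log-densities of nu_i = eps_i - rho * eps_{i-1}."""
    epsilon = np.asarray(epsilon, dtype=float)
    nu = epsilon[1:] - rho * epsilon[:-1]
    sigma_prime = sigma * np.sqrt(1.0 - rho ** 2)
    n = len(nu)  # N - 1 terms: the first observation is conditioned on
    return (-0.5 * n * np.log(2 * np.pi)
            - n * np.log(sigma_prime)
            - np.sum(nu ** 2) / (2 * sigma_prime ** 2))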
class ARMA11LogLikelihood(pints.ProblemLogLikelihood):
r"""
Calculates a log-likelihood assuming ARMA(1,1) errors.
The ARMA(1,1) model has 1 autoregressive term and 1 moving average term. It
assumes that the errors :math:`\epsilon_i = x_i - f_i(\theta)` obey
.. math::
\epsilon_i = \rho \epsilon_{i-1} + \nu_i + \phi \nu_{i-1}
where :math:`\nu_i` is IID Gaussian white noise with standard deviation
:math:`\sigma'`.
.. math::
\sigma' = \sigma \sqrt{\frac{1 - \rho^2}{1 + 2 \phi \rho + \phi^2}}
This model is parameterised as such because it leads to a simple marginal
distribution :math:`\epsilon_i \sim N(0, \sigma)`.
Due to the complexity of the exact ARMA(1,1) likelihood, this class
calculates a likelihood conditioned on initial values. This topic is
discussed further in [2]_ , chapter 5.6. Thus, for a time series defined at
points :math:`i=1,...,N`, summation begins at :math:`i=3`, and the
conditional log-likelihood is
.. math::
        \log L(\theta, \sigma, \rho, \phi|\boldsymbol{x}) =
-\frac{N-2}{2} \log(2\pi)
- (N-2) \log(\sigma')
- \frac{1}{2\sigma'^2} \sum_{i=3}^N (\nu_i)^2
where the values of :math:`\nu_i` are calculated from the observations
according to
.. math::
\nu_i = \epsilon_i - \rho \epsilon_{i-1}
- \phi (\epsilon_{i-1} - \rho \epsilon_{i-2})
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
problem
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`. For a
single-output problem three parameters are added (rho, phi, sigma),
for a multi-output problem 3 * ``n_outputs`` parameters are added.
References
----------
.. [2] Hamilton, James D. Time series analysis. Vol. 2. New Jersey:
Princeton, 1994.
"""
def __init__(self, problem):
super(ARMA11LogLikelihood, self).__init__(problem)
# Get number of times, number of outputs
self._nt = len(self._times) - 2
self._no = problem.n_outputs()
# Add parameters to problem
self._n_parameters = problem.n_parameters() + 3 * self._no
# Pre-calculate parts
self._logn = 0.5 * (self._nt) * np.log(2 * np.pi)
def __call__(self, x):
m = 3 * self._no
parameters = x[-m:]
rho = np.asarray(parameters[0::3])
phi = np.asarray(parameters[1::3])
sigma = np.asarray(parameters[2::3])
if any(sigma <= 0):
return -np.inf
sigma = (
sigma *
np.sqrt((1.0 - rho**2) / (1.0 + 2.0 * phi * rho + phi**2))
)
error = self._values - self._problem.evaluate(x[:-m])
v = error[1:] - rho * error[:-1]
autocorr_error = v[1:] - phi * v[:-1]
return np.sum(- self._logn - self._nt * np.log(sigma)
- np.sum(autocorr_error**2, axis=0) / (2 * sigma**2))
class CauchyLogLikelihood(pints.ProblemLogLikelihood):
r"""
Calculates a log-likelihood assuming independent Cauchy-distributed noise
at each time point, and adds one parameter: the scale (``sigma``).
For a noise characterised by ``sigma``, the log-likelihood is of the form:
.. math::
        \log{L(\theta, \sigma)} =
            -N\log \pi - N\log \sigma
            -\sum_{i=1}^N\log\left(1 +
            \left(\frac{x_i - f_i(\theta)}{\sigma}\right)^2\right)
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
problem
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`. For a
        single-output problem one parameter is added (``sigma``, the scale);
        for a multi-output problem ``n_outputs``
        parameters are added.
"""
def __init__(self, problem):
super(CauchyLogLikelihood, self).__init__(problem)
# Get number of times, number of outputs
self._nt = len(self._times)
self._no = problem.n_outputs()
# Add parameters to problem (one for each output)
self._n_parameters = problem.n_parameters() + self._no
# Pre-calculate
self._n = len(self._times)
self._n_log_pi = self._n * np.log(np.pi)
def __call__(self, x):
# For multiparameter problems the parameters are stored as
# (model_params_1, model_params_2, ..., model_params_k,
# sigma_1, sigma_2,...)
n = self._n
m = self._no
# Distribution parameters
sigma = np.asarray(x[-m:])
if any(sigma <= 0):
return -np.inf
# problem parameters
problem_parameters = x[:-m]
error = self._values - self._problem.evaluate(problem_parameters)
# Calculate
return np.sum(
- self._n_log_pi
- n * np.log(sigma)
- np.sum(np.log(1 + (error / sigma)**2), axis=0)
)
class ConstantAndMultiplicativeGaussianLogLikelihood(
pints.ProblemLogLikelihood):
r"""
Calculates the log-likelihood assuming a mixed error model of a
Gaussian base-level noise and a Gaussian heteroscedastic noise.
For a time series model :math:`f(t| \theta)` with parameters :math:`\theta`
, the ConstantAndMultiplicativeGaussianLogLikelihood assumes that the
model predictions :math:`X` are Gaussian distributed according to
.. math::
X(t| \theta , \sigma _{\text{base}}, \sigma _{\text{rel}}) =
f(t| \theta) + (\sigma _{\text{base}} + \sigma _{\text{rel}}
f(t| \theta)^\eta ) \, \epsilon ,
    where :math:`\epsilon` is an i.i.d. standard Gaussian random variable
.. math::
\epsilon \sim \mathcal{N}(0, 1).
For each output in the problem, this likelihood introduces three new scalar
parameters: a base-level scale :math:`\sigma _{\text{base}}`; an
exponential power :math:`\eta`; and a scale relative to the model output
:math:`\sigma _{\text{rel}}`.
The resulting log-likelihood of a constant and multiplicative Gaussian
error model is
.. math::
\log L(\theta, \sigma _{\text{base}}, \eta ,
\sigma _{\text{rel}} | X^{\text{obs}})
= -\frac{n_t}{2} \log 2 \pi
-\sum_{i=1}^{n_t}\log \sigma _{\text{tot}, i}
- \sum_{i=1}^{n_t}
\frac{(X^{\text{obs}}_i - f(t_i| \theta))^2}
{2\sigma ^2_{\text{tot}, i}},
where :math:`n_t` is the number of measured time points in the time series,
:math:`X^{\text{obs}}_i` is the observation at time point :math:`t_i`, and
:math:`\sigma _{\text{tot}, i}=\sigma _{\text{base}} +\sigma _{\text{rel}}
f(t_i| \theta)^\eta` is the total standard deviation of the error at time
:math:`t_i`.
For a system with :math:`n_o` outputs, this becomes
.. math::
\log L(\theta, \sigma _{\text{base}}, \eta ,
\sigma _{\text{rel}} | X^{\text{obs}})
= -\frac{n_tn_o}{2} \log 2 \pi
        -\sum_{j=1}^{n_o}\sum_{i=1}^{n_t}\log \sigma _{\text{tot}, ij}
        - \sum_{j=1}^{n_o}\sum_{i=1}^{n_t}
\frac{(X^{\text{obs}}_{ij} - f_j(t_i| \theta))^2}
{2\sigma ^2_{\text{tot}, ij}},
where :math:`n_o` is the number of outputs of the model,
:math:`X^{\text{obs}}_{ij}` is the observation at time point :math:`t_i`
of output :math:`j`, and
:math:`\sigma _{\text{tot}, ij}=\sigma _{\text{base}, j} +
\sigma _{\text{rel}, j}f_j(t_i| \theta)^{\eta _j}` is the total standard
deviation of the error at time :math:`t_i` of output :math:`j`.
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
``problem``
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`. For a
single-output problem three parameters are added
(:math:`\sigma _{\text{base}}`, :math:`\eta`,
:math:`\sigma _{\text{rel}}`),
for a multi-output problem :math:`3n_o` parameters are added
(:math:`\sigma _{\text{base},1},\ldots , \sigma _{\text{base},n_o},
\eta _1,\ldots , \eta _{n_o}, \sigma _{\text{rel},1}, \ldots ,
\sigma _{\text{rel},n_o})`.
"""
def __init__(self, problem):
super(ConstantAndMultiplicativeGaussianLogLikelihood, self).__init__(
problem)
# Get number of times and number of noise parameters
self._nt = len(self._times)
self._no = problem.n_outputs()
self._np = 3 * self._no
# Add parameters to problem
self._n_parameters = problem.n_parameters() + self._np
# Pre-calculate the constant part of the likelihood
self._logn = -0.5 * self._nt * self._no * np.log(2 * np.pi)
def __call__(self, parameters):
# Get parameters from input
noise_parameters = np.asarray(parameters[-self._np:])
sigma_base = noise_parameters[:self._no]
eta = noise_parameters[self._no:2 * self._no]
sigma_rel = noise_parameters[2 * self._no:]
# Evaluate noise-free model (n_times, n_outputs)
function_values = self._problem.evaluate(parameters[:-self._np])
# Compute error (n_times, n_outputs)
error = self._values - function_values
# Compute total standard deviation
sigma_tot = sigma_base + sigma_rel * function_values**eta
if np.any(np.asarray(sigma_tot) <= 0):
return -np.inf
# Compute log-likelihood
# (inner sums over time points, outer sum over parameters)
log_likelihood = self._logn - np.sum(
np.sum(np.log(sigma_tot), axis=0)
+ 0.5 * np.sum(error**2 / sigma_tot**2, axis=0))
return log_likelihood
def evaluateS1(self, parameters):
r"""
See :meth:`LogPDF.evaluateS1()`.
The partial derivatives of the log-likelihood w.r.t. the model
parameters are
.. math::
\frac{\partial \log L}{\partial \theta _k}
=& -\sum_{i,j}\sigma _{\text{rel},j}\eta _j\frac{
f_j(t_i| \theta)^{\eta _j-1}}
{\sigma _{\text{tot}, ij}}
\frac{\partial f_j(t_i| \theta)}{\partial \theta _k}
+ \sum_{i,j}
\frac{X^{\text{obs}}_{ij} - f_j(t_i| \theta)}
{\sigma ^2_{\text{tot}, ij}}
\frac{\partial f_j(t_i| \theta)}{\partial \theta _k} \\
&+\sum_{i,j}\sigma _{\text{rel},j}\eta _j
\frac{(X^{\text{obs}}_{ij} - f_j(t_i| \theta))^2}
{\sigma ^3_{\text{tot}, ij}}f_j(t_i| \theta)^{\eta _j-1}
\frac{\partial f_j(t_i| \theta)}{\partial \theta _k} \\
\frac{\partial \log L}{\partial \sigma _{\text{base}, j}}
=& -\sum ^{n_t}_{i=1}\frac{1}{\sigma _{\text{tot}, ij}}
+\sum ^{n_t}_{i=1}
\frac{(X^{\text{obs}}_{ij} - f_j(t_i| \theta))^2}
{\sigma ^3_{\text{tot}, ij}} \\
\frac{\partial \log L}{\partial \eta _j}
=& -\sigma _{\text{rel},j}\eta _j\sum ^{n_t}_{i=1}
\frac{f_j(t_i| \theta)^{\eta _j}\log f_j(t_i| \theta)}
{\sigma _{\text{tot}, ij}}
+ \sigma _{\text{rel},j}\eta _j \sum ^{n_t}_{i=1}
\frac{(X^{\text{obs}}_{ij} - f_j(t_i| \theta))^2}
{\sigma ^3_{\text{tot}, ij}}f_j(t_i| \theta)^{\eta _j}
\log f_j(t_i| \theta) \\
\frac{\partial \log L}{\partial \sigma _{\text{rel},j}}
=& -\sum ^{n_t}_{i=1}
\frac{f_j(t_i| \theta)^{\eta _j}}{\sigma _{\text{tot}, ij}}
+ \sum ^{n_t}_{i=1}
\frac{(X^{\text{obs}}_{ij} - f_j(t_i| \theta))^2}
{\sigma ^3_{\text{tot}, ij}}f_j(t_i| \theta)^{\eta _j},
where :math:`i` sums over the measurement time points and :math:`j`
over the outputs of the model.
"""
L = self.__call__(parameters)
if np.isneginf(L):
return L, np.tile(np.nan, self._n_parameters)
# Get parameters from input
# Shape sigma_base, eta, sigma_rel = (n_outputs,)
noise_parameters = np.asarray(parameters[-self._np:])
sigma_base = noise_parameters[:self._no]
eta = noise_parameters[self._no:2 * self._no]
sigma_rel = noise_parameters[-self._no:]
# Evaluate noise-free model, and get residuals
# y shape = (n_times,) or (n_times, n_outputs)
# dy shape = (n_times, n_model_parameters) or
# (n_times, n_outputs, n_model_parameters)
y, dy = self._problem.evaluateS1(parameters[:-self._np])
# Reshape y and dy, in case we're working with a single-output problem
# Shape y = (n_times, n_outputs)
# Shape dy = (n_model_parameters, n_times, n_outputs)
y = y.reshape(self._nt, self._no)
dy = np.transpose(
dy.reshape(self._nt, self._no, self._n_parameters - self._np),
axes=(2, 0, 1))
# Compute error
# Note: Must be (data - simulation), sign now matters!
# Shape: (n_times, output)
error = self._values.reshape(self._nt, self._no) - y
# Compute total standard deviation
sigma_tot = sigma_base + sigma_rel * y**eta
# Compute derivative w.r.t. model parameters
dtheta = -np.sum(sigma_rel * eta * np.sum(
y**(eta - 1) * dy / sigma_tot, axis=1), axis=1) + \
np.sum(error * dy / sigma_tot**2, axis=(1, 2)) + np.sum(
sigma_rel * eta * np.sum(
error**2 * y**(eta - 1) * dy / sigma_tot**3, axis=1),
axis=1)
# Compute derivative w.r.t. sigma base
dsigma_base = - np.sum(1 / sigma_tot, axis=0) + np.sum(
error**2 / sigma_tot**3, axis=0)
# Compute derivative w.r.t. eta
deta = -sigma_rel * (
np.sum(y**eta * np.log(y) / sigma_tot, axis=0) -
np.sum(
error**2 / sigma_tot**3 * y**eta * np.log(y),
axis=0))
# Compute derivative w.r.t. sigma rel
dsigma_rel = -np.sum(y**eta / sigma_tot, axis=0) + np.sum(
error**2 / sigma_tot**3 * y**eta, axis=0)
# Collect partial derivatives
dL = np.hstack((dtheta, dsigma_base, deta, dsigma_rel))
# Return
return L, dL
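# A minimal single-output numpy sketch of the log-likelihood above, evaluated
# directly from model values f, observations x and the three noise parameters.
# The helper is an illustrative assumption, not part of the class API.
def _example_const_mult_gaussian_loglik(x, f, sigma_base, eta, sigma_rel):
    """-n/2 log(2 pi) - sum log(sigma_tot) - sum (x - f)^2 / (2 sigma_tot^2)."""
    x = np.asarray(x, dtype=float)
    f = np.asarray(f, dtype=float)
    sigma_tot = sigma_base + sigma_rel * f ** eta
    if np.any(sigma_tot <= 0):
        return -np.inf
    n = len(x)
    return (-0.5 * n * np.log(2 * np.pi)
            - np.sum(np.log(sigma_tot))
            - 0.5 * np.sum((x - f) ** 2 / sigma_tot ** 2))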
class GaussianIntegratedUniformLogLikelihood(pints.ProblemLogLikelihood):
r"""
Calculates a log-likelihood assuming independent Gaussian-distributed noise
at each time point where :math:`\sigma\sim U(a,b)` has been integrated out
of the joint posterior of :math:`p(\theta,\sigma|X)`,
.. math::
\begin{align} p(\theta|X) &= \int_{0}^{\infty} p(\theta, \sigma|X)
\mathrm{d}\sigma\\
&\propto \int_{0}^{\infty} p(X|\theta, \sigma) p(\theta, \sigma)
\mathrm{d}\sigma,\end{align}
Note that this is exactly the same statistical model as
:class:`pints.GaussianLogLikelihood` with a uniform prior on
:math:`\sigma`.
A possible advantage of this log-likelihood compared with using a
:class:`pints.GaussianLogLikelihood`, is that it has one fewer parameters
    (:math:`\sigma`), which may speed up convergence to the posterior
distribution, especially for multi-output problems which will have
``n_outputs`` fewer parameter dimensions.
The log-likelihood is given in terms of the sum of squared errors:
.. math::
SSE = \sum_{i=1}^n (f_i(\theta) - y_i)^2
and is given up to a normalisation constant by:
.. math::
\begin{align}
\text{log} L =
& - n / 2 \text{log}(\pi) \\
& - \text{log}(2 (b - a) \sqrt(2)) \\
& + (1 / 2 - n / 2) \text{log}(SSE) \\
& + \text{log}\left[\Gamma((n - 1) / 2, \frac{SSE}{2 b^2}) -
\Gamma((n - 1) / 2, \frac{SSE}{2 a^2}) \right]
\end{align}
where :math:`\Gamma(u,v)` is the upper incomplete gamma function as defined
here: https://en.wikipedia.org/wiki/Incomplete_gamma_function
This log-likelihood is inherently a Bayesian method since it assumes a
uniform prior on :math:`\sigma\sim U(a,b)`. However using this likelihood
in optimisation routines should yield the same estimates as the full
:class:`pints.GaussianLogLikelihood`.
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
problem
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`.
lower
The lower limit on the uniform prior on `sigma`. Must be
non-negative.
upper
The upper limit on the uniform prior on `sigma`.
"""
def __init__(self, problem, lower, upper):
super(GaussianIntegratedUniformLogLikelihood, self).__init__(problem)
# Get number of times, number of outputs
self._nt = len(self._times)
self._no = problem.n_outputs()
# Add parameters to problem
self._n_parameters = problem.n_parameters()
a = lower
if np.isscalar(a):
a = np.ones(self._no) * float(a)
else:
a = pints.vector(a)
if len(a) != self._no:
raise ValueError(
'Lower limit on uniform prior for sigma must be a ' +
' scalar or a vector of length n_outputs.')
if np.any(a < 0):
raise ValueError('Lower limit on uniform prior for sigma ' +
'must be non-negative.')
b = upper
if np.isscalar(b):
b = np.ones(self._no) * float(b)
else:
b = pints.vector(b)
if len(b) != self._no:
raise ValueError(
'Upper limit on uniform prior for sigma must be a ' +
' scalar or a vector of length n_outputs.')
if np.any(b <= 0):
raise ValueError('Upper limit on uniform prior for sigma ' +
'must be positive.')
diff = b - a
if np.any(diff <= 0):
raise ValueError('Upper limit on uniform prior for sigma ' +
'must exceed lower limit.')
self._a = a
self._b = b
# Pre-calculate
n = self._nt
self._n_minus_1_over_2 = (n - 1.0) / 2.0
self._const_a_0 = (
-n * np.log(b) - (n / 2.0) * np.log(np.pi) -
np.log(2 * np.sqrt(2))
)
self._b2 = self._b**2
self._a2 = self._a**2
self._const_general = (
-(n / 2.0) * np.log(np.pi) - np.log(2 * np.sqrt(2) * (b - a))
)
self._log_gamma = scipy.special.gammaln(self._n_minus_1_over_2)
self._two_power = 2**(1 / 2 - n / 2)
def __call__(self, x):
error = self._values - self._problem.evaluate(x)
sse = np.sum(error**2, axis=0)
# Calculate
log_temp = np.zeros(len(self._a2))
sse = pints.vector(sse)
for i, a in enumerate(self._a2):
if a != 0:
log_temp[i] = np.log(
scipy.special.gammaincc(self._n_minus_1_over_2,
sse[i] / (2 * self._b2[i])) -
scipy.special.gammaincc(self._n_minus_1_over_2,
sse[i] / (2 * a)))
else:
log_temp[i] = np.log(
scipy.special.gammaincc(self._n_minus_1_over_2,
sse[i] / (2 * self._b2[i])))
return np.sum(
self._const_general -
self._n_minus_1_over_2 * np.log(sse) +
self._log_gamma +
log_temp
)
class GaussianKnownSigmaLogLikelihood(pints.ProblemLogLikelihood):
r"""
Calculates a log-likelihood assuming independent Gaussian noise at each
time point, using a known value for the standard deviation (sigma) of that
noise:
.. math::
\log{L(\theta | \sigma,\boldsymbol{x})} =
-\frac{N}{2}\log{2\pi}
-N\log{\sigma}
-\frac{1}{2\sigma^2}\sum_{i=1}^N{(x_i - f_i(\theta))^2}
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
problem
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`.
sigma
        The standard deviation(s) of the noise. Can be a single value or a
        sequence of sigmas, one for each output. Must be greater than zero.
"""
def __init__(self, problem, sigma):
super(GaussianKnownSigmaLogLikelihood, self).__init__(problem)
# Store counts
self._no = problem.n_outputs()
self._np = problem.n_parameters()
self._nt = problem.n_times()
# Check sigma
if np.isscalar(sigma):
sigma = np.ones(self._no) * float(sigma)
else:
sigma = pints.vector(sigma)
if len(sigma) != self._no:
raise ValueError(
'Sigma must be a scalar or a vector of length n_outputs.')
if np.any(sigma <= 0):
raise ValueError('Standard deviation must be greater than zero.')
# Pre-calculate parts
self._offset = -0.5 * self._nt * np.log(2 * np.pi)
self._offset -= self._nt * np.log(sigma)
self._multip = -1 / (2.0 * sigma**2)
# Pre-calculate S1 parts
self._isigma2 = sigma**-2
def __call__(self, x):
error = self._values - self._problem.evaluate(x)
return np.sum(self._offset + self._multip * np.sum(error**2, axis=0))
def evaluateS1(self, x):
""" See :meth:`LogPDF.evaluateS1()`. """
# Evaluate, and get residuals
y, dy = self._problem.evaluateS1(x)
# Reshape dy, in case we're working with a single-output problem
dy = dy.reshape(self._nt, self._no, self._np)
# Note: Must be (data - simulation), sign now matters!
r = self._values - y
# Calculate log-likelihood
L = np.sum(self._offset + self._multip * np.sum(r**2, axis=0))
# Calculate derivative
dL = np.sum(
(self._isigma2 * np.sum((r.T * dy.T).T, axis=0).T).T, axis=0)
# Return
return L, dL
class GaussianLogLikelihood(pints.ProblemLogLikelihood):
r"""
Calculates a log-likelihood assuming independent Gaussian noise at each
time point, and adds a parameter representing the standard deviation
(sigma) of the noise on each output.
For a noise level of ``sigma``, the likelihood becomes:
.. math::
L(\theta, \sigma|\boldsymbol{x})
= p(\boldsymbol{x} | \theta, \sigma)
= \prod_{j=1}^{n_t} \frac{1}{\sqrt{2\pi\sigma^2}}\exp\left(
-\frac{(x_j - f_j(\theta))^2}{2\sigma^2}\right)
leading to a log likelihood of:
.. math::
\log{L(\theta, \sigma|\boldsymbol{x})} =
-\frac{n_t}{2} \log{2\pi}
-n_t \log{\sigma}
-\frac{1}{2\sigma^2}\sum_{j=1}^{n_t}{(x_j - f_j(\theta))^2}
where ``n_t`` is the number of time points in the series, ``x_j`` is the
sampled data at time ``j`` and ``f_j`` is the simulated data at time ``j``.
For a system with ``n_o`` outputs, this becomes
.. math::
\log{L(\theta, \sigma|\boldsymbol{x})} =
-\frac{n_t n_o}{2}\log{2\pi}
-\sum_{i=1}^{n_o}{ {n_t}\log{\sigma_i} }
-\sum_{i=1}^{n_o}{\left[
\frac{1}{2\sigma_i^2}\sum_{j=1}^{n_t}{(x_j - f_j(\theta))^2}
\right]}
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
problem
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`. For a
single-output problem a single parameter is added, for a multi-output
problem ``n_outputs`` parameters are added.
"""
def __init__(self, problem):
super(GaussianLogLikelihood, self).__init__(problem)
# Get number of times, number of outputs
self._nt = len(self._times)
self._no = problem.n_outputs()
# Add parameters to problem
self._n_parameters = problem.n_parameters() + self._no
# Pre-calculate parts
self._logn = 0.5 * self._nt * np.log(2 * np.pi)
def __call__(self, x):
sigma = np.asarray(x[-self._no:])
if any(sigma <= 0):
return -np.inf
error = self._values - self._problem.evaluate(x[:-self._no])
return np.sum(- self._logn - self._nt * np.log(sigma)
- np.sum(error**2, axis=0) / (2 * sigma**2))
def evaluateS1(self, x):
""" See :meth:`LogPDF.evaluateS1()`. """
sigma = np.asarray(x[-self._no:])
# Calculate log-likelihood
L = self.__call__(x)
if np.isneginf(L):
return L, np.tile(np.nan, self._n_parameters)
# Evaluate, and get residuals
y, dy = self._problem.evaluateS1(x[:-self._no])
# Reshape dy, in case we're working with a single-output problem
dy = dy.reshape(self._nt, self._no, self._n_parameters - self._no)
# Note: Must be (data - simulation), sign now matters!
r = self._values - y
# Calculate derivatives in the model parameters
dL = np.sum(
(sigma**(-2.0) * np.sum((r.T * dy.T).T, axis=0).T).T, axis=0)
# Calculate derivative wrt sigma
dsigma = -self._nt / sigma + sigma**(-3.0) * np.sum(r**2, axis=0)
dL = np.concatenate((dL, np.array(list(dsigma))))
# Return
return L, dL
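# Note (added comment, not in the original pints module): the parameter vector
# for GaussianLogLikelihood is the model parameters followed by one sigma per
# output, e.g. x = [theta_1, ..., theta_p, sigma_1, ..., sigma_no]; this is
# why __call__ and evaluateS1 above slice off the last self._no entries as
# the noise standard deviations.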
class KnownNoiseLogLikelihood(GaussianKnownSigmaLogLikelihood):
""" Deprecated alias of :class:`GaussianKnownSigmaLogLikelihood`. """
def __init__(self, problem, sigma):
# Deprecated on 2019-02-06
import warnings
warnings.warn(
'The class `pints.KnownNoiseLogLikelihood` is deprecated.'
' Please use `pints.GaussianKnownSigmaLogLikelihood` instead.')
super(KnownNoiseLogLikelihood, self).__init__(problem, sigma)
class MultiplicativeGaussianLogLikelihood(pints.ProblemLogLikelihood):
r"""
Calculates the log-likelihood for a time-series model assuming a
heteroscedastic Gaussian error of the model predictions
:math:`f(t, \theta )`.
This likelihood introduces two new scalar parameters for each dimension of
the model output: an exponential power :math:`\eta` and a scale
:math:`\sigma`.
A heteroscedastic Gaussian noise model assumes that the observable
:math:`X` is Gaussian distributed around the model predictions
:math:`f(t, \theta )` with a standard deviation that scales with
:math:`f(t, \theta )`
.. math::
X(t) = f(t, \theta) + \sigma f(t, \theta)^\eta v(t)
where :math:`v(t)` is a standard i.i.d. Gaussian random variable
.. math::
v(t) \sim \mathcal{N}(0, 1).
This model leads to a log likelihood of the model parameters of
.. math::
\log{L(\theta, \eta , \sigma | X^{\text{obs}})} =
-\frac{n_t}{2} \log{2 \pi}
-\sum_{i=1}^{n_t}{\log{f(t_i, \theta)^\eta \sigma}}
-\frac{1}{2}\sum_{i=1}^{n_t}\left(
\frac{X^{\text{obs}}_{i} - f(t_i, \theta)}
{f(t_i, \theta)^\eta \sigma}\right) ^2,
where :math:`n_t` is the number of time points in the series, and
:math:`X^{\text{obs}}_{i}` the measurement at time :math:`t_i`.
For a system with :math:`n_o` outputs, this becomes
.. math::
\log{L(\theta, \eta , \sigma | X^{\text{obs}})} =
-\frac{n_t n_o}{2} \log{2 \pi}
-\sum ^{n_o}_{j=1}\sum_{i=1}^{n_t}{\log{f_j(t_i, \theta)^\eta
\sigma _j}}
-\frac{1}{2}\sum ^{n_o}_{j=1}\sum_{i=1}^{n_t}\left(
\frac{X^{\text{obs}}_{ij} - f_j(t_i, \theta)}
{f_j(t_i, \theta)^\eta \sigma _j}\right) ^2,
where :math:`n_o` is the number of outputs of the model, and
:math:`X^{\text{obs}}_{ij}` the measurement of output :math:`j` at
time point :math:`t_i`.
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
problem
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`. For a
single-output problem two parameters are added (:math:`\eta`,
:math:`\sigma`), for a multi-output problem 2 times :math:`n_o`
parameters are added.
"""
def __init__(self, problem):
super(MultiplicativeGaussianLogLikelihood, self).__init__(problem)
# Get number of times and number of outputs
self._nt = len(self._times)
no = problem.n_outputs()
self._np = 2 * no # 2 parameters added per output
# Add parameters to problem
self._n_parameters = problem.n_parameters() + self._np
# Pre-calculate the constant part of the likelihood
self._logn = 0.5 * self._nt * no * np.log(2 * np.pi)
def __call__(self, x):
# Get noise parameters
noise_parameters = x[-self._np:]
eta = np.asarray(noise_parameters[0::2])
sigma = np.asarray(noise_parameters[1::2])
if any(sigma <= 0):
return -np.inf
# Evaluate function (n_times, n_output)
function_values = self._problem.evaluate(x[:-self._np])
# Compute likelihood
log_likelihood = \
-self._logn - np.sum(
np.sum(np.log(function_values**eta * sigma), axis=0)
+ 0.5 / sigma**2 * np.sum(
(self._values - function_values)**2
/ function_values ** (2 * eta), axis=0))
return log_likelihood
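# Note (added comment, not in the original pints module): the trailing noise
# parameters of MultiplicativeGaussianLogLikelihood are interleaved per
# output, e.g. for two outputs
#     x = [theta_1, ..., theta_p, eta_1, sigma_1, eta_2, sigma_2],
# which is why __call__ above slices them with [0::2] (eta) and [1::2] (sigma).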
class ScaledLogLikelihood(pints.ProblemLogLikelihood):
"""
Calculates a log-likelihood based on a (conditional)
:class:`ProblemLogLikelihood` divided by the number of time samples.
The returned value will be ``(1 / n) * log_likelihood(x|problem)``, where
``n`` is the number of time samples multiplied by the number of outputs.
This log-likelihood operates on both single and multi-output problems.
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
log_likelihood
A :class:`ProblemLogLikelihood` to scale.
"""
def __init__(self, log_likelihood):
# Check arguments
if not isinstance(log_likelihood, pints.ProblemLogLikelihood):
raise ValueError(
'Given log_likelihood must extend pints.ProblemLogLikelihood')
# Call parent constructor
super(ScaledLogLikelihood, self).__init__(log_likelihood._problem)
# Store log-likelihood
self._log_likelihood = log_likelihood
# Pre-calculate parts
self._f = 1.0 / np.product(self._values.shape)
def __call__(self, x):
return self._f * self._log_likelihood(x)
def evaluateS1(self, x):
"""
See :meth:`LogPDF.evaluateS1()`.
This method only works if the underlying :class:`LogPDF` object
implements the optional method :meth:`LogPDF.evaluateS1()`!
"""
a, b = self._log_likelihood.evaluateS1(x)
return self._f * a, self._f * np.asarray(b)
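# Note (added comment, not in the original pints module): self._f above equals
# 1 / (n_times * n_outputs), since self._values has shape (n_times, n_outputs)
# (or (n_times,) for a single-output problem), so both the scaled value and the
# scaled sensitivities returned here are divided by the total number of data
# points.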
class StudentTLogLikelihood(pints.ProblemLogLikelihood):
r"""
Calculates a log-likelihood assuming independent Student-t-distributed
noise at each time point, and adds two parameters: one representing the
degrees of freedom (``nu``), the other representing the scale (``sigma``).
For a noise characterised by ``nu`` and ``sigma``, the log likelihood is of
the form:
.. math::
\log{L(\theta, \nu, \sigma|\boldsymbol{x})} =
N\frac{\nu}{2}\log(\nu) - N\log(\sigma) -
N\log B(\nu/2, 1/2)
-\frac{1+\nu}{2}\sum_{i=1}^N\log\left(\nu +
\left(\frac{x_i - f(\theta)}{\sigma}\right)^2\right)
where ``B(.,.)`` is a beta function.
Extends :class:`ProblemLogLikelihood`.
Parameters
----------
problem
A :class:`SingleOutputProblem` or :class:`MultiOutputProblem`. For a
single-output problem two parameters are added ``(nu, sigma)``, where
``nu`` is the degrees of freedom and ``sigma`` is the scale, for a
multi-output problem ``2 * n_outputs`` parameters are added.
"""
def __init__(self, problem):
super(StudentTLogLikelihood, self).__init__(problem)
# Get number of times, number of outputs
self._nt = len(self._times)
self._no = problem.n_outputs()
# Add parameters to problem (two for each output)
self._n_parameters = problem.n_parameters() + 2 * self._no
# Pre-calculate
self._n = len(self._times)
def __call__(self, x):
# For multiparameter problems the parameters are stored as
# (model_params_1, model_params_2, ..., model_params_k,
# nu_1, sigma_1, nu_2, sigma_2,...)
n = self._n
m = 2 * self._no
# problem parameters
problem_parameters = x[:-m]
error = self._values - self._problem.evaluate(problem_parameters)
# Distribution parameters
parameters = x[-m:]
nu = np.asarray(parameters[0::2])
sigma = np.asarray(parameters[1::2])
if any(nu <= 0) or any(sigma <= 0):
return -np.inf
# Calculate
return np.sum(
+ 0.5 * n * nu * np.log(nu)
- n * np.log(sigma)
- n * np.log(scipy.special.beta(0.5 * nu, 0.5))
- 0.5 * (1 + nu) * np.sum(np.log(nu + (error / sigma)**2), axis=0)
)
class UnknownNoiseLogLikelihood(GaussianLogLikelihood):
"""
Deprecated alias of :class:`GaussianLogLikelihood`
"""
def __init__(self, problem):
# Deprecated on 2019-02-06
import warnings
warnings.warn(
'The class `pints.UnknownNoiseLogLikelihood` is deprecated.'
' Please use `pints.GaussianLogLikelihood` instead.')
super(UnknownNoiseLogLikelihood, self).__init__(problem)
|
martinjrobins/hobo
|
pints/_log_likelihoods.py
|
Python
|
bsd-3-clause
| 36,995
|
[
"Gaussian"
] |
d5ef51c606f6babcdb5b673120c3cfced0eeab42fe462f10a8e6bde53cd62250
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import os
import warnings
import datetime
from distutils.version import StrictVersion
import cf_units
import numpy as np
import iris
import iris.cube
import iris.coord_systems
import iris.coords
if tests.GRIB_AVAILABLE:
import gribapi
@tests.skip_data
@tests.skip_grib
class TestLoadSave(tests.TestGribMessage):
def setUp(self):
self.skip_keys = []
if gribapi.__version__ < '1.13':
self.skip_keys = ['g2grid', 'gridDescriptionSectionPresent',
'latitudeOfLastGridPointInDegrees',
'iDirectionIncrementInDegrees',
'longitudeOfLastGridPointInDegrees',
'latLonValues', 'distinctLatitudes',
'distinctLongitudes', 'lengthOfHeaders',
'values', 'x']
def test_latlon_forecast_plev(self):
source_grib = tests.get_data_path(("GRIB", "uk_t", "uk_t.grib2"))
cubes = iris.load(source_grib)
with self.temp_filename(suffix='.grib2') as temp_file_path:
iris.save(cubes, temp_file_path)
expect_diffs = {'totalLength': (4837, 4832),
'productionStatusOfProcessedData': (0, 255),
'scaleFactorOfRadiusOfSphericalEarth': (4294967295,
0),
'shapeOfTheEarth': (0, 1),
'scaledValueOfRadiusOfSphericalEarth': (4294967295,
6367470),
'typeOfGeneratingProcess': (0, 255),
'generatingProcessIdentifier': (128, 255),
}
self.assertGribMessageDifference(source_grib, temp_file_path,
expect_diffs, self.skip_keys,
skip_sections=[2])
def test_rotated_latlon(self):
source_grib = tests.get_data_path(("GRIB", "rotated_nae_t",
"sensible_pole.grib2"))
cubes = iris.load(source_grib)
with self.temp_filename(suffix='.grib2') as temp_file_path:
iris.save(cubes, temp_file_path)
expect_diffs = {'totalLength': (648196, 648191),
'productionStatusOfProcessedData': (0, 255),
'scaleFactorOfRadiusOfSphericalEarth': (4294967295,
0),
'shapeOfTheEarth': (0, 1),
'scaledValueOfRadiusOfSphericalEarth': (4294967295,
6367470),
'iDirectionIncrement': (109994, 109993),
'longitudeOfLastGridPoint': (392109982, 32106370),
'latitudeOfLastGridPoint': (19419996, 19419285),
'typeOfGeneratingProcess': (0, 255),
'generatingProcessIdentifier': (128, 255),
}
self.assertGribMessageDifference(source_grib, temp_file_path,
expect_diffs, self.skip_keys,
skip_sections=[2])
def test_time_mean(self):
# This test for time-mean fields also tests negative forecast time.
try:
iris.fileformats.grib.hindcast_workaround = True
source_grib = tests.get_data_path(("GRIB", "time_processed",
"time_bound.grib2"))
cubes = iris.load(source_grib)
expect_diffs = {'totalLength': (21232, 21227),
'productionStatusOfProcessedData': (0, 255),
'scaleFactorOfRadiusOfSphericalEarth': (4294967295,
0),
'shapeOfTheEarth': (0, 1),
'scaledValueOfRadiusOfSphericalEarth': (4294967295,
6367470),
'longitudeOfLastGridPoint': (356249908, 356249810),
'latitudeOfLastGridPoint': (-89999938, -89999944),
'typeOfGeneratingProcess': (0, 255),
'generatingProcessIdentifier': (128, 255),
'typeOfTimeIncrement': (2, 255)
}
self.skip_keys.append('stepType')
self.skip_keys.append('stepTypeInternal')
with self.temp_filename(suffix='.grib2') as temp_file_path:
iris.save(cubes, temp_file_path)
self.assertGribMessageDifference(source_grib, temp_file_path,
expect_diffs, self.skip_keys,
skip_sections=[2])
finally:
iris.fileformats.grib.hindcast_workaround = False
@tests.skip_data
@tests.skip_grib
class TestCubeSave(tests.IrisTest):
# save fabricated cubes
def _load_basic(self):
path = tests.get_data_path(("GRIB", "uk_t", "uk_t.grib2"))
return iris.load(path)[0]
def test_params(self):
# TODO
pass
def test_originating_centre(self):
# TODO
pass
def test_irregular(self):
cube = self._load_basic()
lat_coord = cube.coord("latitude")
cube.remove_coord("latitude")
new_lats = np.append(lat_coord.points[:-1], lat_coord.points[0]) # Irregular
cube.add_aux_coord(iris.coords.AuxCoord(new_lats, "latitude", units="degrees", coord_system=lat_coord.coord_system), 0)
saved_grib = iris.util.create_temp_filename(suffix='.grib2')
self.assertRaises(iris.exceptions.TranslationError, iris.save, cube, saved_grib)
os.remove(saved_grib)
def test_non_latlon(self):
cube = self._load_basic()
cube.coord(dimensions=[0]).coord_system = None
saved_grib = iris.util.create_temp_filename(suffix='.grib2')
self.assertRaises(iris.exceptions.TranslationError, iris.save, cube, saved_grib)
os.remove(saved_grib)
def test_forecast_period(self):
# unhandled unit
cube = self._load_basic()
cube.coord("forecast_period").units = cf_units.Unit("years")
saved_grib = iris.util.create_temp_filename(suffix='.grib2')
self.assertRaises(iris.exceptions.TranslationError, iris.save, cube, saved_grib)
os.remove(saved_grib)
def test_unhandled_vertical(self):
# unhandled level type
cube = self._load_basic()
# Adjust the 'pressure' coord to make it into an "unrecognised Z coord"
p_coord = cube.coord("pressure")
p_coord.rename("not the messiah")
p_coord.units = 'K'
p_coord.attributes['positive'] = 'up'
saved_grib = iris.util.create_temp_filename(suffix='.grib2')
with self.assertRaises(iris.exceptions.TranslationError):
iris.save(cube, saved_grib)
os.remove(saved_grib)
def test_scalar_int32_pressure(self):
# Make sure we can save a scalar int32 coordinate with unit conversion.
cube = self._load_basic()
cube.coord("pressure").points = np.array([200], dtype=np.int32)
cube.coord("pressure").units = "hPa"
with self.temp_filename(".grib2") as testfile:
iris.save(cube, testfile)
def test_bounded_level(self):
cube = iris.load_cube(tests.get_data_path(("GRIB", "uk_t",
"uk_t.grib2")))
# Changing pressure to altitude due to grib api bug:
# https://github.com/SciTools/iris/pull/715#discussion_r5901538
cube.remove_coord("pressure")
cube.add_aux_coord(iris.coords.AuxCoord(
1030.0, long_name='altitude', units='m',
bounds=np.array([111.0, 1949.0])))
with self.temp_filename(".grib2") as testfile:
iris.save(cube, testfile)
with open(testfile, "rb") as saved_file:
g = gribapi.grib_new_from_file(saved_file)
self.assertEqual(
gribapi.grib_get_double(g,
"scaledValueOfFirstFixedSurface"),
111.0)
self.assertEqual(
gribapi.grib_get_double(g,
"scaledValueOfSecondFixedSurface"),
1949.0)
@tests.skip_grib
class TestHandmade(tests.IrisTest):
def _lat_lon_cube_no_time(self):
"""Returns a cube with a latitude and longitude suitable for testing saving to PP/NetCDF etc."""
cube = iris.cube.Cube(np.arange(12, dtype=np.int32).reshape((3, 4)))
cs = iris.coord_systems.GeogCS(6371229)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(4) * 90 + -180, 'longitude', units='degrees', coord_system=cs), 1)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(3) * 45 + -90, 'latitude', units='degrees', coord_system=cs), 0)
return cube
def _cube_time_no_forecast(self):
cube = self._lat_lon_cube_no_time()
unit = cf_units.Unit('hours since epoch', calendar=cf_units.CALENDAR_GREGORIAN)
dt = datetime.datetime(2010, 12, 31, 12, 0)
cube.add_aux_coord(iris.coords.AuxCoord(np.array([unit.date2num(dt)], dtype=np.float64), 'time', units=unit))
return cube
def _cube_with_forecast(self):
cube = self._cube_time_no_forecast()
cube.add_aux_coord(iris.coords.AuxCoord(np.array([6], dtype=np.int32), 'forecast_period', units='hours'))
return cube
def _cube_with_pressure(self):
cube = self._cube_with_forecast()
cube.add_aux_coord(iris.coords.DimCoord(np.int32(10), 'air_pressure', units='Pa'))
return cube
def _cube_with_time_bounds(self):
cube = self._cube_with_pressure()
cube.coord("time").bounds = np.array([[0, 100]])
return cube
def test_no_time_cube(self):
cube = self._lat_lon_cube_no_time()
saved_grib = iris.util.create_temp_filename(suffix='.grib2')
self.assertRaises(iris.exceptions.TranslationError, iris.save, cube, saved_grib)
os.remove(saved_grib)
def test_cube_with_time_bounds(self):
cube = self._cube_with_time_bounds()
saved_grib = iris.util.create_temp_filename(suffix='.grib2')
self.assertRaises(iris.exceptions.TranslationError, iris.save, cube, saved_grib)
os.remove(saved_grib)
if __name__ == "__main__":
tests.main()
|
SusanJL/iris
|
lib/iris/tests/test_grib_save.py
|
Python
|
gpl-3.0
| 11,825
|
[
"NetCDF"
] |
e740d965d7f7fd069591fbdf7055469840b0a51374692ccdbb73de0923404540
|
# Audio Tools, a module and set of tools for manipulating audio data
# Copyright (C) 2007-2016 Brian Langenberger
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""the TOC file module"""
from audiotools import Sheet, SheetTrack, SheetIndex, SheetException
class TOCFile(Sheet):
def __init__(self, type, tracks, catalog=None, cd_text=None):
from sys import version_info
str_type = str if (version_info[0] >= 3) else unicode
assert(isinstance(type, str_type))
assert((catalog is None) or isinstance(catalog, str_type))
assert((cd_text is None) or isinstance(cd_text, CDText))
self.__type__ = type
for (i, t) in enumerate(tracks, 1):
t.__number__ = i
self.__tracks__ = tracks
self.__catalog__ = catalog
self.__cd_text__ = cd_text
def __repr__(self):
return "TOCFile({})".format(
", ".join(["{}={!r}".format(attr,
getattr(self, "__" + attr + "__"))
for attr in ["type",
"tracks",
"catalog",
"cd_text"]]))
@classmethod
def converted(cls, sheet, filename=None):
"""given a Sheet object, returns a TOCFile object"""
tracks = list(sheet)
metadata = sheet.get_metadata()
if metadata is not None:
if metadata.catalog is not None:
catalog = metadata.catalog
else:
catalog = None
cd_text = CDText.from_disc_metadata(metadata)
else:
catalog = None
cd_text = None
return cls(type=u"CD_DA",
tracks=[TOCTrack.converted(sheettrack=track,
next_sheettrack=next_track,
filename=filename)
for (track, next_track) in
zip(tracks, tracks[1:] + [None])],
catalog=catalog,
cd_text=cd_text)
def __len__(self):
return len(self.__tracks__)
def __getitem__(self, index):
return self.__tracks__[index]
def build(self):
"""returns the TOCFile as a string"""
output = [self.__type__, u""]
if self.__catalog__ is not None:
output.extend([
u"CATALOG {}".format(format_string(self.__catalog__)), u""])
if self.__cd_text__ is not None:
output.append(self.__cd_text__.build())
output.extend([track.build() for track in self.__tracks__])
return u"\n".join(output) + u"\n"
def get_metadata(self):
"""returns MetaData of Sheet, or None
this metadata often contains information such as catalog number
or CD-TEXT values"""
from audiotools import MetaData
if (self.__catalog__ is not None) and (self.__cd_text__ is not None):
metadata = self.__cd_text__.to_disc_metadata()
metadata.catalog = self.__catalog__
return metadata
elif self.__catalog__ is not None:
return MetaData(catalog=self.__catalog__)
elif self.__cd_text__ is not None:
return self.__cd_text__.to_disc_metadata()
else:
return None
class TOCTrack(SheetTrack):
def __init__(self, mode, flags, sub_channel_mode=None):
from audiotools import SheetIndex
from sys import version_info
str_type = str if (version_info[0] >= 3) else unicode
assert(isinstance(mode, str_type))
assert((sub_channel_mode is None) or
isinstance(sub_channel_mode, str_type))
self.__number__ = None # to be filled-in later
self.__mode__ = mode
self.__sub_channel_mode__ = sub_channel_mode
self.__flags__ = flags
indexes = []
pre_gap = None
file_start = None
file_length = None
for flag in flags:
if isinstance(flag, TOCFlag_FILE):
file_start = flag.start()
file_length = flag.length()
elif isinstance(flag, TOCFlag_START):
if flag.start() is not None:
pre_gap = flag.start()
else:
pre_gap = file_length
elif isinstance(flag, TOCFlag_INDEX):
indexes.append(flag.index())
if pre_gap is None:
# first index point is 1
self.__indexes__ = ([SheetIndex(number=1, offset=file_start)] +
[SheetIndex(number=i, offset=index)
for (i, index) in enumerate(indexes, 2)])
else:
# first index point is 0
self.__indexes__ = ([SheetIndex(number=0,
offset=file_start),
SheetIndex(number=1,
offset=file_start + pre_gap)] +
[SheetIndex(number=i, offset=index)
for (i, index) in enumerate(indexes, 2)])
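# Note (added comment, not in the original module): when a START flag is
# present the track has a pre-gap, so index 0 marks the file start and index 1
# marks file_start + pre_gap; without a START flag, index 1 is placed at
# file_start and any INDEX flags are numbered from 2 onward in both cases.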
@classmethod
def converted(cls, sheettrack, next_sheettrack, filename=None):
"""given a SheetTrack object, returns a TOCTrack object"""
metadata = sheettrack.get_metadata()
flags = []
if metadata is not None:
if metadata.ISRC is not None:
flags.append(TOCFlag_ISRC(metadata.ISRC))
cdtext = CDText.from_track_metadata(metadata)
if cdtext is not None:
flags.append(cdtext)
if sheettrack.copy_permitted():
flags.append(TOCFlag_COPY(True))
if sheettrack.pre_emphasis():
flags.append(TOCFlag_PRE_EMPHASIS(True))
if len(sheettrack) > 0:
if ((next_sheettrack is not None) and
(sheettrack.filename() == next_sheettrack.filename())):
length = (next_sheettrack[0].offset() -
sheettrack[0].offset())
else:
length = None
flags.append(TOCFlag_FILE(
type=u"AUDIOFILE",
filename=(filename if
filename is not None else
sheettrack.filename()),
start=sheettrack[0].offset(),
length=length))
if sheettrack[0].number() == 0:
# first index point is 0 so track contains pre-gap
flags.append(TOCFlag_START(sheettrack[1].offset() -
sheettrack[0].offset()))
for index in sheettrack[2:]:
flags.append(TOCFlag_INDEX(index.offset()))
else:
# track contains no pre-gap
for index in sheettrack[1:]:
flags.append(TOCFlag_INDEX(index.offset()))
return cls(mode=(u"AUDIO" if sheettrack.is_audio() else u"MODE1"),
flags=flags)
def first_flag(self, flag_class):
"""returns the first flag in the list with the given class
or None if not found"""
for flag in self.__flags__:
if isinstance(flag, flag_class):
return flag
else:
return None
def all_flags(self, flag_class):
"""returns a list of all flags in the list with the given class"""
return [f for f in self.__flags__ if isinstance(f, flag_class)]
def __repr__(self):
return "TOCTrack({})".format(
", ".join(["{}={!r}".format(attr,
getattr(self, "__" + attr + "__"))
for attr in ["number",
"mode",
"sub_channel_mode",
"flags"]]))
def __len__(self):
return len(self.__indexes__)
def __getitem__(self, index):
return self.__indexes__[index]
def number(self):
"""returns track's number as an integer"""
return self.__number__
def get_metadata(self):
"""returns SheetTrack's MetaData, or None"""
from audiotools import MetaData
isrc = self.first_flag(TOCFlag_ISRC)
cd_text = self.first_flag(CDText)
if (isrc is not None) and (cd_text is not None):
metadata = cd_text.to_track_metadata()
metadata.ISRC = isrc.isrc()
return metadata
elif cd_text is not None:
return cd_text.to_track_metadata()
elif isrc is not None:
return MetaData(ISRC=isrc.isrc())
else:
return None
def filename(self):
"""returns SheetTrack's filename as a string"""
filename = self.first_flag(TOCFlag_FILE)
if filename is not None:
return filename.filename()
else:
return u""
def is_audio(self):
"""returns True if track contains audio data"""
return self.__mode__ == u"AUDIO"
def pre_emphasis(self):
"""returns whether SheetTrack has pre-emphasis"""
pre_emphasis = self.first_flag(TOCFlag_PRE_EMPHASIS)
if pre_emphasis is not None:
return pre_emphasis.pre_emphasis()
else:
return False
def copy_permitted(self):
"""returns whether copying is permitted"""
copy = self.first_flag(TOCFlag_COPY)
if copy is not None:
return copy.copy()
else:
return False
def build(self):
"""returns the TOCTrack as a string"""
output = [(u"TRACK {}".format(self.__mode__) if
(self.__sub_channel_mode__ is None) else
u"TRACK {} {}".format(self.__mode__,
self.__sub_channel_mode__))]
output.extend([flag.build() for flag in self.__flags__])
output.append(u"")
return u"\n".join(output)
class TOCFlag(object):
def __init__(self, attrs):
self.__attrs__ = attrs
def __repr__(self):
return "{}({})".format(
self.__class__.__name__,
", ".join(["{}={!r}".format(attr,
getattr(self, "__" + attr + "__"))
for attr in self.__attrs__]))
def build(self):
"""returns the TOCTracFlag as a string"""
# implement this in TOCFlag subclasses
raise NotImplementedError()
class TOCFlag_COPY(TOCFlag):
def __init__(self, copy):
TOCFlag.__init__(self, ["copy"])
assert(isinstance(copy, bool))
self.__copy__ = copy
def copy(self):
return self.__copy__
def build(self):
return u"COPY" if self.__copy__ else u"NO COPY"
class TOCFlag_PRE_EMPHASIS(TOCFlag):
def __init__(self, pre_emphasis):
TOCFlag.__init__(self, ["pre_emphasis"])
assert(isinstance(pre_emphasis, bool))
self.__pre_emphasis__ = pre_emphasis
def pre_emphasis(self):
return self.__pre_emphasis__
def build(self):
return u"PRE_EMPHASIS" if self.__pre_emphasis__ else u"NO PRE_EMPHASIS"
class TOCFlag_CHANNELS(TOCFlag):
def __init__(self, channels):
TOCFlag.__init__(self, ["channels"])
assert((channels == 2) or (channels == 4))
self.__channels__ = channels
def build(self):
return (u"TWO_CHANNEL_AUDIO" if
(self.__channels__ == 2) else
u"FOUR_CHANNEL_AUDIO")
class TOCFlag_ISRC(TOCFlag):
def __init__(self, isrc):
TOCFlag.__init__(self, ["isrc"])
from sys import version_info
str_type = str if (version_info[0] >= 3) else unicode
assert(isinstance(isrc, str_type))
self.__isrc__ = isrc
def isrc(self):
return self.__isrc__
def build(self):
return u"ISRC {}".format(format_string(self.__isrc__))
class TOCFlag_FILE(TOCFlag):
def __init__(self, type, filename, start, length=None):
TOCFlag.__init__(self, ["type", "filename", "start", "length"])
from sys import version_info
from fractions import Fraction
str_type = str if (version_info[0] >= 3) else unicode
assert(isinstance(type, str_type))
assert(isinstance(filename, str_type))
assert(isinstance(start, Fraction))
assert((length is None) or isinstance(length, Fraction))
self.__type__ = type
self.__filename__ = filename
self.__start__ = start
self.__length__ = length
def filename(self):
return self.__filename__
def start(self):
return self.__start__
def length(self):
return self.__length__
def build(self):
if self.__length__ is None:
return u"{} {} {}".format(self.__type__,
format_string(self.__filename__),
format_timestamp(self.__start__))
else:
return u"{} {} {} {}".format(self.__type__,
format_string(self.__filename__),
format_timestamp(self.__start__),
format_timestamp(self.__length__))
class TOCFlag_START(TOCFlag):
def __init__(self, start=None):
TOCFlag.__init__(self, ["start"])
from fractions import Fraction
assert((start is None) or isinstance(start, Fraction))
self.__start__ = start
def start(self):
return self.__start__
def build(self):
if self.__start__ is None:
return u"START"
else:
return u"START {}".format(format_timestamp(self.__start__))
class TOCFlag_INDEX(TOCFlag):
def __init__(self, index):
TOCFlag.__init__(self, ["index"])
from fractions import Fraction
assert(isinstance(index, Fraction))
self.__index__ = index
def index(self):
return self.__index__
def build(self):
return u"INDEX {}".format(format_timestamp(self.__index__))
class CDText(object):
def __init__(self, languages, language_map=None):
self.__languages__ = languages
self.__language_map__ = language_map
def __repr__(self):
return "CDText(languages={!r}, language_map={!r})".format(
self.__languages__, self.__language_map__)
def get(self, key, default):
for language in self.__languages__:
try:
return language[key]
except KeyError:
pass
else:
return default
def build(self):
output = [u"CD_TEXT {"]
if self.__language_map__ is not None:
output.append(self.__language_map__.build())
output.append(u"")
output.extend([language.build() for language in self.__languages__])
output.append(u"}")
return u"\n".join(output)
def to_disc_metadata(self):
from audiotools import MetaData
return MetaData(
album_name=self.get(u"TITLE", None),
performer_name=self.get(u"PERFORMER", None),
artist_name=self.get(u"SONGWRITER", None),
composer_name=self.get(u"COMPOSER", None),
comment=self.get(u"MESSAGE", None))
@classmethod
def from_disc_metadata(cls, metadata):
text_pairs = []
if metadata is not None:
if metadata.album_name is not None:
text_pairs.append((u"TITLE", metadata.album_name))
if metadata.performer_name is not None:
text_pairs.append((u"PERFORMER", metadata.performer_name))
if metadata.artist_name is not None:
text_pairs.append((u"SONGWRITER", metadata.artist_name))
if metadata.composer_name is not None:
text_pairs.append((u"COMPOSER", metadata.composer_name))
if metadata.comment is not None:
text_pairs.append((u"MESSAGE", metadata.comment))
if len(text_pairs) > 0:
return cls(languages=[CDTextLanguage(language_id=0,
text_pairs=text_pairs)],
language_map=CDTextLanguageMap([(0, u"EN")]))
else:
return None
def to_track_metadata(self):
from audiotools import MetaData
return MetaData(
track_name=self.get(u"TITLE", None),
performer_name=self.get(u"PERFORMER", None),
artist_name=self.get(u"SONGWRITER", None),
composer_name=self.get(u"COMPOSER", None),
comment=self.get(u"MESSAGE", None),
ISRC=self.get(u"ISRC", None))
@classmethod
def from_track_metadata(cls, metadata):
text_pairs = []
if metadata is not None:
if metadata.track_name is not None:
text_pairs.append((u"TITLE", metadata.track_name))
if metadata.performer_name is not None:
text_pairs.append((u"PERFORMER", metadata.performer_name))
if metadata.artist_name is not None:
text_pairs.append((u"SONGWRITER", metadata.artist_name))
if metadata.composer_name is not None:
text_pairs.append((u"COMPOSER", metadata.composer_name))
if metadata.comment is not None:
text_pairs.append((u"MESSAGE", metadata.comment))
# ISRC is handled in its own flag
if len(text_pairs) > 0:
return cls(languages=[CDTextLanguage(language_id=0,
text_pairs=text_pairs)])
else:
return None
class CDTextLanguage(object):
def __init__(self, language_id, text_pairs):
self.__id__ = language_id
self.__text_pairs__ = text_pairs
def __repr__(self):
return "CDTextLanguage(language_id={!r}, text_pairs={!r})".format(
self.__id__, self.__text_pairs__)
def __len__(self):
return len(self.__text_pairs__)
def __getitem__(self, key):
for (k, v) in self.__text_pairs__:
if k == key:
return v
else:
raise KeyError(key)
def build(self):
output = [u"LANGUAGE {:d} {{".format(self.__id__)]
for (key, value) in self.__text_pairs__:
if key in {u"TOC_INFO1", u"TOC_INFO2", u"SIZE_INFO"}:
output.append(u" {} {}".format(key, format_binary(value)))
else:
output.append(u" {} {}".format(key, format_string(value)))
output.append(u"}")
return u"\n".join([u" " + l for l in output])
class CDTextLanguageMap(object):
def __init__(self, mapping):
self.__mapping__ = mapping
def __repr__(self):
return "CDTextLanguageMap(mapping={!r})".format(self.__mapping__)
def build(self):
output = [u"LANGUAGE_MAP {"]
output.extend([u" {:d} : {}".format(i, l)
for (i, l) in self.__mapping__])
output.append(u"}")
return u"\n".join([u" " + l for l in output])
def format_string(s):
return u"\"{}\"".format(s.replace(u'\\', u'\\\\').replace(u'"', u'\\"'))
def format_timestamp(t):
sectors = int(t * 75)
return u"{:02d}:{:02d}:{:02d}".format(sectors // 75 // 60,
sectors // 75 % 60,
sectors % 75)
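# Worked example (added comment, not in the original module): timestamps are
# Fractions of a second converted to CD frames at 75 frames per second and
# rendered as MM:SS:FF.  For instance Fraction(61, 1) gives 61 * 75 = 4575
# frames, which formats as u"01:01:00".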
def format_binary(s):
return u"{{{}}}".format(",".join([u"{:d}".format(int(c)) for c in s]))
def read_tocfile(filename):
"""returns a Sheet from a TOC filename on disk
raises TOCException if some error occurs reading or parsing the file
"""
try:
with open(filename, "rb") as f:
return read_tocfile_string(f.read().decode("UTF-8"))
except IOError:
raise SheetException("unable to open file")
def read_tocfile_string(tocfile):
"""given a unicode string of .toc data, returns a TOCFile object
raises SheetException if some error occurs parsing the file"""
import audiotools.ply.lex as lex
import audiotools.ply.yacc as yacc
from audiotools.ply.yacc import NullLogger
import audiotools.toc.tokrules
import audiotools.toc.yaccrules
from sys import version_info
str_type = str if (version_info[0] >= 3) else unicode
assert(isinstance(tocfile, str_type))
lexer = lex.lex(module=audiotools.toc.tokrules)
lexer.input(tocfile)
parser = yacc.yacc(module=audiotools.toc.yaccrules,
debug=0,
errorlog=NullLogger(),
write_tables=0)
try:
return parser.parse(lexer=lexer)
except ValueError as err:
raise SheetException(str(err))
def write_tocfile(sheet, filename, file):
"""given a Sheet object and filename unicode string,
writes a .toc file to the given file object"""
file.write(
TOCFile.converted(sheet, filename=filename).build().encode("UTF-8"))
|
tuffy/python-audio-tools
|
audiotools/toc/__init__.py
|
Python
|
gpl-2.0
| 21,790
|
[
"Brian"
] |
be094c7f1f230e3abed8d39a27b87342f3440383c93ad2671389c71f1699d728
|
from .util import get_value_by_version, make_enum, OS_NAME
# base command
_simulation_base_command_matrix = {
"windows": {
(0, 0): "RunEPlus.bat",
(8, 2): "energyplus"
},
"osx": {
(0, 0): "runenergyplus",
(8, 2): "energyplus"
},
"linux": {
(0, 0): "bin/runenergyplus",
(8, 1): "runenergyplus",
(8, 4): "energyplus"
}
}
def get_simulation_base_command():
commands = _simulation_base_command_matrix[OS_NAME]
return get_value_by_version(commands)
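# Note (added comment, not part of the original module, and an assumption
# about get_value_by_version from .util, which is not shown here): the
# (major, minor) keys used in these version matrices appear to be minimum
# EnergyPlus versions, with get_value_by_version presumably returning the
# entry whose key is the highest one not exceeding the detected version.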
# inputs
SIMULATION_INPUT_COMMAND_STYLES = make_enum(
"simu_dir", # {simulation_dir_path}/{simulation_base_name}
"file_path", # file_path
)
_simulation_input_command_matrix = {
"windows": {
"idf": {
(0, 0): "simu_dir",
(8, 1): "file_path"
},
"epw": {
(0, 0): "simu_dir",
(8, 2): "file_path"
}
},
"osx": {
"idf": {
(0, 0): "simu_dir",
(8, 1): "file_path"
},
"epw": {
(0, 0): "file_path"
}
},
"linux": {
"idf": {
(0, 0): "file_path"
},
"epw": {
(0, 0): "file_path"
}
}
}
def get_simulation_input_command_style(extension):
if extension not in ("idf", "epw"):
raise ValueError(f"unknown extension: {extension}")
styles = _simulation_input_command_matrix[OS_NAME][extension]
return get_value_by_version(styles)
# command style
SIMULATION_COMMAND_STYLES = make_enum(
"args",
"kwargs",
)
_simulation_command_styles_matrix = {
"windows": {
(0, 0): "args",
(8, 2): "kwargs"
},
"osx": {
(0, 0): "args",
(8, 2): "kwargs"
},
"linux": {
(0, 0): "args",
(8, 5): "kwargs"
}
}
def get_simulation_command_style():
return get_value_by_version(_simulation_command_styles_matrix[OS_NAME])
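# Illustrative sketch (not part of the original module): the three getters
# above are typically combined when assembling a simulation command line; the
# surrounding oplus code that does this is not shown here, so this helper is
# only a hypothetical illustration of how they fit together.
def _example_describe_command_conventions():
    base = get_simulation_base_command()
    idf_style = get_simulation_input_command_style("idf")
    epw_style = get_simulation_input_command_style("epw")
    command_style = get_simulation_command_style()
    return base, idf_style, epw_style, command_style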
|
Openergy/oplus
|
oplus/compatibility/simulation.py
|
Python
|
mpl-2.0
| 1,958
|
[
"EPW"
] |
e60e318dbd09dc1b7e8edb481cddf52ff27bc4a4ca5f3506dbdee0bf123fcf1a
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import array as pyarray
import warnings
if sys.version > '3':
xrange = range
basestring = str
from math import exp, log
from numpy import array, random, tile
from collections import namedtuple
from pyspark import SparkContext, since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, callJavaFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector, DenseVector
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.util import Saveable, Loader, inherit_doc, JavaLoader, JavaSaveable
from pyspark.streaming import DStream
__all__ = ['BisectingKMeansModel', 'BisectingKMeans', 'KMeansModel', 'KMeans',
'GaussianMixtureModel', 'GaussianMixture', 'PowerIterationClusteringModel',
'PowerIterationClustering', 'StreamingKMeans', 'StreamingKMeansModel',
'LDA', 'LDAModel']
@inherit_doc
class BisectingKMeansModel(JavaModelWrapper):
"""
A clustering model derived from the bisecting k-means method.
>>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
>>> bskm = BisectingKMeans()
>>> model = bskm.train(sc.parallelize(data, 2), k=4)
>>> p = array([0.0, 0.0])
>>> model.predict(p)
0
>>> model.k
4
>>> model.computeCost(p)
0.0
.. versionadded:: 2.0.0
"""
def __init__(self, java_model):
super(BisectingKMeansModel, self).__init__(java_model)
self.centers = [c.toArray() for c in self.call("clusterCenters")]
@property
@since('2.0.0')
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy
arrays."""
return self.centers
@property
@since('2.0.0')
def k(self):
"""Get the number of clusters"""
return self.call("k")
@since('2.0.0')
def predict(self, x):
"""
Find the cluster that each of the points belongs to in this
model.
:param x:
A data point (or RDD of points) to determine cluster index.
:return:
Predicted cluster index or an RDD of predicted cluster indices
if the input is an RDD.
"""
if isinstance(x, RDD):
vecs = x.map(_convert_to_vector)
return self.call("predict", vecs)
x = _convert_to_vector(x)
return self.call("predict", x)
@since('2.0.0')
def computeCost(self, x):
"""
Return the Bisecting K-means cost (sum of squared distances of
points to their nearest center) for this model on the given
data. If provided with an RDD of points returns the sum.
:param point:
A data point (or RDD of points) to compute the cost(s).
"""
if isinstance(x, RDD):
vecs = x.map(_convert_to_vector)
return self.call("computeCost", vecs)
return self.call("computeCost", _convert_to_vector(x))
class BisectingKMeans(object):
"""
A bisecting k-means algorithm based on the paper "A comparison of
document clustering techniques" by Steinbach, Karypis, and Kumar,
with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and
bisects each of them using k-means, until there are `k` leaf
clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped
together to increase parallelism. If bisecting all divisible
clusters on the bottom level would result more than `k` leaf
clusters, larger clusters get higher priority.
Based on
`Steinbach, Karypis, and Kumar, A comparison of document clustering
techniques, KDD Workshop on Text Mining, 2000
<http://glaros.dtc.umn.edu/gkhome/fetch/papers/docclusterKDDTMW00.pdf>`_.
.. versionadded:: 2.0.0
"""
@classmethod
@since('2.0.0')
def train(self, rdd, k=4, maxIterations=20, minDivisibleClusterSize=1.0, seed=-1888008604):
"""
Runs the bisecting k-means algorithm return the model.
:param rdd:
Training points as an `RDD` of `Vector` or convertible
sequence types.
:param k:
The desired number of leaf clusters. The actual number could
be smaller if there are no divisible leaf clusters.
(default: 4)
:param maxIterations:
Maximum number of iterations allowed to split clusters.
(default: 20)
:param minDivisibleClusterSize:
Minimum number of points (if >= 1.0) or the minimum proportion
of points (if < 1.0) of a divisible cluster.
(default: 1)
:param seed:
Random seed value for cluster initialization.
(default: -1888008604 from classOf[BisectingKMeans].getName.##)
"""
java_model = callMLlibFunc(
"trainBisectingKMeans", rdd.map(_convert_to_vector),
k, maxIterations, minDivisibleClusterSize, seed)
return BisectingKMeansModel(java_model)
@inherit_doc
class KMeansModel(Saveable, Loader):
"""A clustering model derived from the k-means method.
>>> data = array([0.0,0.0, 1.0,1.0, 9.0,8.0, 8.0,9.0]).reshape(4, 2)
>>> model = KMeans.train(
... sc.parallelize(data), 2, maxIterations=10, initializationMode="random",
... seed=50, initializationSteps=5, epsilon=1e-4)
>>> model.predict(array([0.0, 0.0])) == model.predict(array([1.0, 1.0]))
True
>>> model.predict(array([8.0, 9.0])) == model.predict(array([9.0, 8.0]))
True
>>> model.k
2
>>> model.computeCost(sc.parallelize(data))
2.0
>>> model = KMeans.train(sc.parallelize(data), 2)
>>> sparse_data = [
... SparseVector(3, {1: 1.0}),
... SparseVector(3, {1: 1.1}),
... SparseVector(3, {2: 1.0}),
... SparseVector(3, {2: 1.1})
... ]
>>> model = KMeans.train(sc.parallelize(sparse_data), 2, initializationMode="k-means||",
... seed=50, initializationSteps=5, epsilon=1e-4)
>>> model.predict(array([0., 1., 0.])) == model.predict(array([0, 1.1, 0.]))
True
>>> model.predict(array([0., 0., 1.])) == model.predict(array([0, 0, 1.1]))
True
>>> model.predict(sparse_data[0]) == model.predict(sparse_data[1])
True
>>> model.predict(sparse_data[2]) == model.predict(sparse_data[3])
True
>>> isinstance(model.clusterCenters, list)
True
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = KMeansModel.load(sc, path)
>>> sameModel.predict(sparse_data[0]) == model.predict(sparse_data[0])
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
>>> data = array([-383.1,-382.9, 28.7,31.2, 366.2,367.3]).reshape(3, 2)
>>> model = KMeans.train(sc.parallelize(data), 3, maxIterations=0,
... initialModel = KMeansModel([(-1000.0,-1000.0),(5.0,5.0),(1000.0,1000.0)]))
>>> model.clusterCenters
[array([-1000., -1000.]), array([ 5., 5.]), array([ 1000., 1000.])]
.. versionadded:: 0.9.0
"""
def __init__(self, centers):
self.centers = centers
@property
@since('1.0.0')
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return self.centers
@property
@since('1.4.0')
def k(self):
"""Total number of clusters."""
return len(self.centers)
@since('0.9.0')
def predict(self, x):
"""
Find the cluster that each of the points belongs to in this
model.
:param x:
A data point (or RDD of points) to determine cluster index.
:return:
Predicted cluster index or an RDD of predicted cluster indices
if the input is an RDD.
"""
best = 0
best_distance = float("inf")
if isinstance(x, RDD):
return x.map(self.predict)
x = _convert_to_vector(x)
for i in xrange(len(self.centers)):
distance = x.squared_distance(self.centers[i])
if distance < best_distance:
best = i
best_distance = distance
return best
@since('1.4.0')
def computeCost(self, rdd):
"""
Return the K-means cost (sum of squared distances of points to
their nearest center) for this model on the given
data.
:param rdd:
The RDD of points to compute the cost on.
"""
cost = callMLlibFunc("computeCostKmeansModel", rdd.map(_convert_to_vector),
[_convert_to_vector(c) for c in self.centers])
return cost
@since('1.4.0')
def save(self, sc, path):
"""
Save this model to the given path.
"""
java_centers = _py2java(sc, [_convert_to_vector(c) for c in self.centers])
java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel(java_centers)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since('1.4.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
java_model = sc._jvm.org.apache.spark.mllib.clustering.KMeansModel.load(sc._jsc.sc(), path)
return KMeansModel(_java2py(sc, java_model.clusterCenters()))
class KMeans(object):
"""
.. versionadded:: 0.9.0
"""
@classmethod
@since('0.9.0')
def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||",
seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None):
"""
Train a k-means clustering model.
:param rdd:
Training points as an `RDD` of `Vector` or convertible
sequence types.
:param k:
Number of clusters to create.
:param maxIterations:
Maximum number of iterations allowed.
(default: 100)
:param runs:
This param has no effect since Spark 2.0.0.
:param initializationMode:
The initialization algorithm. This can be either "random" or
"k-means||".
(default: "k-means||")
:param seed:
Random seed value for cluster initialization. Set as None to
generate seed based on system time.
(default: None)
:param initializationSteps:
Number of steps for the k-means|| initialization mode.
This is an advanced setting -- the default of 2 is almost
always enough.
(default: 2)
:param epsilon:
Distance threshold within which a center will be considered to
have converged. If all centers move less than this Euclidean
distance, iterations are stopped.
(default: 1e-4)
:param initialModel:
Initial cluster centers can be provided as a KMeansModel object
rather than using the random or k-means|| initializationModel.
(default: None)
"""
if runs != 1:
warnings.warn("The param `runs` has no effect since Spark 2.0.0.")
clusterInitialModel = []
if initialModel is not None:
if not isinstance(initialModel, KMeansModel):
raise Exception("initialModel is of "+str(type(initialModel))+". It needs "
"to be of <type 'KMeansModel'>")
clusterInitialModel = [_convert_to_vector(c) for c in initialModel.clusterCenters]
model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations,
runs, initializationMode, seed, initializationSteps, epsilon,
clusterInitialModel)
centers = callJavaFunc(rdd.context, model.clusterCenters)
return KMeansModel([c.toArray() for c in centers])
@inherit_doc
class GaussianMixtureModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
A clustering model derived from the Gaussian Mixture Model method.
>>> from pyspark.mllib.linalg import Vectors, DenseMatrix
>>> from numpy.testing import assert_equal
>>> from shutil import rmtree
>>> import os, tempfile
>>> clusterdata_1 = sc.parallelize(array([-0.1,-0.05,-0.01,-0.1,
... 0.9,0.8,0.75,0.935,
... -0.83,-0.68,-0.91,-0.76 ]).reshape(6, 2), 2)
>>> model = GaussianMixture.train(clusterdata_1, 3, convergenceTol=0.0001,
... maxIterations=50, seed=10)
>>> labels = model.predict(clusterdata_1).collect()
>>> labels[0]==labels[1]
False
>>> labels[1]==labels[2]
False
>>> labels[4]==labels[5]
True
>>> model.predict([-0.1,-0.05])
0
>>> softPredicted = model.predictSoft([-0.1,-0.05])
>>> abs(softPredicted[0] - 1.0) < 0.001
True
>>> abs(softPredicted[1] - 0.0) < 0.001
True
>>> abs(softPredicted[2] - 0.0) < 0.001
True
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = GaussianMixtureModel.load(sc, path)
>>> assert_equal(model.weights, sameModel.weights)
>>> mus, sigmas = list(
... zip(*[(g.mu, g.sigma) for g in model.gaussians]))
>>> sameMus, sameSigmas = list(
... zip(*[(g.mu, g.sigma) for g in sameModel.gaussians]))
>>> mus == sameMus
True
>>> sigmas == sameSigmas
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
>>> data = array([-5.1971, -2.5359, -3.8220,
... -5.2211, -5.0602, 4.7118,
... 6.8989, 3.4592, 4.6322,
... 5.7048, 4.6567, 5.5026,
... 4.5605, 5.2043, 6.2734])
>>> clusterdata_2 = sc.parallelize(data.reshape(5,3))
>>> model = GaussianMixture.train(clusterdata_2, 2, convergenceTol=0.0001,
... maxIterations=150, seed=4)
>>> labels = model.predict(clusterdata_2).collect()
>>> labels[0]==labels[1]
True
>>> labels[2]==labels[3]==labels[4]
True
.. versionadded:: 1.3.0
"""
@property
@since('1.4.0')
def weights(self):
"""
Weights for each Gaussian distribution in the mixture, where weights[i] is
the weight for Gaussian i, and weights.sum == 1.
"""
return array(self.call("weights"))
@property
@since('1.4.0')
def gaussians(self):
"""
Array of MultivariateGaussian where gaussians[i] represents
the Multivariate Gaussian (Normal) Distribution for Gaussian i.
"""
return [
MultivariateGaussian(gaussian[0], gaussian[1])
for gaussian in self.call("gaussians")]
@property
@since('1.4.0')
def k(self):
"""Number of gaussians in mixture."""
return len(self.weights)
@since('1.3.0')
def predict(self, x):
"""
Find the cluster to which the point 'x' or each point in RDD 'x'
has maximum membership in this model.
:param x:
A feature vector or an RDD of vectors representing data points.
:return:
Predicted cluster label or an RDD of predicted cluster labels
if the input is an RDD.
"""
if isinstance(x, RDD):
cluster_labels = self.predictSoft(x).map(lambda z: z.index(max(z)))
return cluster_labels
else:
z = self.predictSoft(x)
return z.argmax()
@since('1.3.0')
def predictSoft(self, x):
"""
Find the membership of point 'x' or each point in RDD 'x' to all mixture components.
:param x:
A feature vector or an RDD of vectors representing data points.
:return:
The membership value to all mixture components for vector 'x'
or each vector in RDD 'x'.
"""
if isinstance(x, RDD):
means, sigmas = zip(*[(g.mu, g.sigma) for g in self.gaussians])
membership_matrix = callMLlibFunc("predictSoftGMM", x.map(_convert_to_vector),
_convert_to_vector(self.weights), means, sigmas)
return membership_matrix.map(lambda x: pyarray.array('d', x))
else:
return self.call("predictSoft", _convert_to_vector(x)).toArray()
@classmethod
@since('1.5.0')
def load(cls, sc, path):
"""Load the GaussianMixtureModel from disk.
:param sc:
SparkContext.
:param path:
Path to where the model is stored.
"""
model = cls._load_java(sc, path)
wrapper = sc._jvm.org.apache.spark.mllib.api.python.GaussianMixtureModelWrapper(model)
return cls(wrapper)
class GaussianMixture(object):
"""
Learning algorithm for Gaussian Mixtures using the expectation-maximization algorithm.
.. versionadded:: 1.3.0
"""
@classmethod
@since('1.3.0')
def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None):
"""
Train a Gaussian Mixture clustering model.
:param rdd:
Training points as an `RDD` of `Vector` or convertible
sequence types.
:param k:
Number of independent Gaussians in the mixture model.
:param convergenceTol:
Maximum change in log-likelihood at which convergence is
considered to have occurred.
(default: 1e-3)
:param maxIterations:
Maximum number of iterations allowed.
(default: 100)
:param seed:
Random seed for initial Gaussian distribution. Set as None to
generate seed based on system time.
(default: None)
:param initialModel:
Initial GMM starting point, bypassing the random
initialization.
(default: None)
"""
initialModelWeights = None
initialModelMu = None
initialModelSigma = None
if initialModel is not None:
if initialModel.k != k:
raise Exception("Mismatched cluster count, initialModel.k = %s, however k = %s"
% (initialModel.k, k))
initialModelWeights = list(initialModel.weights)
initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)]
initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)]
java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector),
k, convergenceTol, maxIterations, seed,
initialModelWeights, initialModelMu, initialModelSigma)
return GaussianMixtureModel(java_model)
class PowerIterationClusteringModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
Model produced by [[PowerIterationClustering]].
>>> import math
>>> def genCircle(r, n):
... points = []
... for i in range(0, n):
... theta = 2.0 * math.pi * i / n
... points.append((r * math.cos(theta), r * math.sin(theta)))
... return points
>>> def sim(x, y):
... dist2 = (x[0] - y[0]) * (x[0] - y[0]) + (x[1] - y[1]) * (x[1] - y[1])
... return math.exp(-dist2 / 2.0)
>>> r1 = 1.0
>>> n1 = 10
>>> r2 = 4.0
>>> n2 = 40
>>> n = n1 + n2
>>> points = genCircle(r1, n1) + genCircle(r2, n2)
>>> similarities = [(i, j, sim(points[i], points[j])) for i in range(1, n) for j in range(0, i)]
>>> rdd = sc.parallelize(similarities, 2)
>>> model = PowerIterationClustering.train(rdd, 2, 40)
>>> model.k
2
>>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
>>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
True
>>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
True
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = PowerIterationClusteringModel.load(sc, path)
>>> sameModel.k
2
>>> result = sorted(model.assignments().collect(), key=lambda x: x.id)
>>> result[0].cluster == result[1].cluster == result[2].cluster == result[3].cluster
True
>>> result[4].cluster == result[5].cluster == result[6].cluster == result[7].cluster
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
.. versionadded:: 1.5.0
"""
@property
@since('1.5.0')
def k(self):
"""
Returns the number of clusters.
"""
return self.call("k")
@since('1.5.0')
def assignments(self):
"""
Returns the cluster assignments of this model.
"""
return self.call("getAssignments").map(
lambda x: (PowerIterationClustering.Assignment(*x)))
@classmethod
@since('1.5.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
model = cls._load_java(sc, path)
wrapper =\
sc._jvm.org.apache.spark.mllib.api.python.PowerIterationClusteringModelWrapper(model)
return PowerIterationClusteringModel(wrapper)
class PowerIterationClustering(object):
"""
Power Iteration Clustering (PIC), a scalable graph clustering algorithm
developed by [[http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf Lin and Cohen]].
From the abstract: PIC finds a very low-dimensional embedding of a
dataset using truncated power iteration on a normalized pair-wise
similarity matrix of the data.
.. versionadded:: 1.5.0
"""
@classmethod
@since('1.5.0')
def train(cls, rdd, k, maxIterations=100, initMode="random"):
r"""
:param rdd:
An RDD of (i, j, s\ :sub:`ij`\) tuples representing the
affinity matrix, which is the matrix A in the PIC paper. The
similarity s\ :sub:`ij`\ must be nonnegative. This is a symmetric
matrix and hence s\ :sub:`ij`\ = s\ :sub:`ji`\ For any (i, j) with
nonzero similarity, there should be either (i, j, s\ :sub:`ij`\) or
(j, i, s\ :sub:`ji`\) in the input. Tuples with i = j are ignored,
because it is assumed s\ :sub:`ij`\ = 0.0.
:param k:
Number of clusters.
:param maxIterations:
Maximum number of iterations of the PIC algorithm.
(default: 100)
:param initMode:
Initialization mode. This can be either "random" to use
a random vector as vertex properties, or "degree" to use
normalized sum similarities.
(default: "random")
"""
model = callMLlibFunc("trainPowerIterationClusteringModel",
rdd.map(_convert_to_vector), int(k), int(maxIterations), initMode)
return PowerIterationClusteringModel(model)
class Assignment(namedtuple("Assignment", ["id", "cluster"])):
"""
Represents an (id, cluster) tuple.
.. versionadded:: 1.5.0
"""
class StreamingKMeansModel(KMeansModel):
"""
Clustering model which can perform an online update of the centroids.
The update formula for each centroid is given by
* c_t+1 = ((c_t * n_t * a) + (x_t * m_t)) / (n_t + m_t)
* n_t+1 = n_t * a + m_t
where
* c_t: Centroid at the n_th iteration.
* n_t: Number of samples (or) weights associated with the centroid
at the n_th iteration.
* x_t: Centroid of the new data closest to c_t.
* m_t: Number of samples (or) weights of the new data closest to c_t
* c_t+1: New centroid.
* n_t+1: New number of weights.
* a: Decay Factor, which gives the forgetfulness.
.. note:: If a is set to 1, it is the weighted mean of the previous
and new data. If it set to zero, the old centroids are completely
forgotten.
:param clusterCenters:
Initial cluster centers.
:param clusterWeights:
List of weights assigned to each cluster.
>>> initCenters = [[0.0, 0.0], [1.0, 1.0]]
>>> initWeights = [1.0, 1.0]
>>> stkm = StreamingKMeansModel(initCenters, initWeights)
>>> data = sc.parallelize([[-0.1, -0.1], [0.1, 0.1],
... [0.9, 0.9], [1.1, 1.1]])
>>> stkm = stkm.update(data, 1.0, u"batches")
>>> stkm.centers
array([[ 0., 0.],
[ 1., 1.]])
>>> stkm.predict([-0.1, -0.1])
0
>>> stkm.predict([0.9, 0.9])
1
>>> stkm.clusterWeights
[3.0, 3.0]
>>> decayFactor = 0.0
>>> data = sc.parallelize([DenseVector([1.5, 1.5]), DenseVector([0.2, 0.2])])
>>> stkm = stkm.update(data, 0.0, u"batches")
>>> stkm.centers
array([[ 0.2, 0.2],
[ 1.5, 1.5]])
>>> stkm.clusterWeights
[1.0, 1.0]
>>> stkm.predict([0.2, 0.2])
0
>>> stkm.predict([1.5, 1.5])
1
.. versionadded:: 1.5.0
"""
def __init__(self, clusterCenters, clusterWeights):
super(StreamingKMeansModel, self).__init__(centers=clusterCenters)
self._clusterWeights = list(clusterWeights)
@property
@since('1.5.0')
def clusterWeights(self):
"""Return the cluster weights."""
return self._clusterWeights
@ignore_unicode_prefix
@since('1.5.0')
def update(self, data, decayFactor, timeUnit):
"""Update the centroids, according to data
:param data:
RDD with new data for the model update.
:param decayFactor:
Forgetfulness of the previous centroids.
:param timeUnit:
Can be "batches" or "points". If points, then the decay factor
is raised to the power of number of new points and if batches,
then decay factor will be used as is.
"""
if not isinstance(data, RDD):
raise TypeError("Data should be of an RDD, got %s." % type(data))
data = data.map(_convert_to_vector)
decayFactor = float(decayFactor)
if timeUnit not in ["batches", "points"]:
raise ValueError(
"timeUnit should be 'batches' or 'points', got %s." % timeUnit)
vectorCenters = [_convert_to_vector(center) for center in self.centers]
updatedModel = callMLlibFunc(
"updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
data, decayFactor, timeUnit)
self.centers = array(updatedModel[0])
self._clusterWeights = list(updatedModel[1])
return self
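    # Worked example of the update rule from the class docstring (a sketch,
    # consistent with the doctests above): with initial weight n_t = 1.0 per
    # center, four new points splitting two per center, and decayFactor a = 1.0:
    #
    #   n_t+1 = n_t * a + m_t = 1.0 * 1.0 + 2 = 3.0   (clusterWeights -> [3.0, 3.0])
    #   c_t+1 = ((c_t * n_t * a) + (x_t * m_t)) / (n_t + m_t)
    #         = (([0., 0.] * 1.0) + ([0., 0.] * 2)) / 3.0 = [0., 0.]
    #
    # With a = 0.0 the old weights are forgotten entirely, so one new point per
    # center gives n_t+1 = 3.0 * 0.0 + 1 = 1.0 and each center jumps to its new
    # data, matching the second doctest block.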
class StreamingKMeans(object):
"""
Provides methods to set k, decayFactor, timeUnit to configure the
KMeans algorithm for fitting and predicting on incoming dstreams.
More details on how the centroids are updated are provided under the
docs of StreamingKMeansModel.
:param k:
Number of clusters.
(default: 2)
:param decayFactor:
Forgetfulness of the previous centroids.
(default: 1.0)
:param timeUnit:
Can be "batches" or "points". If points, then the decay factor is
raised to the power of number of new points and if batches, then
decay factor will be used as is.
(default: "batches")
.. versionadded:: 1.5.0
"""
def __init__(self, k=2, decayFactor=1.0, timeUnit="batches"):
self._k = k
self._decayFactor = decayFactor
if timeUnit not in ["batches", "points"]:
raise ValueError(
"timeUnit should be 'batches' or 'points', got %s." % timeUnit)
self._timeUnit = timeUnit
self._model = None
@since('1.5.0')
def latestModel(self):
"""Return the latest model"""
return self._model
def _validate(self, dstream):
if self._model is None:
raise ValueError(
"Initial centers should be set either by setInitialCenters "
"or setRandomCenters.")
if not isinstance(dstream, DStream):
raise TypeError(
"Expected dstream to be of type DStream, "
"got type %s" % type(dstream))
@since('1.5.0')
def setK(self, k):
"""Set number of clusters."""
self._k = k
return self
@since('1.5.0')
def setDecayFactor(self, decayFactor):
"""Set decay factor."""
self._decayFactor = decayFactor
return self
@since('1.5.0')
def setHalfLife(self, halfLife, timeUnit):
"""
        Set the number of batches after which the centroids from that
        particular batch have half their original weight.
"""
self._timeUnit = timeUnit
self._decayFactor = exp(log(0.5) / halfLife)
return self
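    # Sketch of the half-life relation used above: decayFactor = exp(log(0.5) / halfLife),
    # so the weight of old data is multiplied by 0.5 after every halfLife time
    # units. For example, halfLife = 2 gives decayFactor ~= 0.7071 and
    # 0.7071 ** 2 ~= 0.5.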
@since('1.5.0')
def setInitialCenters(self, centers, weights):
"""
Set initial centers. Should be set before calling trainOn.
"""
self._model = StreamingKMeansModel(centers, weights)
return self
@since('1.5.0')
def setRandomCenters(self, dim, weight, seed):
"""
        Set the initial centers to be random samples from
        a Gaussian population with constant weights.
"""
rng = random.RandomState(seed)
clusterCenters = rng.randn(self._k, dim)
clusterWeights = tile(weight, self._k)
self._model = StreamingKMeansModel(clusterCenters, clusterWeights)
return self
@since('1.5.0')
def trainOn(self, dstream):
"""Train the model on the incoming dstream."""
self._validate(dstream)
def update(rdd):
self._model.update(rdd, self._decayFactor, self._timeUnit)
dstream.foreachRDD(update)
@since('1.5.0')
def predictOn(self, dstream):
"""
Make predictions on a dstream.
Returns a transformed dstream object
"""
self._validate(dstream)
return dstream.map(lambda x: self._model.predict(x))
@since('1.5.0')
def predictOnValues(self, dstream):
"""
Make predictions on a keyed dstream.
Returns a transformed dstream object.
"""
self._validate(dstream)
return dstream.mapValues(lambda x: self._model.predict(x))
class LDAModel(JavaModelWrapper, JavaSaveable, Loader):
""" A clustering model derived from the LDA method.
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology
- "word" = "term": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over words representing some concept
References:
- Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
>>> from pyspark.mllib.linalg import Vectors
>>> from numpy.testing import assert_almost_equal, assert_equal
>>> data = [
... [1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],
... ]
>>> rdd = sc.parallelize(data)
>>> model = LDA.train(rdd, k=2, seed=1)
>>> model.vocabSize()
2
>>> model.describeTopics()
[([1, 0], [0.5..., 0.49...]), ([0, 1], [0.5..., 0.49...])]
>>> model.describeTopics(1)
[([1], [0.5...]), ([0], [0.5...])]
>>> topics = model.topicsMatrix()
>>> topics_expect = array([[0.5, 0.5], [0.5, 0.5]])
>>> assert_almost_equal(topics, topics_expect, 1)
>>> import os, tempfile
>>> from shutil import rmtree
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = LDAModel.load(sc, path)
>>> assert_equal(sameModel.topicsMatrix(), model.topicsMatrix())
>>> sameModel.vocabSize() == model.vocabSize()
True
>>> try:
... rmtree(path)
... except OSError:
... pass
.. versionadded:: 1.5.0
"""
@since('1.5.0')
def topicsMatrix(self):
"""Inferred topics, where each topic is represented by a distribution over terms."""
return self.call("topicsMatrix").toArray()
@since('1.5.0')
def vocabSize(self):
"""Vocabulary size (number of terms or terms in the vocabulary)"""
return self.call("vocabSize")
@since('1.6.0')
def describeTopics(self, maxTermsPerTopic=None):
"""Return the topics described by weighted terms.
WARNING: If vocabSize and k are large, this can return a large object!
:param maxTermsPerTopic:
Maximum number of terms to collect for each topic.
(default: vocabulary size)
:return:
Array over topics. Each topic is represented as a pair of
matching arrays: (term indices, term weights in topic).
Each topic's terms are sorted in order of decreasing weight.
"""
if maxTermsPerTopic is None:
topics = self.call("describeTopics")
else:
topics = self.call("describeTopics", maxTermsPerTopic)
return topics
@classmethod
@since('1.5.0')
def load(cls, sc, path):
"""Load the LDAModel from disk.
:param sc:
SparkContext.
:param path:
Path to where the model is stored.
"""
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
model = callMLlibFunc("loadLDAModel", sc, path)
return LDAModel(model)
class LDA(object):
"""
.. versionadded:: 1.5.0
"""
@classmethod
@since('1.5.0')
def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,
topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"):
"""Train a LDA model.
:param rdd:
RDD of documents, which are tuples of document IDs and term
(word) count vectors. The term count vectors are "bags of
words" with a fixed-size vocabulary (where the vocabulary size
is the length of the vector). Document IDs must be unique
and >= 0.
:param k:
Number of topics to infer, i.e., the number of soft cluster
centers.
(default: 10)
:param maxIterations:
Maximum number of iterations allowed.
(default: 20)
:param docConcentration:
Concentration parameter (commonly named "alpha") for the prior
placed on documents' distributions over topics ("theta").
(default: -1.0)
:param topicConcentration:
Concentration parameter (commonly named "beta" or "eta") for
the prior placed on topics' distributions over terms.
(default: -1.0)
:param seed:
Random seed for cluster initialization. Set as None to generate
seed based on system time.
(default: None)
:param checkpointInterval:
Period (in iterations) between checkpoints.
(default: 10)
:param optimizer:
LDAOptimizer used to perform the actual calculation. Currently
"em", "online" are supported.
(default: "em")
"""
model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations,
docConcentration, topicConcentration, seed,
checkpointInterval, optimizer)
return LDAModel(model)
def _test():
import doctest
import numpy
import pyspark.mllib.clustering
try:
        # NumPy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.mllib.clustering.__dict__.copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
actuaryzhang/spark
|
python/pyspark/mllib/clustering.py
|
Python
|
apache-2.0
| 37,050
|
[
"Gaussian"
] |
755850cd9bd722dbbcd74a0791edde1a83f3d8763ab8b3d52fe90e51922d1835
|
from datetime import datetime
from sqlalchemy import *
from sqlalchemy.orm import class_mapper
from turbogears import config
from turbogears.util import load_class
from turbogears.visit.api import BaseVisitManager, Visit
from turbogears.database import get_engine, metadata, session, mapper
import logging
log = logging.getLogger(__name__)
class FuncWebVisitManager(BaseVisitManager):
def __init__(self, timeout):
super(FuncWebVisitManager,self).__init__(timeout)
self.visits = {}
def create_model(self):
pass
def new_visit_with_key(self, visit_key):
log.debug("new_visit_with_key(%s)" % visit_key)
created = datetime.now()
visit = Visit(visit_key, True)
visit.visit_key = visit_key
visit.created = created
visit.expiry = created + self.timeout
self.visits[visit_key] = visit
log.debug("returning %s" % visit)
return visit
def visit_for_key(self, visit_key):
'''
Return the visit for this key or None if the visit doesn't exist or has
expired.
'''
log.debug("visit_for_key(%s)" % visit_key)
if not self.visits.has_key(visit_key):
return None
visit = self.visits[visit_key]
if not visit:
return None
now = datetime.now(visit.expiry.tzinfo)
if visit.expiry < now:
return None
visit.is_new = False
log.debug("returning %s" % visit)
return visit
|
dockerera/func
|
funcweb/funcweb/identity/visit.py
|
Python
|
gpl-2.0
| 1,504
|
[
"VisIt"
] |
7b13911fd5fca3f79aeb7be9ac284b00295eac25473d1662c23a00a7f2ab44b3
|
"""This demo solves the Stokes equations using an iterative linear solver.
Note that the sign for the pressure has been flipped for symmetry."""
# Copyright (C) 2010 Garth N. Wells
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2010-08-08
# Last changed: 2010-08-08
# Begin demo
from dolfin import *
# Test for PETSc or Epetra
if not has_linear_algebra_backend("PETSc") and not has_linear_algebra_backend("Epetra"):
info("DOLFIN has not been configured with Trilinos or PETSc. Exiting.")
exit()
if not has_krylov_solver_preconditioner("amg"):
info("Sorry, this demo is only available when DOLFIN is compiled with AMG "
"preconditioner, Hypre or ML.");
exit()
# Load mesh
mesh = UnitCubeMesh(16, 16, 16)
# Define function spaces
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
W = V * Q
# Boundaries
def right(x, on_boundary): return x[0] > (1.0 - DOLFIN_EPS)
def left(x, on_boundary): return x[0] < DOLFIN_EPS
def top_bottom(x, on_boundary):
return x[1] > 1.0 - DOLFIN_EPS or x[1] < DOLFIN_EPS
# No-slip boundary condition for velocity
noslip = Constant((0.0, 0.0, 0.0))
bc0 = DirichletBC(W.sub(0), noslip, top_bottom)
# Inflow boundary condition for velocity
inflow = Expression(("-sin(x[1]*pi)", "0.0", "0.0"))
bc1 = DirichletBC(W.sub(0), inflow, right)
# Boundary condition for pressure at outflow
zero = Constant(0)
bc2 = DirichletBC(W.sub(1), zero, left)
# Collect boundary conditions
bcs = [bc0, bc1, bc2]
# Define variational problem
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
f = Constant((0.0, 0.0, 0.0))
a = inner(grad(u), grad(v))*dx + div(v)*p*dx + q*div(u)*dx
L = inner(f, v)*dx
# Form for use in constructing preconditioner matrix
b = inner(grad(u), grad(v))*dx + p*q*dx
# Assemble system
A, bb = assemble_system(a, L, bcs)
# Assemble preconditioner system
P, btmp = assemble_system(b, L, bcs)
# Create Krylov solver and AMG preconditioner
solver = KrylovSolver("tfqmr", "amg")
# Associate operator (A) and preconditioner matrix (P)
solver.set_operators(A, P)
# Solve
U = Function(W)
solver.solve(U.vector(), bb)
# Get sub-functions
u, p = U.split()
# Save solution in VTK format
ufile_pvd = File("velocity.pvd")
ufile_pvd << u
pfile_pvd = File("pressure.pvd")
pfile_pvd << p
# Plot solution
plot(u)
plot(p)
interactive()
|
alogg/dolfin
|
demo/pde/stokes-iterative/python/demo_stokes-iterative.py
|
Python
|
gpl-3.0
| 2,955
|
[
"VTK"
] |
1d81f05beefd369fa48dd2404be022d4561dd6dc74f0e233f943659a69ac9eaa
|
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileModifiedEvent
import sys
from subprocess import call
from splinter.browser import Browser
firefox = Browser()
class MdCompileHandler(FileSystemEventHandler):
def on_modified(self, event):
if isinstance(event, FileModifiedEvent) and event.src_path.endswith('.md'):
print 'Change in %s detected... Recompiling markdown' % event.src_path.split('/')[-1]
call(['landslide', sys.argv[1]])
firefox.visit('file:///' + event.src_path.replace('.md', '.html'))
if __name__ == "__main__":
event_handler = MdCompileHandler()
observer = Observer()
observer.schedule(event_handler, path='.', recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
andersoncardoso/small_scripts
|
landslide_presentation/python_brasil7/md_watchdog.py
|
Python
|
mit
| 914
|
[
"VisIt"
] |
ededd203a3fdf75868c14b12e0704039f26d407bebbd256d222e9a1d66410554
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coordinates a global build of a Spinnaker "release".
The term "release" here is more of an encapsulated build. This is not
an official release. It is meant for developers.
The gradle script does not yet coordinate a complete build, so
this script fills that gap for the time being. It triggers all
the subsystem builds and then publishes the resulting artifacts.
Publishing is typically to bintray. It is currently possible to publish
to a filesystem or storage bucket but this option will be removed soon
since the installation from these sources is no longer supported.
Usage:
export BINTRAY_USER=
export BINTRAY_KEY=
# subject/repository are the specific bintray repository
# owner and name components that specify the repository you are updating.
# The repository must already exist, but can be empty.
BINTRAY_REPOSITORY=subject/repository
# cd <build root containing subsystem subdirectories>
# this is where you ran refresh_source.sh from
./spinnaker/dev/build_release.sh --bintray_repo=$BINTRAY_REPOSITORY
"""
import argparse
import base64
import collections
import fnmatch
import os
import multiprocessing
import multiprocessing.pool
import re
import shutil
import subprocess
import sys
import tempfile
import urllib2
import zipfile
from urllib2 import HTTPError
import refresh_source
from spinnaker.run import check_run_quick
from spinnaker.run import run_quick
SUBSYSTEM_LIST = ['clouddriver', 'orca', 'front50',
'echo', 'rosco', 'gate', 'igor', 'fiat', 'deck', 'spinnaker']
def ensure_gcs_bucket(name, project=''):
"""Ensure that the desired GCS bucket exists, creating it if needed.
Args:
name [string]: The bucket name.
project [string]: Optional Google Project id that will own the bucket.
If none is provided, then the bucket will be associated with the default
bucket configured to gcloud.
Raises:
RuntimeError if the bucket could not be created.
"""
bucket = 'gs://'+ name
if not project:
config_result = run_quick('gcloud config list', echo=False)
error = None
if config_result.returncode:
error = 'Could not run gcloud: {error}'.format(
error=config_result.stdout)
else:
match = re.search('(?m)^project = (.*)', config_result.stdout)
if not match:
error = ('gcloud is not configured with a default project.\n'
'run gcloud config or provide a --google_project.\n')
if error:
raise SystemError(error)
project = match.group(1)
list_result = run_quick('gsutil list -p ' + project, echo=False)
if list_result.returncode:
error = ('Could not create Google Cloud Storage bucket'
'"{name}" in project "{project}":\n{error}'
.format(name=name, project=project, error=list_result.stdout))
raise RuntimeError(error)
if re.search('(?m)^{bucket}/\n'.format(bucket=bucket), list_result.stdout):
sys.stderr.write(
'WARNING: "{bucket}" already exists. Overwriting.\n'.format(
bucket=bucket))
else:
print 'Creating GCS bucket "{bucket}" in project "{project}".'.format(
bucket=bucket, project=project)
check_run_quick('gsutil mb -p {project} {bucket}'
.format(project=project, bucket=bucket),
echo=True)
def ensure_s3_bucket(name, region=""):
"""Ensure that the desired S3 bucket exists, creating it if needed.
Args:
name [string]: The bucket name.
region [string]: The S3 region for the bucket. If empty use aws default.
Raises:
RuntimeError if the bucket could not be created.
"""
bucket = 's3://' + name
list_result = run_quick('aws s3 ls ' + bucket, echo=False)
if not list_result.returncode:
sys.stderr.write(
'WARNING: "{bucket}" already exists. Overwriting.\n'.format(
bucket=bucket))
else:
print 'Creating S3 bucket "{bucket}"'.format(bucket=bucket)
command = 'aws s3 mb ' + bucket
if region:
command += ' --region ' + region
check_run_quick(command, echo=False)
class BackgroundProcess(
collections.namedtuple('BackgroundProcess', ['name', 'subprocess'])):
"""Denotes a running background process.
Attributes:
name [string]: The visible name of the process for reporting.
subprocess [subprocess]: The subprocess instance.
"""
@staticmethod
def spawn(name, args):
sp = subprocess.Popen(args, shell=True, close_fds=True,
stdout=sys.stdout, stderr=subprocess.STDOUT)
return BackgroundProcess(name, sp)
def wait(self):
if not self.subprocess:
return None
return self.subprocess.wait()
def check_wait(self):
if self.wait():
error = '{name} failed.'.format(name=self.name)
raise SystemError(error)
NO_PROCESS = BackgroundProcess('nop', None)
def determine_project_root():
return os.path.abspath(os.path.dirname(__file__) + '/..')
def determine_package_version(gradle_root, submodule):
with open(os.path.join(gradle_root, submodule,
'build/debian/control')) as f:
content = f.read()
match = re.search('(?m)^Version: (.*)', content)
return match.group(1)
class Builder(object):
"""Knows how to coordinate a Spinnaker release."""
def __init__(self, options):
self.__package_list = []
self.__build_failures = []
self.__background_processes = []
os.environ['NODE_ENV'] = os.environ.get('NODE_ENV', 'dev')
self.__options = options
self.refresher = refresh_source.Refresher(options)
if options.bintray_repo:
self.__verify_bintray()
# NOTE(ewiseblatt):
# This is the GCE directory.
# Ultimately we'll want to go to the root directory and install
# standard stuff and gce stuff.
self.__project_dir = determine_project_root()
self.__release_dir = options.release_path
if self.__release_dir.startswith('gs://'):
ensure_gcs_bucket(name=self.__release_dir[5:].split('/')[0],
project=options.google_project)
elif self.__release_dir.startswith('s3://'):
ensure_s3_bucket(name=self.__release_dir[5:].split('/')[0],
region=options.aws_region)
def determine_gradle_root(self, name):
    # The 'spinnaker' build is rooted at the project directory; an earlier
    # experimental/buildDeb location was superseded and is not used.
    gradle_root = name if name != 'spinnaker' else self.__project_dir
return gradle_root
def start_build_target(self, name, target):
"""Start a subprocess to build the designated target.
Args:
name [string]: The name of the subsystem repository.
target [string]: The gradle build target.
Returns:
BackgroundProcess
"""
# Currently spinnaker is in a separate location
gradle_root = self.determine_gradle_root(name)
print 'Building {name}...'.format(name=name)
return BackgroundProcess.spawn(
'Building {name}'.format(name=name),
'cd "{gradle_root}"; ./gradlew {target}'.format(
gradle_root=gradle_root, target=target))
def publish_to_bintray(self, source, package, version, path, debian_tags=''):
bintray_key = os.environ['BINTRAY_KEY']
bintray_user = os.environ['BINTRAY_USER']
parts = self.__options.bintray_repo.split('/')
if len(parts) != 2:
raise ValueError(
          'Expected --bintray_repo to be in the form <owner>/<repo>')
subject, repo = parts[0], parts[1]
deb_filename = os.path.basename(path)
if (deb_filename.startswith('spinnaker-')
and not package.startswith('spinnaker')):
package = 'spinnaker-' + package
if debian_tags and debian_tags[0] != ';':
debian_tags = ';' + debian_tags
url = ('https://api.bintray.com/content'
'/{subject}/{repo}/{package}/{version}/{path}'
'{debian_tags}'
';publish=1;override=1'
.format(subject=subject, repo=repo, package=package,
version=version, path=path,
debian_tags=debian_tags))
with open(source, 'r') as f:
data = f.read()
put_request = urllib2.Request(url)
encoded_auth = base64.encodestring('{user}:{pwd}'.format(
user=bintray_user, pwd=bintray_key))[:-1] # strip eoln
put_request.add_header('Authorization', 'Basic ' + encoded_auth)
put_request.get_method = lambda: 'PUT'
try:
result = urllib2.urlopen(put_request, data)
except HTTPError as put_error:
if put_error.code == 409 and self.__options.wipe_package_on_409:
# The problem here is that BinTray does not allow packages to change once
# they have been published (even though we are explicitly asking it to
        # override). PATCH won't work either.
# Since we are building from source, we don't really have a version
# yet, since we are still modifying the code. Either we need to generate a new
# version number every time or we don't want to publish these.
# Ideally we could control whether or not to publish. However,
# if we do not publish, then the repository will not be visible without
# credentials, and adding conditional credentials into the packer scripts
# starts getting even more complex.
#
# We cannot seem to delete individual versions either (at least not for
# InstallSpinnaker.sh, which is where this problem seems to occur),
# so we'll be heavy handed and wipe the entire package.
print 'Got 409 on {url}.'.format(url=url)
delete_url = ('https://api.bintray.com/content'
'/{subject}/{repo}/{path}'
.format(subject=subject, repo=repo, path=path))
print 'Attempt to delete url={url} then retry...'.format(url=delete_url)
delete_request = urllib2.Request(delete_url)
delete_request.add_header('Authorization', 'Basic ' + encoded_auth)
delete_request.get_method = lambda: 'DELETE'
try:
urllib2.urlopen(delete_request)
print 'Deleted...'
except HTTPError as ex:
# Maybe it didn't exist. Try again anyway.
print 'Delete {url} got {ex}. Try again anyway.'.format(url=url, ex=ex)
pass
print 'Retrying {url}'.format(url=url)
result = urllib2.urlopen(put_request, data)
print 'SUCCESS'
elif put_error.code != 400:
raise
else:
# Try creating the package and retrying.
pkg_url = os.path.join('https://api.bintray.com/packages',
subject, repo)
print 'Creating an entry for {package} with {pkg_url}...'.format(
package=package, pkg_url=pkg_url)
# All the packages are from spinnaker so we'll hardcode it.
pkg_data = """{{
"name": "{package}",
"licenses": ["Apache-2.0"],
"vcs_url": "https://github.com/spinnaker/{gitname}.git",
"website_url": "http://spinnaker.io",
"github_repo": "spinnaker/{gitname}",
"public_download_numbers": false,
"public_stats": false
}}'""".format(package=package,
gitname=package.replace('spinnaker-', ''))
pkg_request = urllib2.Request(pkg_url)
pkg_request.add_header('Authorization', 'Basic ' + encoded_auth)
pkg_request.add_header('Content-Type', 'application/json')
pkg_request.get_method = lambda: 'POST'
pkg_result = urllib2.urlopen(pkg_request, pkg_data)
pkg_code = pkg_result.getcode()
if pkg_code >= 200 and pkg_code < 300:
result = urllib2.urlopen(put_request, data)
code = result.getcode()
if code < 200 or code >= 300:
raise ValueError('{code}: Could not add version to {url}\n{msg}'
.format(code=code, url=url, msg=result.read()))
print 'Wrote {source} to {url}'.format(source=source, url=url)
def publish_install_script(self, source):
path = 'InstallSpinnaker.sh'
gradle_root = self.determine_gradle_root('spinnaker')
version = determine_package_version(gradle_root, '.')
self.publish_to_bintray(source, package='spinnaker', version=version,
path='InstallSpinnaker.sh')
def publish_file(self, source, package, version):
"""Write a file to the bintray repository.
Args:
source [string]: The path to the source to copy must be local.
"""
path = os.path.basename(source)
debian_tags = ';'.join(['deb_component=spinnaker',
'deb_distribution=trusty,utopic,vivid,wily',
'deb_architecture=all'])
self.publish_to_bintray(source, package=package, version=version,
path=path, debian_tags=debian_tags)
def start_copy_file(self, source, target):
"""Start a subprocess to copy the source file.
Args:
source [string]: The path to the source to copy must be local.
target [string]: The target path can also be a storage service URI.
Returns:
BackgroundProcess
"""
if target.startswith('s3://'):
return BackgroundProcess.spawn(
          'Copying {source}'.format(source=source),
'aws s3 cp "{source}" "{target}"'
.format(source=source, target=target))
elif target.startswith('gs://'):
return BackgroundProcess.spawn(
          'Copying {source}'.format(source=source),
'gsutil -q -m cp "{source}" "{target}"'
.format(source=source, target=target))
else:
try:
os.makedirs(os.path.dirname(target))
except OSError:
pass
shutil.copy(source, target)
return NO_PROCESS
def start_copy_debian_target(self, name):
"""Copies the debian package for the specified subsystem.
Args:
name [string]: The name of the subsystem repository.
"""
gradle_root = self.determine_gradle_root(name)
if os.path.exists(os.path.join(name, '{name}-web'.format(name=name))):
submodule = '{name}-web'.format(name=name)
elif os.path.exists(os.path.join(name, '{name}-core'.format(name=name))):
submodule = '{name}-core'.format(name=name)
else:
submodule = '.'
version = determine_package_version(gradle_root, submodule)
build_dir = '{submodule}/build/distributions'.format(submodule=submodule)
deb_dir = os.path.join(gradle_root, build_dir)
non_spinnaker_name = '{name}_{version}_all.deb'.format(
name=name, version=version)
if os.path.exists(os.path.join(deb_dir,
'spinnaker-' + non_spinnaker_name)):
deb_file = 'spinnaker-' + non_spinnaker_name
else:
deb_file = non_spinnaker_name
if not os.path.exists(os.path.join(deb_dir, deb_file)):
error = ('.deb for name={name} version={version} is not in {dir}\n'
.format(name=name, version=version, dir=deb_dir))
raise AssertionError(error)
from_path = os.path.join(gradle_root, build_dir, deb_file)
print 'Adding {path}'.format(path=from_path)
self.__package_list.append(deb_file)
if self.__options.bintray_repo:
self.publish_file(from_path, name, version)
if self.__release_dir:
to_path = os.path.join(self.__release_dir, deb_file)
return self.start_copy_file(from_path, to_path)
else:
return NO_PROCESS
def __do_build(self, subsys):
try:
self.start_build_target(subsys, 'buildDeb').check_wait()
except Exception as ex:
self.__build_failures.append(subsys)
def build_packages(self):
"""Build all the Spinnaker packages."""
if self.__options.build:
# Build in parallel using half available cores
# to keep load in check.
pool = multiprocessing.pool.ThreadPool(
          processes=max(1, int(
              self.__options.cpu_ratio * multiprocessing.cpu_count())))
pool.map(self.__do_build, SUBSYSTEM_LIST)
if self.__build_failures:
raise RuntimeError('Builds failed for {0!r}'.format(
self.__build_failures))
# Copy subsystem packages.
processes = []
for subsys in SUBSYSTEM_LIST:
processes.append(self.start_copy_debian_target(subsys))
print 'Waiting for package copying to finish....'
for p in processes:
p.check_wait()
@staticmethod
def __zip_dir(zip_file, source_path, arcname=''):
"""Zip the contents of a directory.
Args:
zip_file: [ZipFile] The zip file to write into.
source_path: [string] The directory to add.
arcname: [string] Optional name for the source to appear as in the zip.
"""
if arcname:
# Effectively replace os.path.basename(parent_path) with arcname.
arcbase = arcname + '/'
parent_path = source_path
else:
# Will start relative paths from os.path.basename(source_path).
arcbase = ''
parent_path = os.path.dirname(source_path)
# Copy the tree at source_path adding relative paths into the zip.
rel_offset = len(parent_path) + 1
entries = os.walk(source_path)
for root, dirs, files in entries:
for dirname in dirs:
abs_path = os.path.join(root, dirname)
zip_file.write(abs_path, arcbase + abs_path[rel_offset:])
for filename in files:
abs_path = os.path.join(root, filename)
zip_file.write(abs_path, arcbase + abs_path[rel_offset:])
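  # Illustrative sketch (not part of the original script, hypothetical paths):
  # for a file /tmp/build/yaml/loader.py,
  #   __zip_dir(zf, '/tmp/build/yaml', arcname='pyyaml') stores 'pyyaml/loader.py'
  #   __zip_dir(zf, '/tmp/build/yaml')                    stores 'yaml/loader.py'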
def add_python_test_zip(self, test_name):
"""Build encapsulated python zip file for the given test test_name.
This allows integration tests to be packaged with the release, at least
for the time being. This is useful for testing them, or validating the
initial installation and configuration.
"""
test_py = '{test_name}.py'.format(test_name=test_name)
testdir = os.path.join(self.__project_dir, 'build/tests')
try:
os.makedirs(testdir)
except OSError:
pass
zip_path = os.path.join(testdir, test_py + '.zip')
zip = zipfile.ZipFile(zip_path, 'w')
try:
zip.writestr('__main__.py', """
from {test_name} import main
import sys
if __name__ == '__main__':
retcode = main()
sys.exit(retcode)
""".format(test_name=test_name))
# Add citest sources as baseline
# TODO(ewiseblatt): 20150810
# Eventually this needs to be the transitive closure,
# but there are currently no other dependencies.
zip.writestr('__init__.py', '')
self.__zip_dir(zip, 'citest/citest', 'citest')
self.__zip_dir(zip,
'citest/spinnaker/spinnaker_testing', 'spinnaker_testing')
self.__zip_dir(zip, 'pylib/yaml', 'yaml')
zip.write('citest/spinnaker/spinnaker_system/' + test_py, test_py)
zip.close()
finally:
pass
def build_tests(self):
if not os.path.exists('citest'):
print 'Adding citest repository'
try:
self.refresher.git_clone(
refresh_source.SourceRepository('citest', 'google'))
except Exception as ex:
sys.stderr.write('*** Omitting tests: {0}\n'.format(ex.message))
return
print 'Adding tests...'
self.add_python_test_zip('aws_kato_test')
self.add_python_test_zip('google_kato_test')
self.add_python_test_zip('aws_smoke_test')
self.add_python_test_zip('google_smoke_test')
self.add_python_test_zip('google_server_group_test')
self.add_python_test_zip('bake_and_deploy_test')
@classmethod
def init_argument_parser(cls, parser):
refresh_source.Refresher.init_argument_parser(parser)
parser.add_argument('--build', default=True, action='store_true',
help='Build the sources.')
parser.add_argument(
'--cpu_ratio', type=float, default=1.25, # 125%
help='Number of concurrent threads as ratio of available cores.')
parser.add_argument('--nobuild', dest='build', action='store_false')
config_path = os.path.join(determine_project_root(), 'config')
parser.add_argument(
'--config_source', default=config_path,
help='Path to directory for release config file templates.')
parser.add_argument('--release_path', default='',
help='Specifies the path to the release to build.'
' The release name is assumed to be the basename.'
' The path can be a directory, GCS URI or S3 URI.')
parser.add_argument(
'--google_project', default='',
help='If release repository is a GCS bucket then this is the project'
' owning the bucket. The default is the project configured as the'
' default for gcloud.')
parser.add_argument(
'--aws_region', default='',
help='If release repository is a S3 bucket then this is the AWS'
' region to add the bucket to if the bucket did not already exist.')
parser.add_argument(
'--bintray_repo', default='',
help='Publish to this bintray repo.\n'
'This requires BINTRAY_USER and BINTRAY_KEY are set.')
parser.add_argument(
'--wipe_package_on_409', default=False, action='store_true',
help='Work around BinTray conflict errors by deleting the entire package and'
' retrying. Removes all prior versions so only intended for dev repos.\n')
parser.add_argument(
'--nowipe_package_on_409', dest='wipe_package_on_409', action='store_false')
def __verify_bintray(self):
if not os.environ.get('BINTRAY_KEY', None):
raise ValueError('BINTRAY_KEY environment variable not defined')
if not os.environ.get('BINTRAY_USER', None):
raise ValueError('BINTRAY_USER environment variable not defined')
@classmethod
def main(cls):
parser = argparse.ArgumentParser()
cls.init_argument_parser(parser)
options = parser.parse_args()
if not (options.release_path or options.bintray_repo):
sys.stderr.write(
'ERROR: Missing either a --release_path or --bintray_repo')
return -1
builder = cls(options)
if options.pull_origin:
builder.refresher.pull_all_from_origin()
builder.build_tests()
builder.build_packages()
if options.bintray_repo:
fd, temp_path = tempfile.mkstemp()
with open(os.path.join(determine_project_root(), 'InstallSpinnaker.sh'),
'r') as f:
content = f.read()
match = re.search(
'REPOSITORY_URL="https://dl\.bintray\.com/(.+)"',
content)
content = ''.join([content[0:match.start(1)],
options.bintray_repo,
content[match.end(1):]])
os.write(fd, content)
os.close(fd)
try:
builder.publish_install_script(
os.path.join(determine_project_root(), temp_path))
finally:
os.remove(temp_path)
print '\nFINISHED writing release to {rep}'.format(
rep=options.bintray_repo)
if options.release_path:
print '\nFINISHED writing release to {dir}'.format(
dir=builder.__release_dir)
if __name__ == '__main__':
sys.exit(Builder.main())
|
imosquera/spinnaker
|
dev/build_release.py
|
Python
|
apache-2.0
| 24,397
|
[
"ORCA"
] |
da5abdfb498c50448f4aacb289922c944c57e01d4de5772a6aa7014ba23a15c7
|
# Copyright (c) 2006, 2008, 2010, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015-2017 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2017 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""
generic classes/functions for pyreverse core/extensions
"""
from __future__ import print_function
import os
import re
import sys
########### pyreverse option utils ##############################
RCFILE = ".pyreverserc"
def get_default_options():
"""
Read config file and return list of options
"""
options = []
home = os.environ.get("HOME", "")
if home:
rcfile = os.path.join(home, RCFILE)
try:
options = open(rcfile).read().split()
except IOError:
pass # ignore if no config file found
return options
def insert_default_options():
"""insert default options to sys.argv
"""
options = get_default_options()
options.reverse()
for arg in options:
sys.argv.insert(1, arg)
# astroid utilities ###########################################################
SPECIAL = re.compile("^__[A-Za-z0-9]+[A-Za-z0-9_]*__$")
PRIVATE = re.compile("^__[_A-Za-z0-9]*[A-Za-z0-9]+_?$")
PROTECTED = re.compile("^_[_A-Za-z0-9]*$")
def get_visibility(name):
"""return the visibility from a name: public, protected, private or special
"""
if SPECIAL.match(name):
visibility = "special"
elif PRIVATE.match(name):
visibility = "private"
elif PROTECTED.match(name):
visibility = "protected"
else:
visibility = "public"
return visibility
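# Illustrative examples (not part of the original module), following the
# regexes above:
#   get_visibility("__init__")  -> "special"
#   get_visibility("__secret")  -> "private"
#   get_visibility("_helper")   -> "protected"
#   get_visibility("name")      -> "public"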
ABSTRACT = re.compile("^.*Abstract.*")
FINAL = re.compile("^[A-Z_]*$")
def is_abstract(node):
"""return true if the given class node correspond to an abstract class
definition
"""
return ABSTRACT.match(node.name)
def is_final(node):
"""return true if the given class/function node correspond to final
definition
"""
return FINAL.match(node.name)
def is_interface(node):
# bw compat
return node.type == "interface"
def is_exception(node):
# bw compat
return node.type == "exception"
# Helpers #####################################################################
_CONSTRUCTOR = 1
_SPECIAL = 2
_PROTECTED = 4
_PRIVATE = 8
MODES = {
"ALL": 0,
"PUB_ONLY": _SPECIAL + _PROTECTED + _PRIVATE,
"SPECIAL": _SPECIAL,
"OTHER": _PROTECTED + _PRIVATE,
}
VIS_MOD = {
"special": _SPECIAL,
"protected": _PROTECTED,
"private": _PRIVATE,
"public": 0,
}
class FilterMixIn:
"""filter nodes according to a mode and nodes' visibility
"""
def __init__(self, mode):
"init filter modes"
__mode = 0
for nummod in mode.split("+"):
try:
__mode += MODES[nummod]
except KeyError as ex:
print("Unknown filter mode %s" % ex, file=sys.stderr)
self.__mode = __mode
def show_attr(self, node):
"""return true if the node should be treated
"""
visibility = get_visibility(getattr(node, "name", node))
return not self.__mode & VIS_MOD[visibility]
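    # Sketch of the mode arithmetic (not part of the original module): the mode
    # is a bitmask of visibilities to hide. "PUB_ONLY" hides special, protected
    # and private names, so show_attr() passes only public ones; "ALL" (0)
    # hides nothing. Modes combine with '+', e.g. "SPECIAL+OTHER" is equivalent
    # to "PUB_ONLY".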
class ASTWalker:
"""a walker visiting a tree in preorder, calling on the handler:
* visit_<class name> on entering a node, where class name is the class of
the node in lower case
* leave_<class name> on leaving a node, where class name is the class of
the node in lower case
"""
def __init__(self, handler):
self.handler = handler
self._cache = {}
def walk(self, node, _done=None):
"""walk on the tree from <node>, getting callbacks from handler"""
if _done is None:
_done = set()
if node in _done:
raise AssertionError((id(node), node, node.parent))
_done.add(node)
self.visit(node)
for child_node in node.get_children():
assert child_node is not node
self.walk(child_node, _done)
self.leave(node)
assert node.parent is not node
def get_callbacks(self, node):
"""get callbacks from handler for the visited node"""
klass = node.__class__
methods = self._cache.get(klass)
if methods is None:
handler = self.handler
kid = klass.__name__.lower()
e_method = getattr(
handler, "visit_%s" % kid, getattr(handler, "visit_default", None)
)
l_method = getattr(
handler, "leave_%s" % kid, getattr(handler, "leave_default", None)
)
self._cache[klass] = (e_method, l_method)
else:
e_method, l_method = methods
return e_method, l_method
def visit(self, node):
"""walk on the tree from <node>, getting callbacks from handler"""
method = self.get_callbacks(node)[0]
if method is not None:
method(node)
def leave(self, node):
"""walk on the tree from <node>, getting callbacks from handler"""
method = self.get_callbacks(node)[1]
if method is not None:
method(node)
class LocalsVisitor(ASTWalker):
"""visit a project by traversing the locals dictionary"""
def __init__(self):
ASTWalker.__init__(self, self)
self._visited = {}
def visit(self, node):
"""launch the visit starting from the given node"""
if node in self._visited:
return None
self._visited[node] = 1 # FIXME: use set ?
methods = self.get_callbacks(node)
if methods[0] is not None:
methods[0](node)
if hasattr(node, "locals"): # skip Instance and other proxy
for local_node in node.values():
self.visit(local_node)
if methods[1] is not None:
return methods[1](node)
return None
|
kczapla/pylint
|
pylint/pyreverse/utils.py
|
Python
|
gpl-2.0
| 6,238
|
[
"VisIt"
] |
ccc534240f093ee096a86bb554750bc008ab4a8f5e1a9324c70c54936c1c77c8
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import re
from psi4 import core
from psi4.driver import qcdb
from psi4.driver import p4util
from psi4.driver.p4util.exceptions import *
from psi4.driver.procrouting import *
def _method_exists(ptype, method_name):
r"""
    Quick check to see if this method exists; if it does not, raise a
    ValidationError that suggests close matches.
"""
if method_name not in procedures[ptype].keys():
alternatives = ""
alt_method_name = p4util.text.find_approximate_string_matches(method_name,
procedures[ptype].keys(), 2)
if len(alt_method_name) > 0:
alternatives = " Did you mean? %s" % (" ".join(alt_method_name))
Cptype = ptype[0].upper() + ptype[1:]
raise ValidationError('%s method "%s" is not available.%s' % (Cptype, method_name, alternatives))
def _set_convergence_criterion(ptype, method_name, scf_Ec, pscf_Ec, scf_Dc, pscf_Dc, gen_Ec, verbose=1):
r"""
This function will set local SCF and global energy convergence criterion
to the defaults listed at:
http://www.psicode.org/psi4manual/master/scf.html#convergence-and-
    algorithm-defaults. SCF will be converged more tightly if a post-SCF
    method is selected (pscf_Ec and pscf_Dc); otherwise the looser criteria
    (scf_Ec and scf_Dc) will be used.
ptype - Procedure type (energy, gradient, etc). Nearly always test on
procedures['energy'] since that's guaranteed to exist for a method.
method_name - Name of the method
scf_Ec - E convergence criterion for scf target method
pscf_Ec - E convergence criterion for scf of post-scf target method
scf_Dc - D convergence criterion for scf target method
pscf_Dc - D convergence criterion for scf of post-scf target method
gen_Ec - E convergence criterion for post-scf target method
"""
optstash = p4util.OptionsState(
['SCF', 'E_CONVERGENCE'],
['SCF', 'D_CONVERGENCE'],
['E_CONVERGENCE'])
# Kind of want to move this out of here
_method_exists(ptype, method_name)
if verbose >= 2:
print(' Setting convergence', end=' ')
# Set method-dependent scf convergence criteria, check against energy routines
if not core.has_option_changed('SCF', 'E_CONVERGENCE'):
if procedures['energy'][method_name] in [proc.run_scf, proc.run_tdscf_energy]:
core.set_local_option('SCF', 'E_CONVERGENCE', scf_Ec)
if verbose >= 2:
print(scf_Ec, end=' ')
else:
core.set_local_option('SCF', 'E_CONVERGENCE', pscf_Ec)
if verbose >= 2:
print(pscf_Ec, end=' ')
else:
if verbose >= 2:
print('CUSTOM', core.get_option('SCF', 'E_CONVERGENCE'), end=' ')
if not core.has_option_changed('SCF', 'D_CONVERGENCE'):
if procedures['energy'][method_name] in [proc.run_scf, proc.run_tdscf_energy]:
core.set_local_option('SCF', 'D_CONVERGENCE', scf_Dc)
if verbose >= 2:
print(scf_Dc, end=' ')
else:
core.set_local_option('SCF', 'D_CONVERGENCE', pscf_Dc)
if verbose >= 2:
print(pscf_Dc, end=' ')
else:
if verbose >= 2:
print('CUSTOM', core.get_option('SCF', 'D_CONVERGENCE'), end=' ')
# Set post-scf convergence criteria (global will cover all correlated modules)
if not core.has_global_option_changed('E_CONVERGENCE'):
if procedures['energy'][method_name] != proc.run_scf:
core.set_global_option('E_CONVERGENCE', gen_Ec)
if verbose >= 2:
print(gen_Ec, end=' ')
else:
if procedures['energy'][method_name] != proc.run_scf:
if verbose >= 2:
print('CUSTOM', core.get_global_option('E_CONVERGENCE'), end=' ')
if verbose >= 2:
print('')
return optstash
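# Illustrative sketch (not part of the original module) of where the criteria
# land: for a plain SCF energy, SCF E_CONVERGENCE/D_CONVERGENCE receive the
# looser scf_Ec/scf_Dc values; for a post-SCF method such as MP2, SCF receives
# the tighter pscf_Ec/pscf_Dc and the global E_CONVERGENCE for the correlated
# step is set to gen_Ec. Options the user has already changed are left alone
# (reported as 'CUSTOM' at verbose >= 2).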
def parse_arbitrary_order(name):
r"""Function to parse name string into a method family like CI or MRCC and specific
level information like 4 for CISDTQ or MRCCSDTQ.
"""
name = name.lower()
mtdlvl_mobj = re.match(r"""\A(?P<method>[a-z]+)(?P<level>\d+)\Z""", name)
# matches 'mrccsdt(q)'
if name.startswith('mrcc'):
# avoid undoing fn's good work when called twice
if name == 'mrcc':
return name, None
# grabs 'sdt(q)'
ccfullname = name[4:]
# A negative order indicates perturbative method
methods = {
'sd' : { 'method': 1, 'order': 2, 'fullname': 'CCSD' },
'sdt' : { 'method': 1, 'order': 3, 'fullname': 'CCSDT' },
'sdtq' : { 'method': 1, 'order': 4, 'fullname': 'CCSDTQ' },
'sdtqp' : { 'method': 1, 'order': 5, 'fullname': 'CCSDTQP' },
'sdtqph' : { 'method': 1, 'order': 6, 'fullname': 'CCSDTQPH' },
'sd(t)' : { 'method': 3, 'order': -3, 'fullname': 'CCSD(T)' },
'sdt(q)' : { 'method': 3, 'order': -4, 'fullname': 'CCSDT(Q)' },
'sdtq(p)' : { 'method': 3, 'order': -5, 'fullname': 'CCSDTQ(P)' },
'sdtqp(h)' : { 'method': 3, 'order': -6, 'fullname': 'CCSDTQP(H)' },
'sd(t)_l' : { 'method': 4, 'order': -3, 'fullname': 'CCSD(T)_L' },
'sdt(q)_l' : { 'method': 4, 'order': -4, 'fullname': 'CCSDT(Q)_L' },
'sdtq(p)_l' : { 'method': 4, 'order': -5, 'fullname': 'CCSDTQ(P)_L' },
'sdtqp(h)_l' : { 'method': 4, 'order': -6, 'fullname': 'CCSDTQP(H)_L' },
'sdt-1a' : { 'method': 5, 'order': 3, 'fullname': 'CCSDT-1a' },
'sdtq-1a' : { 'method': 5, 'order': 4, 'fullname': 'CCSDTQ-1a' },
'sdtqp-1a' : { 'method': 5, 'order': 5, 'fullname': 'CCSDTQP-1a' },
'sdtqph-1a' : { 'method': 5, 'order': 6, 'fullname': 'CCSDTQPH-1a' },
'sdt-1b' : { 'method': 6, 'order': 3, 'fullname': 'CCSDT-1b' },
'sdtq-1b' : { 'method': 6, 'order': 4, 'fullname': 'CCSDTQ-1b' },
'sdtqp-1b' : { 'method': 6, 'order': 5, 'fullname': 'CCSDTQP-1b' },
'sdtqph-1b' : { 'method': 6, 'order': 6, 'fullname': 'CCSDTQPH-1b' },
'2' : { 'method': 7, 'order': 2, 'fullname': 'CC2' },
'3' : { 'method': 7, 'order': 3, 'fullname': 'CC3' },
'4' : { 'method': 7, 'order': 4, 'fullname': 'CC4' },
'5' : { 'method': 7, 'order': 5, 'fullname': 'CC5' },
'6' : { 'method': 7, 'order': 6, 'fullname': 'CC6' },
'sdt-3' : { 'method': 8, 'order': 3, 'fullname': 'CCSDT-3' },
'sdtq-3' : { 'method': 8, 'order': 4, 'fullname': 'CCSDTQ-3' },
'sdtqp-3' : { 'method': 8, 'order': 5, 'fullname': 'CCSDTQP-3' },
'sdtqph-3' : { 'method': 8, 'order': 6, 'fullname': 'CCSDTQPH-3' }
} # yapf: disable
# looks for 'sdt(q)' in dictionary
if ccfullname in methods:
return 'mrcc', methods[ccfullname]
else:
raise ValidationError(f"""MRCC method '{name}' invalid.""")
elif mtdlvl_mobj:
namestump = mtdlvl_mobj.group('method')
namelevel = int(mtdlvl_mobj.group('level'))
if namestump in ['mp', 'zapt', 'ci']:
# Let mp2, mp3, mp4 pass through to select functions
if namestump == 'mp' and namelevel in [2, 3, 4]:
return name, None
# Otherwise return method and order
else:
return namestump, namelevel
else:
return name, None
else:
return name, None
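# Illustrative examples (not part of the original module):
#   parse_arbitrary_order('ci4')        -> ('ci', 4)
#   parse_arbitrary_order('MP2')        -> ('mp2', None)   # mp2/3/4 pass through
#   parse_arbitrary_order('ccsd')       -> ('ccsd', None)
#   parse_arbitrary_order('mrccsdt(q)') -> ('mrcc', {'method': 3, 'order': -4,
#                                                    'fullname': 'CCSDT(Q)'})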
def parse_cotton_irreps(irrep, point_group):
"""Return validated Cotton ordering index of `irrep` within `point_group`.
Parameters
----------
irrep : str or int
Irreducible representation. Either label (case-insensitive) or 1-based index (int or str).
point_group : str
Molecular point group label (case-insensitive).
Returns
-------
int
1-based index for `irrep` within `point_group` in Cotton ordering.
Raises
------
ValidationError
If `irrep` out-of-bounds or invalid or if `point_group` doesn't exist.
"""
cotton = {
'c1': ['a'],
'ci': ['ag', 'au'],
'c2': ['a', 'b'],
'cs': ['ap', 'app'],
'd2': ['a', 'b1', 'b2', 'b3'],
'c2v': ['a1', 'a2', 'b1', 'b2'],
'c2h': ['ag', 'bg', 'au', 'bu'],
'd2h': ['ag', 'b1g', 'b2g', 'b3g', 'au', 'b1u', 'b2u', 'b3u'],
}
boll = cotton[point_group.lower()]
if str(irrep).isdigit():
irrep = int(irrep)
if irrep > 0 and irrep <= len(boll):
return irrep
else:
if irrep.lower() in boll:
return boll.index(irrep.lower()) + 1
raise ValidationError(f"""Irrep '{irrep}' not valid for point group '{point_group}'.""")
def negotiate_derivative_type(ptype, method, user_dertype, verbose=1, return_strategy=False, proc=None):
r"""Find the best derivative level (0, 1, 2) and strategy (analytic, finite difference)
for `method` to achieve `ptype` within constraints of `user_dertype`.
    Parameters
----------
ptype : {'energy', 'gradient', 'hessian'}
Type of calculation targeted by driver.
method : str
Quantum chemistry method targeted by driver. Should be correct case for procedures lookup.
user_dertype : int or None
User input on which derivative level should be employed to achieve `ptype`.
verbose : int, optional
Control amount of output printing.
return_strategy : bool, optional
See below. Form in which to return negotiated dertype.
proc : dict, optional
For testing only! Procedures table to look up `method`. Default is psi4.driver.procedures .
Returns
-------
int : {0, 1, 2}
When `return_strategy=False`, highest accessible derivative level
for `method` to achieve `ptype` within constraints of `user_dertype`.
str : {'analytic', '1_0', '2_1', '2_0'}
When `return_strategy=True`, highest accessible derivative strategy
for `method` to achieve `ptype` within constraints of `user_dertype`.
Raises
------
ValidationError
When input validation fails. When `user_dertype` exceeds `ptype`.
MissingMethodError
When `method` is unavailable at all. When `user_dertype` exceeds what available for `method`.
"""
egh = ['energy', 'gradient', 'hessian']
def alternative_methods_message(method_name, dertype):
alt_method_name = p4util.text.find_approximate_string_matches(method_name, proc['energy'].keys(), 2)
alternatives = ''
if len(alt_method_name) > 0:
alternatives = f""" Did you mean? {' '.join(alt_method_name)}"""
return f"""Derivative method ({method_name}) and derivative level ({dertype}) are not available.{alternatives}"""
# Validate input dertypes
if ptype not in egh:
raise ValidationError("_find_derivative_type: ptype must either be gradient or hessian.")
if not (user_dertype is None or isinstance(user_dertype, int)):
raise ValidationError(f"user_dertype ({user_dertype}) should only be None or int!")
if proc is None:
proc = procedures
dertype = "(auto)"
# Find the highest dertype program can provide for method, as encoded in procedures and managed methods
# Managed methods return finer grain "is available" info. For example, "is analytic ROHF DF HF gradient available?"
# from managed method, not just "is HF gradient available?" from procedures.
if method in proc['hessian']:
dertype = 2
if proc['hessian'][method].__name__.startswith('select_'):
try:
proc['hessian'][method](method, probe=True)
except ManagedMethodError:
dertype = 1
if proc['gradient'][method].__name__.startswith('select_'):
try:
proc['gradient'][method](method, probe=True)
except ManagedMethodError:
dertype = 0
if proc['energy'][method].__name__.startswith('select_'):
try:
proc['energy'][method](method, probe=True)
except ManagedMethodError:
raise MissingMethodError(alternative_methods_message(method, 'any'))
elif method in proc['gradient']:
dertype = 1
if proc['gradient'][method].__name__.startswith('select_'):
try:
proc['gradient'][method](method, probe=True)
except ManagedMethodError:
dertype = 0
if proc['energy'][method].__name__.startswith('select_'):
try:
proc['energy'][method](method, probe=True)
except ManagedMethodError:
raise MissingMethodError(alternative_methods_message(method, 'any'))
elif method in proc['energy']:
dertype = 0
if proc['energy'][method].__name__.startswith('select_'):
try:
proc['energy'][method](method, probe=True)
except ManagedMethodError:
raise MissingMethodError(alternative_methods_message(method, 'any'))
highest_der_program_can_provide = dertype
# Negotiations. In particular:
# * don't return higher derivative than targeted by driver
# * don't return higher derivative than spec'd by user. that is, user can downgrade derivative
# * alert user to conflict between driver and user_dertype
if user_dertype is not None and user_dertype > egh.index(ptype):
raise ValidationError(f'User dertype ({user_dertype}) excessive for target calculation ({ptype})')
if dertype != "(auto)" and egh.index(ptype) < dertype:
dertype = egh.index(ptype)
if dertype != "(auto)" and user_dertype is not None:
if user_dertype <= dertype:
dertype = user_dertype
else:
raise MissingMethodError(alternative_methods_message(method, user_dertype))
if verbose > 1:
print(
            f'Derivative negotiations: target/driver={egh.index(ptype)}, best_available={highest_der_program_can_provide}, user_dertype={user_dertype}, FINAL={dertype}'
)
#if (core.get_global_option('INTEGRAL_PACKAGE') == 'ERD') and (dertype != 0):
# raise ValidationError('INTEGRAL_PACKAGE ERD does not play nicely with derivatives, so stopping.')
#if (core.get_global_option('PCM')) and (dertype != 0):
# core.print_out('\nPCM analytic gradients are not implemented yet, re-routing to finite differences.\n')
# dertype = 0
# Summary validation (superfluous)
if dertype == '(auto)' or method not in proc[['energy', 'gradient', 'hessian'][dertype]]:
raise MissingMethodError(alternative_methods_message(method, dertype))
if return_strategy:
if ptype == egh[dertype]:
return 'analytic'
elif ptype == 'gradient' and egh[dertype] == 'energy':
return '1_0'
elif ptype == 'hessian' and egh[dertype] == 'gradient':
return '2_1'
elif ptype == 'hessian' and egh[dertype] == 'energy':
return '2_0'
else:
return dertype
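# Sketch of the negotiated outcomes (not part of the original module): for
# ptype='gradient' and a method with an analytic gradient, dertype is 1
# (strategy 'analytic'); if only energies are available, dertype is 0
# (strategy '1_0', a finite-difference gradient built from energies). A
# 'hessian' target similarly resolves to 'analytic', '2_1' or '2_0'.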
|
jgonthier/psi4
|
psi4/driver/driver_util.py
|
Python
|
lgpl-3.0
| 16,594
|
[
"Psi4"
] |
331345c260eafdd4e62121806dd86a1722de03f1ea14d257788f20d45e7d2a34
|
#!/usr/bin/env python
# make index.html in results directory
import sys
from glob import glob
import os.path
from copy import deepcopy
import re
script, analysis_name, sample_csv_file, comparison_csv_file, output_dir, \
output_filename = sys.argv
# Output directories:
fastqc_dir = os.path.join(output_dir, "fastqc")
fastqc_post_trim_dir = os.path.join(output_dir, "fastqc_post_trim")
trimmed_dir = os.path.join(output_dir, "trimmed_reads")
tophat_raw_dir = os.path.join(output_dir, "tophat_raw")
tophat_dir = os.path.join(output_dir, "tophat")
cufflinks_dir = os.path.join(output_dir, "cufflinks")
cuffmerge_dir = os.path.join(output_dir, "cuffmerge")
cuffdiff_dir = os.path.join(output_dir, "cuffdiff")
htseq_dir = os.path.join(output_dir, "htseq_count")
counts_dir = os.path.join(output_dir, "read_counts")
merged_dir = os.path.join(output_dir, "tophat_merged")
rnaseqc_dir = os.path.join(output_dir, "rnaseqc")
alignment_stats_dir = os.path.join(output_dir, "alignment_stats")
qc_summary_dir = os.path.join(output_dir, "qc_summary")
main_voom_dir = os.path.join(output_dir, "voom_analysis")
main_edger_dir = os.path.join(output_dir, "edgeR_analysis")
voom_dir = os.path.join(main_voom_dir, analysis_name + "_voom")
edger_dir = os.path.join(main_edger_dir, analysis_name + "_edgeR")
cuffmerge_sub_dir = os.path.join(cuffmerge_dir, analysis_name + "_cuffmerge")
##########################################################################
def html_table(heading, list_table):
html_output = []
html_output.append("<hr width=80%>")
html_output.append("<h2>%s</h2>" % heading)
html_output.append("<table width=90%>")
for list in list_table:
html_output.append("<tr>")
html_output.append("<td width=20%%><a href=\"%s\">%s</a></td>" % \
(list[0],list[1]))
html_output.append("<td>%s</td>" % list[2])
html_output.append("</tr>")
html_output.append("</table>")
return "\n".join(html_output)
def no_output(heading, message):
html_output = []
html_output.append("<hr width=80%>")
html_output.append("<h2>%s</h2>" % heading)
html_output.append("<p>%s<p>" % message)
return "\n".join(html_output)
##########################################################################
## Summary
CSS = """<html>
<head><title>RNA-Seq Results Index</title>
<style type="text/css">
table {
border-width: 1px;
border-spacing: 2px;
border-style: solid;
border-color: gray;
border-collapse: collapse;
font: 12pt Georgia;
}
table td {
border-width: 2px;
padding: 4px;
border-style: solid;
border-color: gray;
}
p.summary {
background-color: #FAF2D2;
padding: 10px 10px 10px 10px;
border: 1px #FFD54D solid;
width: 80%;
text-align: left !important;
}
body {
font: 10pt Verdana;
margin: 10px 100px;
}
</style>
</head>
<body>
<center>"""
summary_html = """
<h2>Index</h2>
<p class=summary>
This HTML file was generated with the sample CSV file:
<br>
<a href=\"%s\">%s</a>
<br>
and the comparison CSV file:
<br>
<a href=\"%s\">%s</a>
<br>
and the results directory:
<br>
<a href=\"%s\">%s</a>
<br><br>
To update this file, rerun the 'makeIndexHtml' stage:
<br>
e.g.
<br>
<code>
python RNA-seq_pipeline.py \\
<br>--opts=pipeline_config,pipeline_stages_config \\
<br>--style=run \\
<br>--end=makeIndexHtml \\
<br>--force=makeIndexHtml
</code>
</p>
""" % (sample_csv_file, sample_csv_file, comparison_csv_file,
comparison_csv_file, output_dir, output_dir)
##########################################################################
## QC
# fastQC pre and post trim summary, qc summary file
qc_files = glob(qc_summary_dir + "/*")
qc_files = [os.path.basename(x) for x in qc_files]
qc_files_dict = {
0: ["FastQC_basic_statistics_summary.html",
"FastQC Basic Statistics Summary",
"Statistics for each FASTQ file (pre and post-trimming) including read counts, %GC content, and sequence length."],
1: ["FastQC_summary.html",
"FastQC Summary",
"Pass/Fail summary for all FASTQ files (pre and post-trimming) for metrics such as sequence quality, GC content, and overrepresented sequences"],
2: ["qc_summary.html",
"RNA-Seq QC Summary",
"QC statistics for each sample. Includes TopHat alignment statistics and RNASeQC statistics."],
3: ["rnaseqc/",
"RNASeQC Directory",
"Detailed RNASeQC statistics for each sample."]
}
qc_table = []
relative_path = os.path.basename(qc_summary_dir) + "/"
for i in range(0,len(qc_files_dict)):
if qc_files_dict[i][0] in qc_files:
qc_files_dict[i][0] = relative_path + qc_files_dict[i][0]
qc_table.append(qc_files_dict[i])
rnaseqc_success_files = glob(rnaseqc_dir + "/*.rnaSeQC.Success")
# Individual RNASeQC results
if len(rnaseqc_success_files) > 0:
for i in range(0,len(qc_files_dict)):
if qc_files_dict[i][0] == "rnaseqc/":
qc_table.append(qc_files_dict[i])
if len(qc_table) > 0:
qc_html = html_table("QC Results", qc_table)
else:
qc_html = no_output("QC Results", "No QC output available.")
##########################################################################
## Read counts
read_count_files = glob(counts_dir + "/*")
read_count_files = [os.path.basename(x) for x in read_count_files]
samples_csv_name = analysis_name + "_counts.txt"
if samples_csv_name in read_count_files:
read_counts_table = [[os.path.join(counts_dir,samples_csv_name),
"Read counts",
"Read counts in plain text format. Includes annotations if provided. Can be uploaded for analysis in <a href=\"http://www.vicbioinformatics.com/degust/\">Degust</a>."]]
else:
read_counts_table = []
if len(read_counts_table) > 0:
read_counts_html = html_table("Read Counts", read_counts_table)
else:
read_counts_html = no_output("Read Counts", "No counts available.")
##########################################################################
## Voom results
voom_files = glob(voom_dir + "/*")
voom_files = [os.path.basename(x) for x in voom_files]
voom_files_dict = {
0: ["top_genes_.*.html",
"Top 100 DE Genes (HTML)",
"Top 100 differentially expressed genes from Voom in HTML table format. Includes raw read counts for each gene from each sample (and TMM-normalised counts per million reads in parentheses). Includes gene annotations if provided."],
1: ["top_genes_.*.txt",
"Top 100 DE Genes",
"Top 100 differentially expressed genes from Voom in plain text format. Includes gene annotations if provided."],
2: ["all_genes_.*.txt",
"All Genes",
"All genes from Voom sorted by descending p-value in plain text format."],
3: ["plots_.*.pdf",
"DE Plots",
"Page 1: MA plot.\nPage 2: P-value distribution\nPage 3 & 4: Heatmap\nPage 5+: Histograms"],
4: ["plots.pdf",
"QC Plots",
"Page 1: Raw gene count boxplots.\nPage 2: TMM-normalised gene count boxplots.\nPage 3: RLE plot of raw counts.\nPage 4: RLE plot of TMM-normalised counts.\nPage5: Voom mean-variance trend plot."],
5: ["MDS.pdf",
"MDS Plots",
"MDS plots using multiple dimensions."],
6: ["PCA.pdf",
"PCA Plots",
"PCA pair plot with 4 dimensions."],
7: ["heatmap.pdf",
"Clustered Heatmap",
"Heatmap of the top 1000 genes with most variance."],
8: ["voom.stdout",
"Voom Stdout file",
"Standard output from R."]
}
voom_table = []
relative_path = "/".join(voom_dir.split("/")[-2:]) + "/"
if "voom.Success" in voom_files:
for i in range(0,len(voom_files_dict)):
for j in range(0,len(voom_files)):
if re.search(voom_files_dict[i][0], voom_files[j]):
new_row = deepcopy(voom_files_dict[i])
new_row[0] = relative_path + voom_files[j]
new_row[1] = voom_files[j]
voom_table.append(new_row)
voom_html = html_table("Voom Results", voom_table)
else:
voom_html = no_output("Voom Results", "No output from Voom available.")
##########################################################################
## edgeR results
edger_files = glob(edger_dir + "/*")
edger_files = [os.path.basename(x) for x in edger_files]
edger_files_dict = {
0: ["top_genes_.*.html",
"Top 100 DE Genes (HTML)",
"Top 100 differentially expressed genes from edgeR in HTML table format. Includes raw read counts for each gene from each sample (and TMM-normalised counts per million reads in parentheses). Includes gene annotations if provided."],
1: ["top_genes_.*.txt",
"Top 100 DE Genes",
"Top 100 differentially expressed genes from edgeR in plain text format. Includes gene annotations if provided."],
2: ["all_genes_.*.txt",
"All Genes",
"All genes from edgeR sorted by descending p-value in plain text format."],
3: ["plots_.*.pdf",
"DE Plots",
"Page 1: MA plot.\nPage 2: P-value distribution\nPage 3 & 4: Heatmap\nPage 5+: Histograms"],
4: ["plots.pdf",
"QC Plots",
"Page 1: Raw gene count boxplots.\nPage 2: TMM-normalised gene count boxplots.\nPage 3: RLE plot of raw counts.\nPage 4: RLE plot of TMM-normalised counts.\nPage5: edgeR BCV plot."],
5: ["MDS.pdf",
"MDS Plots",
"MDS plots using multiple dimensions."],
6: ["PCA.pdf",
"PCA Plots",
"PCA pair plot with 4 dimensions."],
7: ["heatmap.pdf",
"Clustered Heatmap",
"Heatmap of the top 1000 genes with most variance."],
8: ["edgeR.stdout",
"edgeR Stdout file",
"Standard output from R."]
}
edger_table = []
relative_path = "/".join(edger_dir.split("/")[-2:]) + "/"
if "edgeR.Success" in edger_files:
for i in range(0,len(edger_files_dict)):
for j in range(0,len(edger_files)):
if re.search(edger_files_dict[i][0], edger_files[j]):
new_row = deepcopy(edger_files_dict[i])
new_row[0] = relative_path + edger_files[j]
new_row[1] = edger_files[j]
edger_table.append(new_row)
edger_html = html_table("EdgeR Results", edger_table)
else:
edger_html = no_output("EdgeR Results", "No output from EdgeR available.")
##########################################################################
## Cuffdiff results
cuffdiff_success_files = glob(cuffdiff_dir + "/" + analysis_name +
"*.cuffdiff.Success")
cuffdiff_sub_dirs = glob(cuffdiff_dir + "/" + analysis_name + "_*/")
cuffdiff_files_dict = {
0: ["gene_exp.diff",
"Cuffdiff Expression Results",
"Gene expression results from Cuffdiff"]
}
if len(cuffdiff_success_files) > 0:
cuffdiff_table = []
    for sub_dir in cuffdiff_sub_dirs:
        cuffdiff_files = glob(sub_dir + "/*")
        cuffdiff_files = [os.path.basename(x) for x in cuffdiff_files]
        for i in range(0, len(cuffdiff_files_dict)):
            if cuffdiff_files_dict[i][0] in cuffdiff_files:
                new = deepcopy(cuffdiff_files_dict[i])
                new[0] = "%s/%s" % (sub_dir, cuffdiff_files_dict[i][0])
                new[2] += " (%s)." % os.path.basename(sub_dir[:-1])
                cuffdiff_table.append(new)
cuffdiff_html = html_table("Cuffdiff Results", cuffdiff_table)
else:
cuffdiff_html = no_output("Cuffdiff Results",
"No output from Cuffdiff available.")
##########################################################################
## Other directories
misc_dir = glob(output_dir + "/*")
misc_dir = [os.path.basename(x) for x in misc_dir]
misc_dir_dict = {
0: ["trimmed_reads",
"Trimmed Reads Directory",
"Contains FASTQ files with adapters trimmed by Trimmomatic."],
1: ["fastqc",
"Pre-trimmed FastQC Directory",
"Contains FastQC results for FASTQ files before trimming."],
2: ["fastqc_post_trim",
"Post-trimmed FastQC Directory",
"Contains FastQC results for FASTQ files after trimming."],
3: ["transcriptome_index",
"Transcriptome Index Directory",
"Contains transcriptome index files built from reference files for TopHat to use for alignment."],
4: ["tophat_raw",
"Raw TopHat Output Directory",
"Contains TopHat output."],
5: ["tophat",
"Tophat Directory",
"Contains sorted and indexed TopHat alignment files."],
6: ["tophat_merged",
"Merged TopHat Directroy",
"Contains TopHat output with accepted hits and unmapped reads merged into one alignment file. Also contains outputs from processes such as reordering, adding read groups, and duplicate flaging which are needed for RNASeQC."],
7: ["alignment_stats",
"Alignment Stats Directory",
"Contains alignment statistics of TopHat alignment files."],
8: ["rnaseqc",
"RNASeQC Directory",
"Contains RNASeQC output."],
9: ["cufflinks",
"Cufflinks Directory",
"Contains Cufflinks assemblies."],
10: ["cuffmerge",
"Cuffmerge Directory",
"Contains merged assemblies by Cuffmerge."],
11: ["cuffdiff",
"Cuffdiff Directory",
"Contains differential gene expression analysis by Cuffdiff."],
12: ["htseq_count",
"HTSeq Directory",
"Contains read counts for each gene from HTSeq-count."],
13: ["read_counts",
"Read Counts Directory",
"Contains read counts filtered and merged into one file. Includes gene annotations if available."],
14: ["voom_analysis",
"Voom Directory",
"Contains output from Voom analysis."],
15: ["edgeR_analysis",
"edgeR Directory",
"Contains output from edgeR analysis."],
}
misc_table = []
for i in range(0,len(misc_dir_dict)):
if misc_dir_dict[i][0] in misc_dir:
misc_table.append(misc_dir_dict[i])
misc_html = html_table("Result Directories", misc_table)
##########################################################################
## Output
html = [CSS, summary_html, qc_html, read_counts_html, voom_html, edger_html,
cuffdiff_html, misc_html, "</center></body>\n</html>"]
output_file = open(output_filename, 'w')
for i in html:
output_file.write(i)
output_file.write("\n<br><br>\n")
output_file.close()
|
jessicachung/rna_seq_pipeline
|
scripts/html_index.py
|
Python
|
mit
| 14,481
|
[
"HTSeq"
] |
feec37e984cfb7a26043a9e4089a24014879711f70e5eb32ff9a7e1d7fc1dae1
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
****************
espressopp.Int3D
****************
.. function:: espressopp.Int3D(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.Int3D.x(v, [0)
:param v:
:param [0:
:type v:
:type [0:
:rtype:
.. function:: espressopp.Int3D.y(v, [1)
:param v:
:param [1:
:type v:
:type [1:
:rtype:
.. function:: espressopp.Int3D.z(v, [2)
:param v:
:param [2:
:type v:
:type [2:
:rtype:
.. function:: espressopp.toInt3DFromVector(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.toInt3D(\*args)
:param \*args:
:type \*args:
"""
import numbers
from _espressopp import Int3D
from espressopp import esutil
__all__ = ['Int3D', 'toInt3DFromVector', 'toInt3D']
def extend_class():
# This injects additional methods into the Int3D class and pulls it
# into this module
orig_init = Int3D.__init__
def init(self, *args):
if len(args) == 0:
            x = y = z = 0
elif len(args) == 1:
arg0 = args[0]
if isinstance(arg0, Int3D):
x = arg0.x
y = arg0.y
z = arg0.z
# test whether the argument is iterable and has 3 elements
elif hasattr(arg0, '__iter__') and len(arg0) == 3:
x, y, z = arg0
elif isinstance(arg0, int):
x = y = z = arg0
else:
raise TypeError("Cannot initialize Int3D from %s" % (args))
elif len(args) == 3:
x, y, z = args
else:
raise TypeError("Cannot initialize Int3D from %s" % (args))
orig_init(self, x, y, z)
def _get_getter_setter(idx):
def _get(self):
return self[idx]
def _set(self, v):
self[idx] = v
return _get, _set
def _eq(self, other):
if other is None:
return False
if isinstance(other, numbers.Number):
return all([self[i] == other for i in range(3)])
return all([self[i] == other[i] for i in range(3)])
def _lt(self, other):
if other is None:
return True
return id(self) < id(other)
def _gt(self, other):
if other is None:
return True
return id(self) > id(other)
Int3D.__init__ = init
for i, property_name in enumerate(['x', 'y', 'z']):
setattr(Int3D, property_name, property(*_get_getter_setter(i)))
Int3D.__str__ = lambda self: str((self[0], self[1], self[2]))
Int3D.__repr__ = lambda self: 'Int3D' + str(self)
Int3D.__eq__ = _eq
Int3D.__lt__ = _lt
Int3D.__gt__ = _gt
Int3D.as_tuple = lambda self: (self[0], self[1], self[2])
extend_class()
def toInt3DFromVector(*args):
"""Try to convert the arguments to a Int3D.
This function will only convert to a Int3D if x, y and z are
specified."""
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, Int3D):
return arg0
elif hasattr(arg0, '__iter__') and len(arg0) == 3:
return Int3D(*args)
elif len(args) == 3:
return Int3D(*args)
raise TypeError("Specify x, y and z.")
def toInt3D(*args):
"""Try to convert the arguments to a Int3D, returns the argument,
if it is already a Int3D."""
if len(args) == 1 and isinstance(args[0], Int3D):
return args[0]
else:
return Int3D(*args)
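# Illustrative usage sketch (assumes the compiled _espressopp extension is
# importable); the values are arbitrary:
#
#   >>> a = Int3D(1, 2, 3)
#   >>> a.x, a.y, a.z
#   (1, 2, 3)
#   >>> toInt3D([4, 5, 6])        # converts any 3-element iterable
#   Int3D(4, 5, 6)
#   >>> toInt3D(a) is a           # already an Int3D, returned unchanged
#   True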
|
espressopp/espressopp
|
src/Int3D.py
|
Python
|
gpl-3.0
| 4,560
|
[
"ESPResSo"
] |
06a394fae166f663200e216231dea303ce6b455b428e760a34b98d66efcc575a
|
#!/usr/bin/env python
'''
Use multi-grid to accelerate DFT numerical integration.
'''
import numpy
from pyscf.pbc import gto, dft
from pyscf.pbc.dft import multigrid
cell = gto.M(
verbose = 4,
a = numpy.eye(3)*3.5668,
atom = '''C 0. 0. 0.
C 0.8917 0.8917 0.8917
C 1.7834 1.7834 0.
C 2.6751 2.6751 0.8917
C 1.7834 0. 1.7834
C 2.6751 0.8917 2.6751
C 0. 1.7834 1.7834
C 0.8917 2.6751 2.6751''',
basis = 'sto3g',
#basis = 'ccpvdz',
#basis = 'gth-dzvp',
#pseudo = 'gth-pade'
)
mf = dft.UKS(cell)
mf.xc = 'lda,vwn'
#
# There are two ways to enable multigrid numerical integration
#
# Method 1: use multigrid.multigrid function to update SCF object
#
mf = multigrid.multigrid(mf)
mf.kernel()
#
# Method 2: MultiGridFFTDF is a DF object. It can be enabled by overwriting
# the default with_df object.
#
kpts = cell.make_kpts([4,4,4])
mf = dft.KRKS(cell, kpts)
mf.xc = 'lda,vwn'
mf.with_df = multigrid.MultiGridFFTDF(cell, kpts)
mf.kernel()
#
# MultiGridFFTDF can be used with second order SCF solver.
#
mf = mf.newton()
mf.kernel()
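#
# A gamma-point sketch following the same pattern (illustrative only):
# MultiGridFFTDF also accepts a cell without explicit k-points.
#
#mf = dft.RKS(cell)
#mf.xc = 'lda,vwn'
#mf.with_df = multigrid.MultiGridFFTDF(cell)
#mf.kernel()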
|
sunqm/pyscf
|
examples/pbc/27-multigrid.py
|
Python
|
apache-2.0
| 1,230
|
[
"PySCF"
] |
42d0450b8d9358af4199b46fd7512d86c1ff24493510c36020608b79c67ee45c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2016-04-11
# @Filename: rss.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-07-30 19:42:17
from __future__ import division, print_function
import os
import warnings
import astropy.io.ascii
import astropy.table
import astropy.units
import astropy.wcs
import numpy
from astropy.io import fits
import marvin
from marvin.core.exceptions import MarvinError, MarvinUserWarning
from marvin.utils.datamodel.drp import datamodel_rss
from marvin.utils.datamodel.drp.base import Spectrum as SpectrumDataModel
from .core import MarvinToolsClass
from .cube import Cube
from .mixins import NSAMixIn
from .quantities.spectrum import Spectrum
class RSS(MarvinToolsClass, NSAMixIn, list):
"""A class to interface with a MaNGA DRP row-stacked spectra file.
This class represents a fully reduced DRP row-stacked spectra object,
initialised either from a file, a database, or remotely via the Marvin API.
Instances of `.RSS` are a list of `.RSSFiber` objects, one for each fibre
and exposure. `.RSSFiber` are initialised lazily, containing only basic
information. They need to be initialised by calling `.RSSFiber.load`
(unless `.RSS.autoload` is ``True``, in which case the instance is loaded
when first accessed).
In addition to the input arguments supported by `~.MarvinToolsClass` and
`~.NSAMixIn`, this class accepts an ``autoload`` keyword argument that
defines whether `.RSSFiber` objects should be automatically loaded when
they are accessed.
"""
_qualflag = 'DRP3QUAL'
def __init__(self, input=None, filename=None, mangaid=None, plateifu=None,
mode=None, data=None, release=None, autoload=True,
drpall=None, download=None, nsa_source='auto'):
MarvinToolsClass.__init__(self, input=input, filename=filename, mangaid=mangaid,
plateifu=plateifu, mode=mode, data=data, release=release,
drpall=drpall, download=download)
NSAMixIn.__init__(self, nsa_source=nsa_source)
#: An `astropy.table.Table` with the observing information associated
#: with this RSS object.
self.obsinfo = None
#: If True, unloaded `.RSSFiber` instances are automatically loaded
#: when accessed. Otherwise, they need to be loaded via `.RSSFiber.load`.
self.autoload = autoload
if self.data_origin == 'file':
self._load_rss_from_file(data=self.data)
elif self.data_origin == 'db':
self._load_rss_from_db(data=self.data)
elif self.data_origin == 'api':
self._load_rss_from_api()
Cube._init_attributes(self)
# Checks that the drpver set in MarvinToolsClass matches the header
header_drpver = self.header['VERSDRP3'].strip()
header_drpver = 'v1_5_1' if header_drpver == 'v1_5_0' else header_drpver
assert header_drpver == self._drpver, ('mismatch between cube._drpver={0} '
'and header drpver={1}'.format(self._drpver,
header_drpver))
# EXPNUM in obsinfo is a string. Cast it to int
self.obsinfo['EXPNUM'] = self.obsinfo['EXPNUM'].astype(numpy.int32)
# Inits self as an empty list.
list.__init__(self, [])
self._populate_fibres()
def _set_datamodel(self):
"""Sets the datamodel for DRP."""
self.datamodel = datamodel_rss[self.release.upper()]
self._bitmasks = datamodel_rss[self.release.upper()].bitmasks
def __repr__(self):
"""Representation for RSS."""
return ('<Marvin RSS (mangaid={self.mangaid!r}, plateifu={self.plateifu!r}, '
'mode={self.mode!r}, data_origin={self.data_origin!r})>'.format(self=self))
def __getitem__(self, fiberid):
"""Returns the `.RSSFiber` whose fiberid matches the input."""
rssfiber = super(RSS, self).__getitem__(fiberid)
if self.autoload and not rssfiber.loaded:
rssfiber.load()
return rssfiber
def _getFullPath(self):
"""Returns the full path of the file in the tree."""
if not self.plateifu:
return None
plate, ifu = self.plateifu.split('-')
return super(RSS, self)._getFullPath('mangarss', ifu=ifu, drpver=self._drpver, plate=plate, wave='LOG')
def download(self):
"""Downloads the cube using sdss_access - Rsync"""
if not self.plateifu:
return None
plate, ifu = self.plateifu.split('-')
return super(RSS, self).download('mangarss', ifu=ifu, drpver=self._drpver, plate=plate, wave='LOG')
def getCube(self):
"""Returns the `~marvin.tools.cube.Cube` associated with this RSS."""
return Cube(plateifu=self.plateifu, mode=self.mode, release=self.release)
def load_all(self):
"""Loads all the `.RSSFiber` associated to this `.RSS` instance."""
for rssfiber in self:
if not rssfiber.loaded:
rssfiber.load()
def select_fibers(self, exposure_no=None, set=None, mjd=None):
"""Selects fibres that match one or multiple of the input parameters.
Parameters
----------
exposure_no : int
The exposure number. Ignored if ``None``.
set : int
The set id of the exposure. Ignored if ``None``.
mjd : int
The MJD of the exposure. Ignored if ``None``.
Returns
-------
rssfibers : list
A list of `.RSSFiber` instances whose obsinfo matches all the input
parameters. The `.RSS.autoload` option is respected.
Example
-------
>>> rss = marvin.tools.RSS('8485-1901')
>>> fibers = rss.select_fibers(set=2)
>>> fibers
[<RSSFiber [ 2.22306705, 11.84955406, 9.65761662, ..., 0. ,
0. , 0. ] 1e-17 erg / (Angstrom cm2 fiber s)>,
<RSSFiber [2.18669987, 1.4861778 , 2.55065155, ..., 0. , 0. ,
0. ] 1e-17 erg / (Angstrom cm2 fiber s)>,
<RSSFiber [2.75228763, 5.53485441, 2.31695175, ..., 0. , 0. ,
0. ] 1e-17 erg / (Angstrom cm2 fiber s)>]
"""
mask_exp = (self.obsinfo['EXPNUM'].astype(int) == exposure_no) if exposure_no else True
mask_set = (self.obsinfo['SET'].astype(int) == set) if set else True
mask_mjd = (self.obsinfo['MJD'].astype(int) == mjd) if mjd else True
mask = mask_exp & mask_set & mask_mjd
valid_exposures = numpy.where(mask)[0]
n_exposures = len(self.obsinfo)
n_fibres_per_exposure = self._nfibers // n_exposures
fibre_to_exposure = numpy.arange(self._nfibers) // n_fibres_per_exposure
fibres_in_valid_exposures = numpy.where(numpy.in1d(fibre_to_exposure, valid_exposures))[0]
return [self[ii] for ii in fibres_in_valid_exposures]
def _load_rss_from_file(self, data=None):
"""Initialises the RSS object from a file."""
if data is not None:
assert isinstance(data, fits.HDUList), 'data is not an HDUList object'
else:
try:
self.data = fits.open(self.filename)
except (IOError, OSError) as err:
raise OSError('filename {0} cannot be found: {1}'.format(self.filename, err))
self.header = self.data[1].header
self.wcs = astropy.wcs.WCS(self.header)
self.wcs = self.wcs.dropaxis(1) # The header creates an empty axis for the exposures.
# Confirm that this is a RSS file
assert 'XPOS' in self.data and self.header['CTYPE1'] == 'WAVE-LOG', \
'invalid file type. It does not appear to be a LOGRSS.'
self._wavelength = self.data['WAVE'].data
self._shape = None
self._nfibers = self.data['FLUX'].shape[0]
self.obsinfo = astropy.table.Table(self.data['OBSINFO'].data)
Cube._do_file_checks(self)
def _load_rss_from_db(self, data=None):
"""Initialises the RSS object from the DB.
At this time the DB does not contain enough information to successfully
instantiate a RSS object so we hack the data access mode to try to use
files. For users this should be irrelevant since they rarely will have
a Marvin DB. For the API, it means the access to RSS data will happen
via files.
"""
warnings.warn('DB mode is not working for RSS. Trying file access mode.',
MarvinUserWarning)
fullpath = self._getFullPath()
if fullpath and os.path.exists(fullpath):
self.filename = fullpath
self.data_origin = 'file'
self._load_rss_from_file()
else:
raise MarvinError('cannot find a valid RSS file for '
'plateifu={self.plateifu!r}, release={self.release!r}'
.format(self=self))
def _load_rss_from_api(self):
"""Initialises the RSS object using the remote API."""
# Checks that the RSS exists.
routeparams = {'name': self.plateifu}
url = marvin.config.urlmap['api']['getRSS']['url'].format(**routeparams)
try:
response = self._toolInteraction(url.format(name=self.plateifu))
except Exception as ee:
raise MarvinError('found a problem when checking if remote RSS '
'exists: {0}'.format(str(ee)))
data = response.getData()
self.header = fits.Header.fromstring(data['header'])
self.wcs = astropy.wcs.WCS(fits.Header.fromstring(data['wcs_header']))
self._wavelength = data['wavelength']
self._nfibers = data['nfibers']
self.obsinfo = astropy.io.ascii.read(data['obsinfo'])
if self.plateifu != data['plateifu']:
raise MarvinError('remote RSS has a different plateifu!')
return
def _populate_fibres(self):
"""Populates the internal list of fibres."""
n_exposures = len(self.obsinfo)
n_fibres_per_exposure = self._nfibers // n_exposures
for fiberid in range(self._nfibers):
exp_index = fiberid // n_fibres_per_exposure
exp_obsinfo = self.obsinfo[[exp_index]]
self.append(RSSFiber(fiberid, self, self._wavelength, load=False,
obsinfo=exp_obsinfo, pixmask_flag=self.header['MASKNAME']))
class RSSFiber(Spectrum):
"""A `~astropy.units.Quantity` representing a fibre observation.
    Represents the spectral flux observed through a fibre, and associated with
an `.RSS` object. In addition to the flux, it contains information about
the inverse variance, mask, and other associated spectra defined in the
datamodel.
Parameters
----------
fiberid : int
The fiberid (0-indexed row in the parent `.RSS` object) for this fibre
observation.
rss : `.RSS`
The parent `.RSS` object with which this fibre observation is
associated.
wavelength : numpy.ndarray
The wavelength positions of each array element, in Angstrom.
load : bool
Whether the information in the `.RSSFiber` should be loaded during
instantiation. Defaults to lazy loading (use `.RSSFiber.load` to
load the fibre information).
obsinfo : astropy.table.Table
A `~astropy.table.Table` with the information for the exposure to
which this fibre observation belongs.
kwargs : dict
Additional keyword arguments to be passed to `.Spectrum`.
"""
def __new__(cls, fiberid, rss, wavelength, pixmask_flag=None, load=False,
obsinfo=None, **kwargs):
# For now we instantiate a mostly empty Spectrum. Proper instantiation
# will happen in load().
array_size = len(wavelength)
obj = super(RSSFiber, cls).__new__(
cls, numpy.zeros(array_size, dtype=numpy.float64), wavelength,
scale=None, unit=None,)
obj._extra_attributes = ['fiberid', 'rss', 'loaded', 'obsinfo']
obj._spectra = []
return obj
def __init__(self, fiberid, rss, wavelength, pixmask_flag=None, load=False,
obsinfo=None, **kwargs):
self.fiberid = fiberid
self.rss = rss
self.obsinfo = obsinfo
self.pixmask_flag = pixmask_flag
self.loaded = False
if load:
self.load()
def __repr__(self):
if not self.loaded:
return ('<RSSFiber (plateifu={self.rss.plateifu!r}, '
'fiberid={self.fiberid!r}, loaded={self.loaded!r})>'.format(self=self))
else:
return super(RSSFiber, self).__repr__()
def __array_finalize__(self, obj):
if obj is None:
return
super(RSSFiber, self).__array_finalize__(obj)
# Adds _extra_attributes from the previous object.
if hasattr(obj, '_extra_attributes'):
for attr in obj._extra_attributes:
setattr(self, attr, getattr(obj, attr, None))
self._extra_attributes = getattr(obj, '_extra_attributes', None)
# Adds the additional spectra from the previous object.
if hasattr(obj, '_spectra'):
for spectrum in obj._spectra:
setattr(self, spectrum, getattr(obj, spectrum, None))
self._spectra = getattr(obj, '_spectra', None)
def __getitem__(self, sl):
new_obj = super(RSSFiber, self).__getitem__(sl)
for spectra_name in self._spectra:
current_spectrum = getattr(self, spectra_name, None)
new_spectrum = None if current_spectrum is None else current_spectrum.__getitem__(sl)
setattr(new_obj, spectra_name, new_spectrum)
return new_obj
def load(self):
"""Loads the fibre information."""
assert self.loaded is False, 'object already loaded.'
# Depending on whether the parent RSS is a file or API-populated, we
# select the data to use.
if self.rss.data_origin == 'file':
# If the data origin is a file we use the HDUList in rss.data
rss_data = self.rss.data
elif self.rss.data_origin == 'api':
# If data origin is the API, we make a request for the data
# associated with this fiberid for all the extensions in the file.
url = marvin.config.urlmap['api']['getRSSFiber']['url']
try:
response = self.rss._toolInteraction(url.format(name=self.rss.plateifu,
fiberid=self.fiberid))
except Exception as ee:
raise MarvinError('found a problem retrieving RSS fibre data for '
'plateifu={!r}, fiberid={!r}: {}'.format(
self.rss.plateifu, self.fiberid, str(ee)))
api_data = response.getData()
# Create a quick and dirty HDUList from the API data so that we
# can parse it in the same way as if the data origin is file.
rss_data = astropy.io.fits.HDUList([astropy.io.fits.PrimaryHDU()])
for ext in api_data:
rss_data.append(astropy.io.fits.ImageHDU(data=api_data[ext], name=ext.upper()))
else:
raise ValueError('invalid data_origin={!r}'.format(self.rss.data_origin))
# Compile a list of all RSS datamodel extensions, either RSS or spectra
datamodel_extensions = self.rss.datamodel.rss + self.rss.datamodel.spectra
for extension in datamodel_extensions:
# Retrieve the value (and mask and ivar, if associated) for each extension.
value, ivar, mask = self._get_extension_data(extension, rss_data,
data_origin=self.rss.data_origin)
if extension.name == 'flux':
self.value[:] = value[:]
self.ivar = ivar
self.mask = mask
self._set_unit(extension.unit)
else:
new_spectrum = Spectrum(value, self.wavelength, ivar=ivar, mask=mask,
unit=extension.unit)
setattr(self, extension.name, new_spectrum)
self._spectra.append(extension.name)
self.loaded = True
def _get_extension_data(self, extension, data, data_origin='file'):
"""Returns the value of an extension for this fibre, either from file or API.
Parameters
----------
extension : datamodel object
The datamodel object containing the information for the extension
we want to retrieve.
data : ~astropy.io.fits.HDUList
An `~astropy.io.fits.HDUList` object containing the RSS
information.
"""
# Determine if this is an RSS datamodel object or an spectrum.
# If the origin is the API, the extension data contains a single spectrum,
# not a row-stacked array, so we consider it a 1D array.
is_extension_data_1D = isinstance(extension, SpectrumDataModel) or data_origin == 'api'
value = data[extension.fits_extension()].data
if extension.has_mask():
mask = data[extension.fits_extension('mask')].data
else:
mask = None
if hasattr(extension, 'has_ivar') and extension.has_ivar():
ivar = data[extension.fits_extension('ivar')].data
elif hasattr(extension, 'has_std') and extension.has_std():
std = data[extension.fits_extension('std')].data
ivar = 1. / (std**2)
else:
ivar = None
# If this is an RSS, gets the right row in the stacked spectra.
if not is_extension_data_1D:
value = value[self.fiberid, :]
mask = mask[self.fiberid, :] if mask is not None else None
ivar = ivar[self.fiberid, :] if ivar is not None else None
return value, ivar, mask
@property
def masked(self):
"""Return a masked array where the mask is greater than zero."""
assert self.mask is not None, 'mask is not set'
return numpy.ma.array(self.value, mask=(self.mask > 0))
def descale(self):
"""Returns a copy of the object in which the scale is unity.
Note that this only affects to the core value of this quantity.
Associated array attributes will not be modified.
Example:
>>> fiber.unit
Unit("1e-17 erg / (Angstrom cm2 fiber s)")
>>> fiber[100]
<RSSFiber 0.270078063011169 1e-17 erg / (Angstrom cm2 fiber s)>
>>> fiber_descaled = fiber.descale()
>>> fiber_descaled.unit
Unit("Angstrom cm2 fiber s")
>>> fiber[100]
<RSSFiber 2.70078063011169e-18 erg / (Angstrom cm2 fiber s)>
"""
if self.unit.scale == 1:
return self
value_descaled = self.value * self.unit.scale
value_unit = astropy.units.CompositeUnit(1, self.unit.bases, self.unit.powers)
if self.ivar is not None:
ivar_descaled = self.ivar / (self.unit.scale ** 2)
else:
ivar_descaled = None
copy_of_self = self.copy()
copy_of_self.value[:] = value_descaled
copy_of_self.ivar = ivar_descaled
copy_of_self._set_unit(value_unit)
return copy_of_self
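# Illustrative usage sketch (requires a configured Marvin installation; the
# plate-ifu below is the one used in the select_fibers docstring):
#
#   >>> rss = RSS('8485-1901')
#   >>> fiber = rss[0]             # autoload=True loads the RSSFiber on access
#   >>> fiber.loaded
#   True
#   >>> fiber.descale().unit       # drop the 1e-17 scale from the flux unit
#   Unit("erg / (Angstrom cm2 fiber s)")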
|
sdss/marvin
|
python/marvin/tools/rss.py
|
Python
|
bsd-3-clause
| 19,928
|
[
"Brian"
] |
4ce6f356591ca20b6133442205a8fbd7f537fc3c83a0d03b807d610048b72f7b
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_resolution,
str_or_none,
try_get,
unified_timestamp,
url_or_none,
urljoin,
)
class PeerTubeIE(InfoExtractor):
_INSTANCES_RE = r'''(?:
# Taken from https://instances.joinpeertube.org/instances
peertube\.rainbowswingers\.net|
tube\.stanisic\.nl|
peer\.suiri\.us|
medias\.libox\.fr|
videomensoif\.ynh\.fr|
peertube\.travelpandas\.eu|
peertube\.rachetjay\.fr|
peertube\.montecsys\.fr|
tube\.eskuero\.me|
peer\.tube|
peertube\.umeahackerspace\.se|
tube\.nx-pod\.de|
video\.monsieurbidouille\.fr|
tube\.openalgeria\.org|
vid\.lelux\.fi|
video\.anormallostpod\.ovh|
tube\.crapaud-fou\.org|
peertube\.stemy\.me|
lostpod\.space|
exode\.me|
peertube\.snargol\.com|
vis\.ion\.ovh|
videosdulib\.re|
v\.mbius\.io|
videos\.judrey\.eu|
peertube\.osureplayviewer\.xyz|
peertube\.mathieufamily\.ovh|
www\.videos-libr\.es|
fightforinfo\.com|
peertube\.fediverse\.ru|
peertube\.oiseauroch\.fr|
video\.nesven\.eu|
v\.bearvideo\.win|
video\.qoto\.org|
justporn\.cc|
video\.vny\.fr|
peervideo\.club|
tube\.taker\.fr|
peertube\.chantierlibre\.org|
tube\.ipfixe\.info|
tube\.kicou\.info|
tube\.dodsorf\.as|
videobit\.cc|
video\.yukari\.moe|
videos\.elbinario\.net|
hkvideo\.live|
pt\.tux\.tf|
www\.hkvideo\.live|
FIGHTFORINFO\.com|
pt\.765racing\.com|
peertube\.gnumeria\.eu\.org|
nordenmedia\.com|
peertube\.co\.uk|
tube\.darfweb\.eu|
tube\.kalah-france\.org|
0ch\.in|
vod\.mochi\.academy|
film\.node9\.org|
peertube\.hatthieves\.es|
video\.fitchfamily\.org|
peertube\.ddns\.net|
video\.ifuncle\.kr|
video\.fdlibre\.eu|
tube\.22decembre\.eu|
peertube\.harmoniescreatives\.com|
tube\.fabrigli\.fr|
video\.thedwyers\.co|
video\.bruitbruit\.com|
peertube\.foxfam\.club|
peer\.philoxweb\.be|
videos\.bugs\.social|
peertube\.malbert\.xyz|
peertube\.bilange\.ca|
libretube\.net|
diytelevision\.com|
peertube\.fedilab\.app|
libre\.video|
video\.mstddntfdn\.online|
us\.tv|
peertube\.sl-network\.fr|
peertube\.dynlinux\.io|
peertube\.david\.durieux\.family|
peertube\.linuxrocks\.online|
peerwatch\.xyz|
v\.kretschmann\.social|
tube\.otter\.sh|
yt\.is\.nota\.live|
tube\.dragonpsi\.xyz|
peertube\.boneheadmedia\.com|
videos\.funkwhale\.audio|
watch\.44con\.com|
peertube\.gcaillaut\.fr|
peertube\.icu|
pony\.tube|
spacepub\.space|
tube\.stbr\.io|
v\.mom-gay\.faith|
tube\.port0\.xyz|
peertube\.simounet\.net|
play\.jergefelt\.se|
peertube\.zeteo\.me|
tube\.danq\.me|
peertube\.kerenon\.com|
tube\.fab-l3\.org|
tube\.calculate\.social|
peertube\.mckillop\.org|
tube\.netzspielplatz\.de|
vod\.ksite\.de|
peertube\.laas\.fr|
tube\.govital\.net|
peertube\.stephenson\.cc|
bistule\.nohost\.me|
peertube\.kajalinifi\.de|
video\.ploud\.jp|
video\.omniatv\.com|
peertube\.ffs2play\.fr|
peertube\.leboulaire\.ovh|
peertube\.tronic-studio\.com|
peertube\.public\.cat|
peertube\.metalbanana\.net|
video\.1000i100\.fr|
peertube\.alter-nativ-voll\.de|
tube\.pasa\.tf|
tube\.worldofhauru\.xyz|
pt\.kamp\.site|
peertube\.teleassist\.fr|
videos\.mleduc\.xyz|
conf\.tube|
media\.privacyinternational\.org|
pt\.forty-two\.nl|
video\.halle-leaks\.de|
video\.grosskopfgames\.de|
peertube\.schaeferit\.de|
peertube\.jackbot\.fr|
tube\.extinctionrebellion\.fr|
peertube\.f-si\.org|
video\.subak\.ovh|
videos\.koweb\.fr|
peertube\.zergy\.net|
peertube\.roflcopter\.fr|
peertube\.floss-marketing-school\.com|
vloggers\.social|
peertube\.iriseden\.eu|
videos\.ubuntu-paris\.org|
peertube\.mastodon\.host|
armstube\.com|
peertube\.s2s\.video|
peertube\.lol|
tube\.open-plug\.eu|
open\.tube|
peertube\.ch|
peertube\.normandie-libre\.fr|
peertube\.slat\.org|
video\.lacaveatonton\.ovh|
peertube\.uno|
peertube\.servebeer\.com|
peertube\.fedi\.quebec|
tube\.h3z\.jp|
tube\.plus200\.com|
peertube\.eric\.ovh|
tube\.metadocs\.cc|
tube\.unmondemeilleur\.eu|
gouttedeau\.space|
video\.antirep\.net|
nrop\.cant\.at|
tube\.ksl-bmx\.de|
tube\.plaf\.fr|
tube\.tchncs\.de|
video\.devinberg\.com|
hitchtube\.fr|
peertube\.kosebamse\.com|
yunopeertube\.myddns\.me|
peertube\.varney\.fr|
peertube\.anon-kenkai\.com|
tube\.maiti\.info|
tubee\.fr|
videos\.dinofly\.com|
toobnix\.org|
videotape\.me|
voca\.tube|
video\.heromuster\.com|
video\.lemediatv\.fr|
video\.up\.edu\.ph|
balafon\.video|
video\.ivel\.fr|
thickrips\.cloud|
pt\.laurentkruger\.fr|
video\.monarch-pass\.net|
peertube\.artica\.center|
video\.alternanet\.fr|
indymotion\.fr|
fanvid\.stopthatimp\.net|
video\.farci\.org|
v\.lesterpig\.com|
video\.okaris\.de|
tube\.pawelko\.net|
peertube\.mablr\.org|
tube\.fede\.re|
pytu\.be|
evertron\.tv|
devtube\.dev-wiki\.de|
raptube\.antipub\.org|
video\.selea\.se|
peertube\.mygaia\.org|
video\.oh14\.de|
peertube\.livingutopia\.org|
peertube\.the-penguin\.de|
tube\.thechangebook\.org|
tube\.anjara\.eu|
pt\.pube\.tk|
video\.samedi\.pm|
mplayer\.demouliere\.eu|
widemus\.de|
peertube\.me|
peertube\.zapashcanon\.fr|
video\.latavernedejohnjohn\.fr|
peertube\.pcservice46\.fr|
peertube\.mazzonetto\.eu|
video\.irem\.univ-paris-diderot\.fr|
video\.livecchi\.cloud|
alttube\.fr|
video\.coop\.tools|
video\.cabane-libre\.org|
peertube\.openstreetmap\.fr|
videos\.alolise\.org|
irrsinn\.video|
video\.antopie\.org|
scitech\.video|
tube2\.nemsia\.org|
video\.amic37\.fr|
peertube\.freeforge\.eu|
video\.arbitrarion\.com|
video\.datsemultimedia\.com|
stoptrackingus\.tv|
peertube\.ricostrongxxx\.com|
docker\.videos\.lecygnenoir\.info|
peertube\.togart\.de|
tube\.postblue\.info|
videos\.domainepublic\.net|
peertube\.cyber-tribal\.com|
video\.gresille\.org|
peertube\.dsmouse\.net|
cinema\.yunohost\.support|
tube\.theocevaer\.fr|
repro\.video|
tube\.4aem\.com|
quaziinc\.com|
peertube\.metawurst\.space|
videos\.wakapo\.com|
video\.ploud\.fr|
video\.freeradical\.zone|
tube\.valinor\.fr|
refuznik\.video|
pt\.kircheneuenburg\.de|
peertube\.asrun\.eu|
peertube\.lagob\.fr|
videos\.side-ways\.net|
91video\.online|
video\.valme\.io|
video\.taboulisme\.com|
videos-libr\.es|
tv\.mooh\.fr|
nuage\.acostey\.fr|
video\.monsieur-a\.fr|
peertube\.librelois\.fr|
videos\.pair2jeux\.tube|
videos\.pueseso\.club|
peer\.mathdacloud\.ovh|
media\.assassinate-you\.net|
vidcommons\.org|
ptube\.rousset\.nom\.fr|
tube\.cyano\.at|
videos\.squat\.net|
video\.iphodase\.fr|
peertube\.makotoworkshop\.org|
peertube\.serveur\.slv-valbonne\.fr|
vault\.mle\.party|
hostyour\.tv|
videos\.hack2g2\.fr|
libre\.tube|
pire\.artisanlogiciel\.net|
videos\.numerique-en-commun\.fr|
video\.netsyms\.com|
video\.die-partei\.social|
video\.writeas\.org|
peertube\.swarm\.solvingmaz\.es|
tube\.pericoloso\.ovh|
watching\.cypherpunk\.observer|
videos\.adhocmusic\.com|
tube\.rfc1149\.net|
peertube\.librelabucm\.org|
videos\.numericoop\.fr|
peertube\.koehn\.com|
peertube\.anarchmusicall\.net|
tube\.kampftoast\.de|
vid\.y-y\.li|
peertube\.xtenz\.xyz|
diode\.zone|
tube\.egf\.mn|
peertube\.nomagic\.uk|
visionon\.tv|
videos\.koumoul\.com|
video\.rastapuls\.com|
video\.mantlepro\.com|
video\.deadsuperhero\.com|
peertube\.musicstudio\.pro|
peertube\.we-keys\.fr|
artitube\.artifaille\.fr|
peertube\.ethernia\.net|
tube\.midov\.pl|
peertube\.fr|
watch\.snoot\.tube|
peertube\.donnadieu\.fr|
argos\.aquilenet\.fr|
tube\.nemsia\.org|
tube\.bruniau\.net|
videos\.darckoune\.moe|
tube\.traydent\.info|
dev\.videos\.lecygnenoir\.info|
peertube\.nayya\.org|
peertube\.live|
peertube\.mofgao\.space|
video\.lequerrec\.eu|
peertube\.amicale\.net|
aperi\.tube|
tube\.ac-lyon\.fr|
video\.lw1\.at|
www\.yiny\.org|
videos\.pofilo\.fr|
tube\.lou\.lt|
choob\.h\.etbus\.ch|
tube\.hoga\.fr|
peertube\.heberge\.fr|
video\.obermui\.de|
videos\.cloudfrancois\.fr|
betamax\.video|
video\.typica\.us|
tube\.piweb\.be|
video\.blender\.org|
peertube\.cat|
tube\.kdy\.ch|
pe\.ertu\.be|
peertube\.social|
videos\.lescommuns\.org|
tv\.datamol\.org|
videonaute\.fr|
dialup\.express|
peertube\.nogafa\.org|
megatube\.lilomoino\.fr|
peertube\.tamanoir\.foucry\.net|
peertube\.devosi\.org|
peertube\.1312\.media|
tube\.bootlicker\.party|
skeptikon\.fr|
video\.blueline\.mg|
tube\.homecomputing\.fr|
tube\.ouahpiti\.info|
video\.tedomum\.net|
video\.g3l\.org|
fontube\.fr|
peertube\.gaialabs\.ch|
tube\.kher\.nl|
peertube\.qtg\.fr|
video\.migennes\.net|
tube\.p2p\.legal|
troll\.tv|
videos\.iut-orsay\.fr|
peertube\.solidev\.net|
videos\.cemea\.org|
video\.passageenseine\.fr|
videos\.festivalparminous\.org|
peertube\.touhoppai\.moe|
sikke\.fi|
peer\.hostux\.social|
share\.tube|
peertube\.walkingmountains\.fr|
videos\.benpro\.fr|
peertube\.parleur\.net|
peertube\.heraut\.eu|
tube\.aquilenet\.fr|
peertube\.gegeweb\.eu|
framatube\.org|
thinkerview\.video|
tube\.conferences-gesticulees\.net|
peertube\.datagueule\.tv|
video\.lqdn\.fr|
tube\.mochi\.academy|
media\.zat\.im|
video\.colibris-outilslibres\.org|
tube\.svnet\.fr|
peertube\.video|
peertube3\.cpy\.re|
peertube2\.cpy\.re|
videos\.tcit\.fr|
peertube\.cpy\.re
)'''
_UUID_RE = r'[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}'
_API_BASE = 'https://%s/api/v1/videos/%s/%s'
_VALID_URL = r'''(?x)
(?:
peertube:(?P<host>[^:]+):|
https?://(?P<host_2>%s)/(?:videos/(?:watch|embed)|api/v\d/videos)/
)
(?P<id>%s)
''' % (_INSTANCES_RE, _UUID_RE)
_TESTS = [{
'url': 'https://framatube.org/videos/watch/9c9de5e8-0a1e-484a-b099-e80766180a6d',
'md5': '9bed8c0137913e17b86334e5885aacff',
'info_dict': {
'id': '9c9de5e8-0a1e-484a-b099-e80766180a6d',
'ext': 'mp4',
'title': 'What is PeerTube?',
'description': 'md5:3fefb8dde2b189186ce0719fda6f7b10',
'thumbnail': r're:https?://.*\.(?:jpg|png)',
'timestamp': 1538391166,
'upload_date': '20181001',
'uploader': 'Framasoft',
'uploader_id': '3',
'uploader_url': 'https://framatube.org/accounts/framasoft',
'channel': 'Les vidéos de Framasoft',
'channel_id': '2',
'channel_url': 'https://framatube.org/video-channels/bf54d359-cfad-4935-9d45-9d6be93f63e8',
'language': 'en',
'license': 'Attribution - Share Alike',
'duration': 113,
'view_count': int,
'like_count': int,
'dislike_count': int,
'tags': ['framasoft', 'peertube'],
'categories': ['Science & Technology'],
}
}, {
'url': 'https://peertube.tamanoir.foucry.net/videos/watch/0b04f13d-1e18-4f1d-814e-4979aa7c9c44',
'only_matching': True,
}, {
# nsfw
'url': 'https://tube.22decembre.eu/videos/watch/9bb88cd3-9959-46d9-9ab9-33d2bb704c39',
'only_matching': True,
}, {
'url': 'https://tube.22decembre.eu/videos/embed/fed67262-6edb-4d1c-833b-daa9085c71d7',
'only_matching': True,
}, {
'url': 'https://tube.openalgeria.org/api/v1/videos/c1875674-97d0-4c94-a058-3f7e64c962e8',
'only_matching': True,
}, {
'url': 'peertube:video.blender.org:b37a5b9f-e6b5-415c-b700-04a5cd6ec205',
'only_matching': True,
}]
@staticmethod
def _extract_peertube_url(webpage, source_url):
mobj = re.match(
r'https?://(?P<host>[^/]+)/videos/(?:watch|embed)/(?P<id>%s)'
% PeerTubeIE._UUID_RE, source_url)
if mobj and any(p in webpage for p in (
'<title>PeerTube<',
'There will be other non JS-based clients to access PeerTube',
'>We are sorry but it seems that PeerTube is not compatible with your web browser.<')):
return 'peertube:%s:%s' % mobj.group('host', 'id')
@staticmethod
def _extract_urls(webpage, source_url):
entries = re.findall(
r'''(?x)<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//%s/videos/embed/%s)'''
% (PeerTubeIE._INSTANCES_RE, PeerTubeIE._UUID_RE), webpage)
if not entries:
peertube_url = PeerTubeIE._extract_peertube_url(webpage, source_url)
if peertube_url:
entries = [peertube_url]
return entries
def _call_api(self, host, video_id, path, note=None, errnote=None, fatal=True):
return self._download_json(
self._API_BASE % (host, video_id, path), video_id,
note=note, errnote=errnote, fatal=fatal)
def _get_subtitles(self, host, video_id):
captions = self._call_api(
host, video_id, 'captions', note='Downloading captions JSON',
fatal=False)
if not isinstance(captions, dict):
return
data = captions.get('data')
if not isinstance(data, list):
return
subtitles = {}
for e in data:
language_id = try_get(e, lambda x: x['language']['id'], compat_str)
caption_url = urljoin('https://%s' % host, e.get('captionPath'))
if not caption_url:
continue
subtitles.setdefault(language_id or 'en', []).append({
'url': caption_url,
})
return subtitles
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host') or mobj.group('host_2')
video_id = mobj.group('id')
video = self._call_api(
host, video_id, '', note='Downloading video JSON')
title = video['name']
formats = []
for file_ in video['files']:
if not isinstance(file_, dict):
continue
file_url = url_or_none(file_.get('fileUrl'))
if not file_url:
continue
file_size = int_or_none(file_.get('size'))
format_id = try_get(
file_, lambda x: x['resolution']['label'], compat_str)
f = parse_resolution(format_id)
f.update({
'url': file_url,
'format_id': format_id,
'filesize': file_size,
})
if format_id == '0p':
f['vcodec'] = 'none'
else:
f['fps'] = int_or_none(file_.get('fps'))
formats.append(f)
self._sort_formats(formats)
full_description = self._call_api(
host, video_id, 'description', note='Downloading description JSON',
fatal=False)
description = None
if isinstance(full_description, dict):
description = str_or_none(full_description.get('description'))
if not description:
description = video.get('description')
subtitles = self.extract_subtitles(host, video_id)
def data(section, field, type_):
return try_get(video, lambda x: x[section][field], type_)
def account_data(field, type_):
return data('account', field, type_)
def channel_data(field, type_):
return data('channel', field, type_)
category = data('category', 'label', compat_str)
categories = [category] if category else None
nsfw = video.get('nsfw')
        if isinstance(nsfw, bool):
age_limit = 18 if nsfw else 0
else:
age_limit = None
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': urljoin(url, video.get('thumbnailPath')),
'timestamp': unified_timestamp(video.get('publishedAt')),
'uploader': account_data('displayName', compat_str),
'uploader_id': str_or_none(account_data('id', int)),
'uploader_url': url_or_none(account_data('url', compat_str)),
'channel': channel_data('displayName', compat_str),
'channel_id': str_or_none(channel_data('id', int)),
'channel_url': url_or_none(channel_data('url', compat_str)),
'language': data('language', 'id', compat_str),
'license': data('licence', 'label', compat_str),
'duration': int_or_none(video.get('duration')),
'view_count': int_or_none(video.get('views')),
'like_count': int_or_none(video.get('likes')),
'dislike_count': int_or_none(video.get('dislikes')),
'age_limit': age_limit,
'tags': try_get(video, lambda x: x['tags'], list),
'categories': categories,
'formats': formats,
'subtitles': subtitles
}
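# Usage sketch: both URL forms below (taken from the test cases above) resolve
# to the same metadata endpoint, https://framatube.org/api/v1/videos/<id>/,
# through _call_api().
#
#   https://framatube.org/videos/watch/9c9de5e8-0a1e-484a-b099-e80766180a6d
#   peertube:framatube.org:9c9de5e8-0a1e-484a-b099-e80766180a6d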
|
spvkgn/youtube-dl
|
youtube_dl/extractor/peertube.py
|
Python
|
unlicense
| 27,588
|
[
"MOE"
] |
7e04d824aa63ed9be36e2368caa6f7314798528e3947d327b4dfa90f7768def2
|
#
# @file TestWriteSBML.py
# @brief Write SBML unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestWriteSBML.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def util_NaN():
z = 1e300
z = z * z
return z - z
def util_PosInf():
z = 1e300
z = z * z
return z
def util_NegInf():
z = 1e300
z = z * z
return -z
def wrapString(s):
return s
pass
def LV_L1v1():
return "level=\"1\" version=\"1\">\n"
pass
def LV_L1v2():
return "level=\"1\" version=\"2\">\n"
pass
def LV_L2v1():
return "level=\"2\" version=\"1\">\n"
pass
def LV_L2v2():
return "level=\"2\" version=\"2\">\n"
pass
def LV_L2v3():
return "level=\"2\" version=\"3\">\n"
pass
def NS_L1():
return "xmlns=\"http://www.sbml.org/sbml/level1\" "
pass
def NS_L2v1():
return "xmlns=\"http://www.sbml.org/sbml/level2\" "
pass
def NS_L2v2():
return "xmlns=\"http://www.sbml.org/sbml/level2/version2\" "
pass
def NS_L2v3():
return "xmlns=\"http://www.sbml.org/sbml/level2/version3\" "
pass
def SBML_END():
return "</sbml>\n"
pass
def SBML_START():
return "<sbml "
pass
def XML_START():
return "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
pass
def wrapSBML_L1v1(s):
r = XML_START()
r += SBML_START()
r += NS_L1()
r += LV_L1v1()
r += s
r += SBML_END()
return r
pass
def wrapSBML_L1v2(s):
r = XML_START()
r += SBML_START()
r += NS_L1()
r += LV_L1v2()
r += s
r += SBML_END()
return r
pass
def wrapSBML_L2v1(s):
r = XML_START()
r += SBML_START()
r += NS_L2v1()
r += LV_L2v1()
r += s
r += SBML_END()
return r
pass
def wrapSBML_L2v2(s):
r = XML_START()
r += SBML_START()
r += NS_L2v2()
r += LV_L2v2()
r += s
r += SBML_END()
return r
pass
def wrapSBML_L2v3(s):
r = XML_START()
r += SBML_START()
r += NS_L2v3()
r += LV_L2v3()
r += s
r += SBML_END()
return r
pass
def wrapXML(s):
r = XML_START()
r += s
return r
pass
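# Illustrative result of the wrapper helpers above (a sketch; the output is
# produced exactly as concatenated):
#
#   wrapSBML_L1v1("<model/>\n") ->
#     <?xml version="1.0" encoding="UTF-8"?>
#     <sbml xmlns="http://www.sbml.org/sbml/level1" level="1" version="1">
#     <model/>
#     </sbml>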
class TestWriteSBML(unittest.TestCase):
global S
S = None
global D
D = None
def equals(self, *x):
if len(x) == 2:
return x[0] == x[1]
elif len(x) == 1:
return x[0] == self.OSS.str()
def setUp(self):
self.D = libsbml.SBMLDocument()
self.S = None
pass
def tearDown(self):
self.D = None
self.S = None
pass
def test_SBMLWriter_create(self):
w = libsbml.SBMLWriter()
self.assert_( w != None )
_dummyList = [ w ]; _dummyList[:] = []; del _dummyList
pass
def test_SBMLWriter_setProgramName(self):
w = libsbml.SBMLWriter()
self.assert_( w != None )
i = w.setProgramName( "sss")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
i = w.setProgramName("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
_dummyList = [ w ]; _dummyList[:] = []; del _dummyList
pass
def test_SBMLWriter_setProgramVersion(self):
w = libsbml.SBMLWriter()
self.assert_( w != None )
i = w.setProgramVersion( "sss")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
i = w.setProgramVersion("")
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
_dummyList = [ w ]; _dummyList[:] = []; del _dummyList
pass
def test_WriteSBML_AlgebraicRule(self):
self.D.setLevelAndVersion(1,1,False)
expected = "<algebraicRule formula=\"x + 1\"/>";
r = self.D.createModel().createAlgebraicRule()
r.setFormula("x + 1")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_AlgebraicRule_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<algebraicRule>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <plus/>\n" +
" <ci> x </ci>\n" +
" <cn type=\"integer\"> 1 </cn>\n" +
" </apply>\n" +
" </math>\n" +
"</algebraicRule>")
r = self.D.createModel().createAlgebraicRule()
r.setFormula("x + 1")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_AlgebraicRule_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<algebraicRule sboTerm=\"SBO:0000004\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <plus/>\n" +
" <ci> x </ci>\n" +
" <cn type=\"integer\"> 1 </cn>\n" +
" </apply>\n" +
" </math>\n" +
"</algebraicRule>")
r = self.D.createModel().createAlgebraicRule()
r.setFormula("x + 1")
r.setSBOTerm(4)
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_Compartment(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<compartment name=\"A\" volume=\"2.1\" outside=\"B\"/>";
c = self.D.createModel().createCompartment()
c.setId("A")
c.setSize(2.1)
c.setOutside("B")
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_CompartmentType(self):
self.D.setLevelAndVersion(2,2,False)
expected = "<compartmentType id=\"ct\"/>";
ct = self.D.createModel().createCompartmentType()
ct.setId("ct")
ct.setSBOTerm(4)
self.assertEqual( True, self.equals(expected,ct.toSBML()) )
pass
def test_WriteSBML_CompartmentType_withSBO(self):
self.D.setLevelAndVersion(2,3,False)
expected = "<compartmentType sboTerm=\"SBO:0000004\" id=\"ct\"/>";
ct = self.D.createModel().createCompartmentType()
ct.setId("ct")
ct.setSBOTerm(4)
self.assertEqual( True, self.equals(expected,ct.toSBML()) )
pass
def test_WriteSBML_CompartmentVolumeRule(self):
self.D.setLevelAndVersion(1,1,False)
expected = wrapString("<compartmentVolumeRule " + "formula=\"v + c\" type=\"rate\" compartment=\"c\"/>")
self.D.createModel()
self.D.getModel().createCompartment().setId("c")
r = self.D.getModel().createRateRule()
r.setVariable("c")
r.setFormula("v + c")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_CompartmentVolumeRule_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<assignmentRule variable=\"c\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <plus/>\n" +
" <ci> v </ci>\n" +
" <ci> c </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</assignmentRule>")
self.D.createModel()
self.D.getModel().createCompartment().setId("c")
r = self.D.getModel().createAssignmentRule()
r.setVariable("c")
r.setFormula("v + c")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_CompartmentVolumeRule_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<assignmentRule sboTerm=\"SBO:0000005\" variable=\"c\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <plus/>\n" +
" <ci> v </ci>\n" +
" <ci> c </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</assignmentRule>")
self.D.createModel()
self.D.getModel().createCompartment().setId("c")
r = self.D.getModel().createAssignmentRule()
r.setVariable("c")
r.setFormula("v + c")
r.setSBOTerm(5)
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_CompartmentVolumeRule_defaults(self):
self.D.setLevelAndVersion(1,1,False)
expected = "<compartmentVolumeRule formula=\"v + c\" compartment=\"c\"/>";
self.D.createModel()
self.D.getModel().createCompartment().setId("c")
r = self.D.getModel().createAssignmentRule()
r.setVariable("c")
r.setFormula("v + c")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_Compartment_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<compartment id=\"M\" spatialDimensions=\"2\" size=\"2.5\"/>";
c = self.D.createModel().createCompartment()
c.setId("M")
c.setSize(2.5)
dim = 2
c.setSpatialDimensions(dim)
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_Compartment_L2v1_constant(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<compartment id=\"cell\" size=\"1.2\" constant=\"false\"/>";
c = self.D.createModel().createCompartment()
c.setId("cell")
c.setSize(1.2)
c.setConstant(False)
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_Compartment_L2v1_unsetSize(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<compartment id=\"A\"/>";
c = self.D.createModel().createCompartment()
c.setId("A")
c.unsetSize()
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_Compartment_L2v2_compartmentType(self):
self.D.setLevelAndVersion(2,2,False)
expected = "<compartment id=\"cell\" compartmentType=\"ct\"/>";
c = self.D.createModel().createCompartment()
c.setId("cell")
c.setCompartmentType("ct")
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_Compartment_L2v3_SBO(self):
self.D.setLevelAndVersion(2,3,False)
expected = "<compartment sboTerm=\"SBO:0000005\" id=\"cell\"/>";
c = self.D.createModel().createCompartment()
c.setId("cell")
c.setSBOTerm(5)
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_Compartment_unsetVolume(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<compartment name=\"A\"/>";
c = self.D.createModel().createCompartment()
c.setId("A")
c.unsetVolume()
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_Constraint(self):
self.D.setLevelAndVersion(2,2,False)
expected = "<constraint sboTerm=\"SBO:0000064\"/>";
ct = self.D.createModel().createConstraint()
ct.setSBOTerm(64)
self.assertEqual( True, self.equals(expected,ct.toSBML()) )
pass
def test_WriteSBML_Constraint_full(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<constraint sboTerm=\"SBO:0000064\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <leq/>\n" +
" <ci> P1 </ci>\n" +
" <ci> t </ci>\n" +
" </apply>\n" +
" </math>\n" +
" <message>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\"> Species P1 is out of range </p>\n" +
" </message>\n" +
"</constraint>")
c = self.D.createModel().createConstraint()
node = libsbml.parseFormula("leq(P1,t)")
c.setMath(node)
c.setSBOTerm(64)
text = libsbml.XMLNode.convertStringToXMLNode(" Species P1 is out of range ")
triple = libsbml.XMLTriple("p", "http://www.w3.org/1999/xhtml", "")
att = libsbml.XMLAttributes()
xmlns = libsbml.XMLNamespaces()
xmlns.add("http://www.w3.org/1999/xhtml")
p = libsbml.XMLNode(triple,att,xmlns)
p.addChild(text)
triple1 = libsbml.XMLTriple("message", "", "")
att1 = libsbml.XMLAttributes()
message = libsbml.XMLNode(triple1,att1)
message.addChild(p)
c.setMessage(message)
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_Constraint_math(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<constraint>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <leq/>\n" +
" <ci> P1 </ci>\n" +
" <ci> t </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</constraint>")
c = self.D.createModel().createConstraint()
node = libsbml.parseFormula("leq(P1,t)")
c.setMath(node)
self.assertEqual( True, self.equals(expected,c.toSBML()) )
pass
def test_WriteSBML_Event(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<event id=\"e\"/>";
e = self.D.createModel().createEvent()
e.setId("e")
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_Event_WithSBO(self):
self.D.setLevelAndVersion(2,3,False)
expected = "<event sboTerm=\"SBO:0000076\" id=\"e\"/>";
e = self.D.createModel().createEvent()
e.setId("e")
e.setSBOTerm(76)
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_Event_WithUseValuesFromTriggerTime(self):
expected = "<event id=\"e\" useValuesFromTriggerTime=\"false\"/>";
self.D.setLevelAndVersion(2,4,False)
e = self.D.createModel().createEvent()
e.setId("e")
e.setUseValuesFromTriggerTime(False)
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_Event_both(self):
expected = wrapString("<event id=\"e\">\n" +
" <trigger>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <leq/>\n" +
" <ci> P1 </ci>\n" +
" <ci> t </ci>\n" +
" </apply>\n" +
" </math>\n" +
" </trigger>\n" +
" <delay>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <cn type=\"integer\"> 5 </cn>\n" +
" </math>\n" +
" </delay>\n" +
"</event>")
self.D.setLevelAndVersion(2,1,False)
e = self.D.createModel().createEvent()
e.setId("e")
node1 = libsbml.parseFormula("leq(P1,t)")
t = libsbml.Trigger( 2,1 )
t.setMath(node1)
node = libsbml.parseFormula("5")
d = libsbml.Delay( 2,1 )
d.setMath(node)
e.setDelay(d)
e.setTrigger(t)
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_Event_delay(self):
expected = wrapString("<event id=\"e\">\n" +
" <delay>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <cn type=\"integer\"> 5 </cn>\n" +
" </math>\n" +
" </delay>\n" +
"</event>")
self.D.setLevelAndVersion(2,1,False)
e = self.D.createModel().createEvent()
e.setId("e")
node = libsbml.parseFormula("5")
d = libsbml.Delay( 2,1 )
d.setMath(node)
e.setDelay(d)
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_Event_delayWithSBO(self):
expected = wrapString("<event id=\"e\">\n" +
" <delay sboTerm=\"SBO:0000064\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <cn type=\"integer\"> 5 </cn>\n" +
" </math>\n" +
" </delay>\n" +
"</event>")
self.D.setLevelAndVersion(2,3,False)
e = self.D.createModel().createEvent()
e.setId("e")
node = libsbml.parseFormula("5")
d = libsbml.Delay( 2,3 )
d.setMath(node)
d.setSBOTerm(64)
e.setDelay(d)
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_Event_full(self):
expected = wrapString("<event id=\"e\">\n" +
" <trigger>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <leq/>\n" +
" <ci> P1 </ci>\n" +
" <ci> t </ci>\n" +
" </apply>\n" +
" </math>\n" +
" </trigger>\n" +
" <listOfEventAssignments>\n" +
" <eventAssignment sboTerm=\"SBO:0000064\" variable=\"k2\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <cn type=\"integer\"> 0 </cn>\n" +
" </math>\n" +
" </eventAssignment>\n" +
" </listOfEventAssignments>\n" +
"</event>")
self.D.setLevelAndVersion(2,3,False)
e = self.D.createModel().createEvent()
e.setId("e")
node = libsbml.parseFormula("leq(P1,t)")
t = libsbml.Trigger( 2,3 )
t.setMath(node)
math = libsbml.parseFormula("0")
ea = libsbml.EventAssignment( 2,3 )
ea.setVariable("k2")
ea.setMath(math)
ea.setSBOTerm(64)
e.setTrigger(t)
e.addEventAssignment(ea)
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_Event_trigger(self):
expected = wrapString("<event id=\"e\">\n" +
" <trigger>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <leq/>\n" +
" <ci> P1 </ci>\n" +
" <ci> t </ci>\n" +
" </apply>\n" +
" </math>\n" +
" </trigger>\n" +
"</event>")
self.D.setLevelAndVersion(2,1,False)
e = self.D.createModel().createEvent()
e.setId("e")
node = libsbml.parseFormula("leq(P1,t)")
t = libsbml.Trigger( 2,1 )
t.setMath(node)
e.setTrigger(t)
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_Event_trigger_withSBO(self):
expected = wrapString("<event id=\"e\">\n" +
" <trigger sboTerm=\"SBO:0000064\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <leq/>\n" +
" <ci> P1 </ci>\n" +
" <ci> t </ci>\n" +
" </apply>\n" +
" </math>\n" +
" </trigger>\n" +
"</event>")
self.D.setLevelAndVersion(2,3,False)
e = self.D.createModel().createEvent()
e.setId("e")
node = libsbml.parseFormula("leq(P1,t)")
t = libsbml.Trigger( 2,3 )
t.setMath(node)
t.setSBOTerm(64)
e.setTrigger(t)
self.assertEqual( True, self.equals(expected,e.toSBML()) )
pass
def test_WriteSBML_FunctionDefinition(self):
expected = wrapString("<functionDefinition id=\"pow3\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <lambda>\n" +
" <bvar>\n" +
" <ci> x </ci>\n" +
" </bvar>\n" +
" <apply>\n" +
" <power/>\n" +
" <ci> x </ci>\n" +
" <cn type=\"integer\"> 3 </cn>\n" +
" </apply>\n" +
" </lambda>\n" +
" </math>\n" +
"</functionDefinition>")
fd = libsbml.FunctionDefinition( 2,4 )
fd.setId("pow3")
fd.setMath(libsbml.parseFormula("lambda(x, x^3)"))
self.assertEqual( True, self.equals(expected,fd.toSBML()) )
pass
def test_WriteSBML_FunctionDefinition_withSBO(self):
expected = wrapString("<functionDefinition sboTerm=\"SBO:0000064\" id=\"pow3\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <lambda>\n" +
" <bvar>\n" +
" <ci> x </ci>\n" +
" </bvar>\n" +
" <apply>\n" +
" <power/>\n" +
" <ci> x </ci>\n" +
" <cn type=\"integer\"> 3 </cn>\n" +
" </apply>\n" +
" </lambda>\n" +
" </math>\n" +
"</functionDefinition>")
fd = libsbml.FunctionDefinition( 2,4 )
fd.setId("pow3")
fd.setMath(libsbml.parseFormula("lambda(x, x^3)"))
fd.setSBOTerm(64)
self.assertEqual( True, self.equals(expected,fd.toSBML()) )
pass
def test_WriteSBML_INF(self):
expected = "<parameter id=\"p\" value=\"INF\"/>";
p = self.D.createModel().createParameter()
p.setId("p")
p.setValue(util_PosInf())
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_InitialAssignment(self):
self.D.setLevelAndVersion(2,2,False)
expected = "<initialAssignment sboTerm=\"SBO:0000064\" symbol=\"c\"/>";
ia = self.D.createModel().createInitialAssignment()
ia.setSBOTerm(64)
ia.setSymbol("c")
self.assertEqual( True, self.equals(expected,ia.toSBML()) )
pass
def test_WriteSBML_InitialAssignment_math(self):
expected = wrapString("<initialAssignment symbol=\"c\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <plus/>\n" +
" <ci> a </ci>\n" +
" <ci> b </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</initialAssignment>")
ia = self.D.createModel().createInitialAssignment()
node = libsbml.parseFormula("a + b")
ia.setMath(node)
ia.setSymbol("c")
self.assertEqual( True, self.equals(expected,ia.toSBML()) )
pass
def test_WriteSBML_KineticLaw(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapString("<kineticLaw formula=\"k * e\" timeUnits=\"second\" " + "substanceUnits=\"item\"/>")
kl = self.D.createModel().createReaction().createKineticLaw()
kl.setFormula("k * e")
kl.setTimeUnits("second")
kl.setSubstanceUnits("item")
self.assertEqual( True, self.equals(expected,kl.toSBML()) )
pass
def test_WriteSBML_KineticLaw_ListOfParameters(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapString("<kineticLaw formula=\"nk * e\" timeUnits=\"second\" " +
"substanceUnits=\"item\">\n" +
" <listOfParameters>\n" +
" <parameter name=\"n\" value=\"1.2\"/>\n" +
" </listOfParameters>\n" +
"</kineticLaw>")
kl = self.D.createModel().createReaction().createKineticLaw()
kl.setFormula("nk * e")
kl.setTimeUnits("second")
kl.setSubstanceUnits("item")
p = kl.createParameter()
p.setName("n")
p.setValue(1.2)
self.assertEqual( True, self.equals(expected,kl.toSBML()) )
pass
def test_WriteSBML_KineticLaw_l2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<kineticLaw timeUnits=\"second\" substanceUnits=\"item\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <divide/>\n" +
" <apply>\n" +
" <times/>\n" +
" <ci> vm </ci>\n" +
" <ci> s1 </ci>\n" +
" </apply>\n" +
" <apply>\n" +
" <plus/>\n" +
" <ci> km </ci>\n" +
" <ci> s1 </ci>\n" +
" </apply>\n" +
" </apply>\n" +
" </math>\n" +
"</kineticLaw>")
kl = self.D.createModel().createReaction().createKineticLaw()
kl.setTimeUnits("second")
kl.setSubstanceUnits("item")
kl.setFormula("(vm * s1)/(km + s1)")
self.assertEqual( True, self.equals(expected,kl.toSBML()) )
pass
def test_WriteSBML_KineticLaw_skipOptional(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<kineticLaw formula=\"k * e\"/>";
kl = self.D.createModel().createReaction().createKineticLaw()
kl.setFormula("k * e")
self.assertEqual( True, self.equals(expected,kl.toSBML()) )
pass
def test_WriteSBML_KineticLaw_withSBO(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<kineticLaw sboTerm=\"SBO:0000001\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <divide/>\n" +
" <apply>\n" +
" <times/>\n" +
" <ci> vm </ci>\n" +
" <ci> s1 </ci>\n" +
" </apply>\n" +
" <apply>\n" +
" <plus/>\n" +
" <ci> km </ci>\n" +
" <ci> s1 </ci>\n" +
" </apply>\n" +
" </apply>\n" +
" </math>\n" +
"</kineticLaw>")
kl = self.D.createModel().createReaction().createKineticLaw()
kl.setFormula("(vm * s1)/(km + s1)")
kl.setSBOTerm(1)
self.assertEqual( True, self.equals(expected,kl.toSBML()) )
pass
def test_WriteSBML_Model(self):
self.D.setLevelAndVersion(1,1,False)
expected = wrapSBML_L1v1(" <model name=\"Branch\"/>\n")
self.D.createModel("Branch")
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_Model_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapSBML_L2v1(" <model id=\"Branch\"/>\n")
self.D.createModel("Branch")
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_Model_L2v1_skipOptional(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapSBML_L2v1(" <model/>\n")
self.D.createModel()
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_Model_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapSBML_L2v2(" <model sboTerm=\"SBO:0000004\" id=\"Branch\"/>\n")
m = self.D.createModel("Branch")
m.setSBOTerm(4)
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_Model_skipOptional(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapSBML_L1v2(" <model/>\n")
self.D.createModel()
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_NaN(self):
expected = "<parameter id=\"p\" value=\"NaN\"/>";
p = self.D.createModel().createParameter()
p.setId("p")
p.setValue(util_NaN())
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_NegINF(self):
expected = "<parameter id=\"p\" value=\"-INF\"/>";
p = self.D.createModel().createParameter()
p.setId("p")
p.setValue(util_NegInf())
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_Parameter(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<parameter name=\"Km1\" value=\"2.3\" units=\"second\"/>";
p = self.D.createModel().createParameter()
p.setId("Km1")
p.setValue(2.3)
p.setUnits("second")
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_ParameterRule(self):
self.D.setLevelAndVersion(1,1,False)
expected = wrapString("<parameterRule " + "formula=\"p * t\" type=\"rate\" name=\"p\"/>")
self.D.createModel()
self.D.getModel().createParameter().setId("p")
r = self.D.getModel().createRateRule()
r.setVariable("p")
r.setFormula("p * t")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_ParameterRule_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<rateRule variable=\"p\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <times/>\n" +
" <ci> p </ci>\n" +
" <ci> t </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</rateRule>")
self.D.createModel()
self.D.getModel().createParameter().setId("p")
r = self.D.getModel().createRateRule()
r.setVariable("p")
r.setFormula("p * t")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_ParameterRule_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<rateRule sboTerm=\"SBO:0000007\" variable=\"p\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <times/>\n" +
" <ci> p </ci>\n" +
" <ci> t </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</rateRule>")
self.D.createModel()
self.D.getModel().createParameter().setId("p")
r = self.D.getModel().createRateRule()
r.setVariable("p")
r.setFormula("p * t")
r.setSBOTerm(7)
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_ParameterRule_defaults(self):
self.D.setLevelAndVersion(1,1,False)
expected = "<parameterRule formula=\"p * t\" name=\"p\"/>";
self.D.createModel()
self.D.getModel().createParameter().setId("p")
r = self.D.getModel().createAssignmentRule()
r.setVariable("p")
r.setFormula("p * t")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_Parameter_L1v1_required(self):
self.D.setLevelAndVersion(1,1,False)
expected = "<parameter name=\"Km1\" value=\"NaN\"/>";
p = self.D.createModel().createParameter()
p.setId("Km1")
p.unsetValue()
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_Parameter_L1v2_skipOptional(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<parameter name=\"Km1\"/>";
p = self.D.createModel().createParameter()
p.setId("Km1")
p.unsetValue()
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_Parameter_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<parameter id=\"Km1\" value=\"2.3\" units=\"second\"/>";
p = self.D.createModel().createParameter()
p.setId("Km1")
p.setValue(2.3)
p.setUnits("second")
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_Parameter_L2v1_constant(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<parameter id=\"x\" constant=\"false\"/>";
p = self.D.createModel().createParameter()
p.setId("x")
p.setConstant(False)
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_Parameter_L2v1_skipOptional(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<parameter id=\"Km1\"/>";
p = self.D.createModel().createParameter()
p.setId("Km1")
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_Parameter_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = "<parameter sboTerm=\"SBO:0000002\" id=\"Km1\" value=\"2.3\" units=\"second\"/>";
p = self.D.createModel().createParameter()
p.setId("Km1")
p.setValue(2.3)
p.setUnits("second")
p.setSBOTerm(2)
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def test_WriteSBML_Reaction(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<reaction name=\"r\" reversible=\"false\" fast=\"true\"/>";
r = self.D.createModel().createReaction()
r.setId("r")
r.setReversible(False)
r.setFast(True)
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_Reaction_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<reaction id=\"r\" reversible=\"false\"/>";
r = self.D.createModel().createReaction()
r.setId("r")
r.setReversible(False)
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_Reaction_L2v1_full(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<reaction id=\"v1\">\n" +
" <listOfReactants>\n" +
" <speciesReference species=\"x0\"/>\n" +
" </listOfReactants>\n" +
" <listOfProducts>\n" +
" <speciesReference species=\"s1\"/>\n" +
" </listOfProducts>\n" +
" <listOfModifiers>\n" +
" <modifierSpeciesReference species=\"m1\"/>\n" +
" </listOfModifiers>\n" +
" <kineticLaw>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <divide/>\n" +
" <apply>\n" +
" <times/>\n" +
" <ci> vm </ci>\n" +
" <ci> s1 </ci>\n" +
" </apply>\n" +
" <apply>\n" +
" <plus/>\n" +
" <ci> km </ci>\n" +
" <ci> s1 </ci>\n" +
" </apply>\n" +
" </apply>\n" +
" </math>\n" +
" </kineticLaw>\n" +
"</reaction>")
self.D.createModel()
r = self.D.getModel().createReaction()
r.setId("v1")
r.createReactant().setSpecies("x0")
r.createProduct().setSpecies("s1")
r.createModifier().setSpecies("m1")
r.createKineticLaw().setFormula("(vm * s1)/(km + s1)")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_Reaction_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = "<reaction sboTerm=\"SBO:0000064\" id=\"r\" name=\"r1\" reversible=\"false\" fast=\"true\"/>";
r = self.D.createModel().createReaction()
r.setId("r")
r.setName("r1")
r.setReversible(False)
r.setFast(True)
r.setSBOTerm(64)
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_Reaction_defaults(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<reaction name=\"r\"/>";
r = self.D.createModel().createReaction()
r.setId("r")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_Reaction_full(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapString("<reaction name=\"v1\">\n" +
" <listOfReactants>\n" +
" <speciesReference species=\"x0\"/>\n" +
" </listOfReactants>\n" +
" <listOfProducts>\n" +
" <speciesReference species=\"s1\"/>\n" +
" </listOfProducts>\n" +
" <kineticLaw formula=\"(vm * s1)/(km + s1)\"/>\n" +
"</reaction>")
self.D.createModel()
r = self.D.getModel().createReaction()
r.setId("v1")
r.createReactant().setSpecies("x0")
r.createProduct().setSpecies("s1")
r.createKineticLaw().setFormula("(vm * s1)/(km + s1)")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_SBMLDocument_L1v1(self):
self.D.setLevelAndVersion(1,1,False)
expected = wrapXML("<sbml xmlns=\"http://www.sbml.org/sbml/level1\" " + "level=\"1\" version=\"1\"/>\n")
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_SBMLDocument_L1v2(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapXML("<sbml xmlns=\"http://www.sbml.org/sbml/level1\" " + "level=\"1\" version=\"2\"/>\n")
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_SBMLDocument_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapXML("<sbml xmlns=\"http://www.sbml.org/sbml/level2\" " + "level=\"2\" version=\"1\"/>\n")
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_SBMLDocument_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapXML("<sbml xmlns=\"http://www.sbml.org/sbml/level2/version2\" " + "level=\"2\" version=\"2\"/>\n")
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_Species(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapString("<species name=\"Ca2\" compartment=\"cell\" initialAmount=\"0.7\"" + " units=\"mole\" boundaryCondition=\"true\" charge=\"2\"/>")
s = self.D.createModel().createSpecies()
s.setName("Ca2")
s.setCompartment("cell")
s.setInitialAmount(0.7)
s.setUnits("mole")
s.setBoundaryCondition(True)
s.setCharge(2)
self.assertEqual( True, self.equals(expected,s.toSBML()) )
pass
def test_WriteSBML_SpeciesConcentrationRule(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapString("<speciesConcentrationRule " + "formula=\"t * s\" type=\"rate\" species=\"s\"/>")
self.D.createModel()
self.D.getModel().createSpecies().setId("s")
r = self.D.getModel().createRateRule()
r.setVariable("s")
r.setFormula("t * s")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_SpeciesConcentrationRule_L1v1(self):
self.D.setLevelAndVersion(1,1,False)
expected = "<specieConcentrationRule formula=\"t * s\" specie=\"s\"/>";
self.D.createModel()
self.D.getModel().createSpecies().setId("s")
r = self.D.getModel().createAssignmentRule()
r.setVariable("s")
r.setFormula("t * s")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_SpeciesConcentrationRule_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<assignmentRule variable=\"s\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <times/>\n" +
" <ci> t </ci>\n" +
" <ci> s </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</assignmentRule>")
self.D.createModel()
self.D.getModel().createSpecies().setId("s")
r = self.D.getModel().createAssignmentRule()
r.setVariable("s")
r.setFormula("t * s")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_SpeciesConcentrationRule_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<assignmentRule sboTerm=\"SBO:0000006\" variable=\"s\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <times/>\n" +
" <ci> t </ci>\n" +
" <ci> s </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</assignmentRule>")
self.D.createModel()
self.D.getModel().createSpecies().setId("s")
r = self.D.getModel().createAssignmentRule()
r.setVariable("s")
r.setFormula("t * s")
r.setSBOTerm(6)
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_SpeciesConcentrationRule_defaults(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<speciesConcentrationRule formula=\"t * s\" species=\"s\"/>";
self.D.createModel()
self.D.getModel().createSpecies().setId("s")
r = self.D.getModel().createAssignmentRule()
r.setVariable("s")
r.setFormula("t * s")
self.assertEqual( True, self.equals(expected,r.toSBML()) )
pass
def test_WriteSBML_SpeciesReference(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<speciesReference species=\"s\" stoichiometry=\"3\" denominator=\"2\"/>";
sr = self.D.createModel().createReaction().createReactant()
sr.setSpecies("s")
sr.setStoichiometry(3)
sr.setDenominator(2)
self.assertEqual( True, self.equals(expected,sr.toSBML()) )
pass
def test_WriteSBML_SpeciesReference_L1v1(self):
self.D.setLevelAndVersion(1,1,False)
expected = "<specieReference specie=\"s\" stoichiometry=\"3\" denominator=\"2\"/>";
sr = self.D.createModel().createReaction().createReactant()
sr.setSpecies("s")
sr.setStoichiometry(3)
sr.setDenominator(2)
self.assertEqual( True, self.equals(expected,sr.toSBML()) )
pass
def test_WriteSBML_SpeciesReference_L2v1_1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<speciesReference species=\"s\">\n" +
" <stoichiometryMath>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <cn type=\"rational\"> 3 <sep/> 2 </cn>\n" +
" </math>\n" +
" </stoichiometryMath>\n" +
"</speciesReference>")
sr = self.D.createModel().createReaction().createReactant()
sr.setSpecies("s")
sr.setStoichiometry(3)
sr.setDenominator(2)
self.assertEqual( True, self.equals(expected,sr.toSBML()) )
pass
def test_WriteSBML_SpeciesReference_L2v1_2(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<speciesReference species=\"s\" stoichiometry=\"3.2\"/>";
sr = self.D.createModel().createReaction().createReactant()
sr.setSpecies("s")
sr.setStoichiometry(3.2)
self.assertEqual( True, self.equals(expected,sr.toSBML()) )
pass
def test_WriteSBML_SpeciesReference_L2v1_3(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<speciesReference species=\"s\">\n" +
" <stoichiometryMath>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <divide/>\n" +
" <cn type=\"integer\"> 1 </cn>\n" +
" <ci> d </ci>\n" +
" </apply>\n" +
" </math>\n" +
" </stoichiometryMath>\n" +
"</speciesReference>")
sr = self.D.createModel().createReaction().createReactant()
sr.setSpecies("s")
math = libsbml.parseFormula("1/d")
stoich = sr.createStoichiometryMath()
stoich.setMath(math)
sr.setStoichiometryMath(stoich)
self.assertEqual( True, self.equals(expected,sr.toSBML()) )
pass
def test_WriteSBML_SpeciesReference_L2v2_1(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<speciesReference sboTerm=\"SBO:0000009\" id=\"ss\" name=\"odd\" species=\"s\">\n" +
" <stoichiometryMath>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <cn type=\"rational\"> 3 <sep/> 2 </cn>\n" +
" </math>\n" +
" </stoichiometryMath>\n" +
"</speciesReference>")
sr = self.D.createModel().createReaction().createReactant()
sr.setSpecies("s")
sr.setStoichiometry(3)
sr.setDenominator(2)
sr.setId("ss")
sr.setName("odd")
sr.setSBOTerm(9)
self.assertEqual( True, self.equals(expected,sr.toSBML()) )
pass
def test_WriteSBML_SpeciesReference_L2v3_1(self):
self.D.setLevelAndVersion(2,3,False)
expected = "<speciesReference sboTerm=\"SBO:0000009\" id=\"ss\" name=\"odd\" species=\"s\" stoichiometry=\"3.2\"/>";
sr = self.D.createModel().createReaction().createReactant()
sr.setSpecies("s")
sr.setStoichiometry(3.2)
sr.setId("ss")
sr.setName("odd")
sr.setSBOTerm(9)
self.assertEqual( True, self.equals(expected,sr.toSBML()) )
pass
def test_WriteSBML_SpeciesReference_defaults(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<speciesReference species=\"s\"/>";
sr = self.D.createModel().createReaction().createReactant()
sr.setSpecies("s")
self.assertEqual( True, self.equals(expected,sr.toSBML()) )
pass
def test_WriteSBML_SpeciesType(self):
self.D.setLevelAndVersion(2,2,False)
expected = "<speciesType id=\"st\"/>";
st = self.D.createModel().createSpeciesType()
st.setId("st")
st.setSBOTerm(4)
self.assertEqual( True, self.equals(expected,st.toSBML()) )
pass
def test_WriteSBML_SpeciesType_withSBO(self):
self.D.setLevelAndVersion(2,3,False)
expected = "<speciesType sboTerm=\"SBO:0000004\" id=\"st\"/>";
st = self.D.createModel().createSpeciesType()
st.setId("st")
st.setSBOTerm(4)
self.assertEqual( True, self.equals(expected,st.toSBML()) )
pass
def test_WriteSBML_Species_L1v1(self):
self.D.setLevelAndVersion(1,1,False)
expected = wrapString("<specie name=\"Ca2\" compartment=\"cell\" initialAmount=\"0.7\"" + " units=\"mole\" boundaryCondition=\"true\" charge=\"2\"/>")
s = self.D.createModel().createSpecies()
s.setName("Ca2")
s.setCompartment("cell")
s.setInitialAmount(0.7)
s.setUnits("mole")
s.setBoundaryCondition(True)
s.setCharge(2)
self.assertEqual( True, self.equals(expected,s.toSBML()) )
pass
def test_WriteSBML_Species_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<species id=\"Ca2\" compartment=\"cell\" initialAmount=\"0.7\" " + "substanceUnits=\"mole\" constant=\"true\"/>")
s = self.D.createModel().createSpecies()
s.setId("Ca2")
s.setCompartment("cell")
s.setInitialAmount(0.7)
s.setSubstanceUnits("mole")
s.setConstant(True)
self.assertEqual( True, self.equals(expected,s.toSBML()) )
pass
def test_WriteSBML_Species_L2v1_skipOptional(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<species id=\"Ca2\" compartment=\"cell\"/>";
s = self.D.createModel().createSpecies()
s.setId("Ca2")
s.setCompartment("cell")
self.assertEqual( True, self.equals(expected,s.toSBML()) )
pass
def test_WriteSBML_Species_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapString("<species id=\"Ca2\" speciesType=\"st\" compartment=\"cell\" initialAmount=\"0.7\" " + "substanceUnits=\"mole\" constant=\"true\"/>")
s = self.D.createModel().createSpecies()
s.setId("Ca2")
s.setCompartment("cell")
s.setInitialAmount(0.7)
s.setSubstanceUnits("mole")
s.setConstant(True)
s.setSpeciesType("st")
self.assertEqual( True, self.equals(expected,s.toSBML()) )
pass
def test_WriteSBML_Species_L2v3(self):
self.D.setLevelAndVersion(2,3,False)
expected = "<species sboTerm=\"SBO:0000007\" id=\"Ca2\" compartment=\"cell\"/>";
s = self.D.createModel().createSpecies()
s.setId("Ca2")
s.setCompartment("cell")
s.setSBOTerm(7)
self.assertEqual( True, self.equals(expected,s.toSBML()) )
pass
def test_WriteSBML_Species_defaults(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapString("<species name=\"Ca2\" compartment=\"cell\" initialAmount=\"0.7\"" + " units=\"mole\" charge=\"2\"/>")
s = self.D.createModel().createSpecies()
s.setName("Ca2")
s.setCompartment("cell")
s.setInitialAmount(0.7)
s.setUnits("mole")
s.setCharge(2)
self.assertEqual( True, self.equals(expected,s.toSBML()) )
pass
def test_WriteSBML_Species_skipOptional(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<species name=\"Ca2\" compartment=\"cell\" initialAmount=\"0.7\"/>";
s = self.D.createModel().createSpecies()
s.setId("Ca2")
s.setCompartment("cell")
s.setInitialAmount(0.7)
self.assertEqual( True, self.equals(expected,s.toSBML()) )
pass
def test_WriteSBML_StoichiometryMath(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<stoichiometryMath>\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <divide/>\n" +
" <cn type=\"integer\"> 1 </cn>\n" +
" <ci> d </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</stoichiometryMath>")
math = libsbml.parseFormula("1/d")
stoich = self.D.createModel().createReaction().createReactant().createStoichiometryMath()
stoich.setMath(math)
self.assertEqual( True, self.equals(expected,stoich.toSBML()) )
pass
def test_WriteSBML_StoichiometryMath_withSBO(self):
self.D.setLevelAndVersion(2,3,False)
expected = wrapString("<stoichiometryMath sboTerm=\"SBO:0000333\">\n" +
" <math xmlns=\"http://www.w3.org/1998/Math/MathML\">\n" +
" <apply>\n" +
" <divide/>\n" +
" <cn type=\"integer\"> 1 </cn>\n" +
" <ci> d </ci>\n" +
" </apply>\n" +
" </math>\n" +
"</stoichiometryMath>")
math = libsbml.parseFormula("1/d")
stoich = self.D.createModel().createReaction().createReactant().createStoichiometryMath()
stoich.setMath(math)
stoich.setSBOTerm(333)
self.assertEqual( True, self.equals(expected,stoich.toSBML()) )
pass
def test_WriteSBML_Unit(self):
self.D.setLevelAndVersion(2,4,False)
expected = "<unit kind=\"kilogram\" exponent=\"2\" scale=\"-3\"/>";
u = self.D.createModel().createUnitDefinition().createUnit()
u.setKind(libsbml.UNIT_KIND_KILOGRAM)
u.setExponent(2)
u.setScale(-3)
self.assertEqual( True, self.equals(expected,u.toSBML()) )
pass
def test_WriteSBML_UnitDefinition(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<unitDefinition name=\"mmls\"/>";
ud = self.D.createModel().createUnitDefinition()
ud.setId("mmls")
self.assertEqual( True, self.equals(expected,ud.toSBML()) )
pass
def test_WriteSBML_UnitDefinition_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<unitDefinition id=\"mmls\"/>";
ud = self.D.createModel().createUnitDefinition()
ud.setId("mmls")
self.assertEqual( True, self.equals(expected,ud.toSBML()) )
pass
def test_WriteSBML_UnitDefinition_L2v1_full(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapString("<unitDefinition id=\"Fahrenheit\">\n" +
" <listOfUnits>\n" +
" <unit kind=\"Celsius\" multiplier=\"1.8\" offset=\"32\"/>\n" +
" </listOfUnits>\n" +
"</unitDefinition>")
ud = self.D.createModel().createUnitDefinition()
ud.setId("Fahrenheit")
u1 = ud.createUnit()
u1.setKind(libsbml.UnitKind_forName("Celsius"))
u1.setMultiplier(1.8)
u1.setOffset(32)
self.assertEqual( True, self.equals(expected,ud.toSBML()) )
pass
def test_WriteSBML_UnitDefinition_full(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapString("<unitDefinition name=\"mmls\">\n" +
" <listOfUnits>\n" +
" <unit kind=\"mole\" scale=\"-3\"/>\n" +
" <unit kind=\"liter\" exponent=\"-1\"/>\n" +
" <unit kind=\"second\" exponent=\"-1\"/>\n" +
" </listOfUnits>\n" +
"</unitDefinition>")
ud = self.D.createModel().createUnitDefinition()
ud.setId("mmls")
u1 = ud.createUnit()
u1.setKind(libsbml.UNIT_KIND_MOLE)
u1.setScale(-3)
u2 = ud.createUnit()
u2.setKind(libsbml.UNIT_KIND_LITER)
u2.setExponent(-1)
u3 = ud.createUnit()
u3.setKind(libsbml.UNIT_KIND_SECOND)
u3.setExponent(-1)
self.assertEqual( True, self.equals(expected,ud.toSBML()) )
pass
def test_WriteSBML_Unit_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = "<unit kind=\"Celsius\" multiplier=\"1.8\" offset=\"32\"/>";
u = self.D.createModel().createUnitDefinition().createUnit()
u.setKind(libsbml.UnitKind_forName("Celsius"))
u.setMultiplier(1.8)
u.setOffset(32)
self.assertEqual( True, self.equals(expected,u.toSBML()) )
pass
def test_WriteSBML_Unit_defaults(self):
self.D.setLevelAndVersion(1,2,False)
expected = "<unit kind=\"kilogram\"/>";
u = self.D.createModel().createUnitDefinition().createUnit()
u.setKind(libsbml.UNIT_KIND_KILOGRAM)
self.assertEqual( True, self.equals(expected,u.toSBML()) )
pass
def test_WriteSBML_Unit_l2v3(self):
self.D.setLevelAndVersion(2,3,False)
expected = "<unit kind=\"kilogram\" exponent=\"2\" scale=\"-3\"/>";
u = self.D.createModel().createUnitDefinition().createUnit()
u.setKind(libsbml.UNIT_KIND_KILOGRAM)
u.setExponent(2)
u.setScale(-3)
u.setOffset(32)
self.assertEqual( True, self.equals(expected,u.toSBML()) )
pass
def test_WriteSBML_elements_L1v2(self):
self.D.setLevelAndVersion(1,2,False)
expected = wrapSBML_L1v2(" <model>\n" +
" <listOfUnitDefinitions>\n" +
" <unitDefinition/>\n" +
" </listOfUnitDefinitions>\n" +
" <listOfCompartments>\n" +
" <compartment/>\n" +
" </listOfCompartments>\n" +
" <listOfSpecies>\n" +
" <species initialAmount=\"0\"/>\n" +
" </listOfSpecies>\n" +
" <listOfParameters>\n" +
" <parameter/>\n" +
" </listOfParameters>\n" +
" <listOfRules>\n" +
" <algebraicRule/>\n" +
" </listOfRules>\n" +
" <listOfReactions>\n" +
" <reaction/>\n" +
" </listOfReactions>\n" +
" </model>\n")
m = self.D.createModel()
m.createUnitDefinition()
m.createCompartment()
m.createParameter()
m.createAlgebraicRule()
m.createReaction()
m.createSpecies()
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_elements_L2v1(self):
self.D.setLevelAndVersion(2,1,False)
expected = wrapSBML_L2v1(" <model>\n" +
" <listOfFunctionDefinitions>\n" +
" <functionDefinition/>\n" +
" </listOfFunctionDefinitions>\n" +
" <listOfUnitDefinitions>\n" +
" <unitDefinition/>\n" +
" </listOfUnitDefinitions>\n" +
" <listOfCompartments>\n" +
" <compartment/>\n" +
" </listOfCompartments>\n" +
" <listOfSpecies>\n" +
" <species/>\n" +
" </listOfSpecies>\n" +
" <listOfParameters>\n" +
" <parameter/>\n" +
" </listOfParameters>\n" +
" <listOfRules>\n" +
" <algebraicRule/>\n" +
" </listOfRules>\n" +
" <listOfReactions>\n" +
" <reaction/>\n" +
" </listOfReactions>\n" +
" <listOfEvents>\n" +
" <event/>\n" +
" </listOfEvents>\n" +
" </model>\n")
m = self.D.createModel()
m.createUnitDefinition()
m.createFunctionDefinition()
m.createCompartment()
m.createEvent()
m.createParameter()
m.createAlgebraicRule()
m.createInitialAssignment()
m.createConstraint()
m.createReaction()
m.createSpecies()
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_elements_L2v2(self):
self.D.setLevelAndVersion(2,2,False)
expected = wrapSBML_L2v2(" <model>\n" +
" <listOfFunctionDefinitions>\n" +
" <functionDefinition/>\n" +
" </listOfFunctionDefinitions>\n" +
" <listOfUnitDefinitions>\n" +
" <unitDefinition/>\n" +
" </listOfUnitDefinitions>\n" +
" <listOfCompartmentTypes>\n" +
" <compartmentType/>\n" +
" </listOfCompartmentTypes>\n" +
" <listOfSpeciesTypes>\n" +
" <speciesType/>\n" +
" </listOfSpeciesTypes>\n" +
" <listOfCompartments>\n" +
" <compartment/>\n" +
" </listOfCompartments>\n" +
" <listOfSpecies>\n" +
" <species/>\n" +
" </listOfSpecies>\n" +
" <listOfParameters>\n" +
" <parameter/>\n" +
" </listOfParameters>\n" +
" <listOfInitialAssignments>\n" +
" <initialAssignment/>\n" +
" </listOfInitialAssignments>\n" +
" <listOfRules>\n" +
" <algebraicRule/>\n" +
" </listOfRules>\n" +
" <listOfConstraints>\n" +
" <constraint/>\n" +
" </listOfConstraints>\n" +
" <listOfReactions>\n" +
" <reaction/>\n" +
" </listOfReactions>\n" +
" <listOfEvents>\n" +
" <event/>\n" +
" </listOfEvents>\n" +
" </model>\n")
m = self.D.createModel()
m.createUnitDefinition()
m.createFunctionDefinition()
m.createCompartmentType()
m.createSpeciesType()
m.createCompartment()
m.createEvent()
m.createParameter()
m.createAlgebraicRule()
m.createInitialAssignment()
m.createConstraint()
m.createReaction()
m.createSpecies()
self.S = libsbml.writeSBMLToString(self.D)
self.assertEqual( True, self.equals(expected,self.S) )
pass
def test_WriteSBML_error(self):
d = libsbml.SBMLDocument()
w = libsbml.SBMLWriter()
self.assertEqual( False, w.writeSBML(d, "/tmp/impossible/path/should/fail") )
self.assert_( d.getNumErrors() == 1 )
self.assert_( d.getError(0).getErrorId() == libsbml.XMLFileUnwritable )
d = None
w = None
pass
def test_WriteSBML_locale(self):
expected = "<parameter id=\"p\" value=\"3.31\" constant=\"true\"/>";
p = self.D.createModel().createParameter()
p.setId("p")
p.setValue(3.31)
p.setConstant(True)
self.assertEqual( True, self.equals(expected,p.toSBML()) )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestWriteSBML))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
TheCoSMoCompany/biopredyn | Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestWriteSBML.py | Python | bsd-3-clause | 56577 | ["VisIt"] | 21001f09be8e6c3824ecb94f55192623d571066bbc450ad39a071d797a09aa7e
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import Rectangle # Used to make dummy legend
# Open CSV File from Simulation
datafileS = open('../MASTER_DISTRIBUTIONS/HIIregion_popSynthesis.csv', 'r')
csvFileS = []
for row in datafileS:
csvFileS.append(row.strip().split(','))
# Columns of the simulation CSV are read in the selection loops further below
##
## Set Search Space Parameters and Initialization
##
northMin = 18
northMax = 65
southMin = 275-360
southMax = 340-360
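# Southern longitudes are wrapped to negative degrees: 275-360 = -85 and 340-360 = -20,
# so the southern (Quadrant IV) window spans roughly -85 to -20 degrees.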
northCompleteness = pow(10,-2)
southCompleteness= pow(10,0.32)
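# Completeness limits in integrated flux density (Jy, per the commented axis labels below);
# only sources above these limits are collected into compN / compS.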
northCandCSV = 'ATCA_candidates_phot_north.csv'
#northKnownCSV = 'ATCA_known_phot_north.csv'
northKnownCSV = 'wise_test_NEW_updated.csv'
southCandCSV = 'ATCA_candidates_phot_south.csv'
southKnownCSV = 'ATCA_known_phot_south.csv'
longN = list()
lumN = list()
longS = list()
lumS = list()
compN = list()
compS = list()
lumKnownNorth = list()
lumCandNorth = list()
lumKnownAndCandNorth = list()
indexS = 0
srcCountN = 0
lumKnownSouth = list()
lumCandSouth = list()
lumKnownAndCandSouth = list()
srcCountS = 0
logFlMinO = 48
logFlMinB2 = 45.57
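# Lower cutoffs applied to the simulated log ionizing flux (logFl, computed in the loops below):
# logFlMinO selects the brighter sample in the first loop, logFlMinB2 the extended sample in the second.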
(negFourBinN,negThreeBinN,negTwoBinN,negOneBinN,zeroBinN,oneBinN,twoBinN,threeBinN,fourBinN,fiveBinN,sixBinN) = (0,0,0,0,0,0,0,0,0,0,0)
(negFourBinCN,negThreeBinCN,negTwoBinCN,negOneBinCN,zeroBinCN,oneBinCN,twoBinCN,threeBinCN,fourBinCN,fiveBinCN,sixBinCN) = (0,0,0,0,0,0,0,0,0,0,0)
(negFourBinKN,negThreeBinKN,negTwoBinKN,negOneBinKN,zeroBinKN,oneBinKN,twoBinKN,threeBinKN,fourBinKN,fiveBinKN,sixBinKN) = (0,0,0,0,0,0,0,0,0,0,0)
(negFourBinKCN,negThreeBinKCN,negTwoBinKCN,negOneBinKCN,zeroBinKCN,oneBinKCN,twoBinKCN,threeBinKCN,fourBinKCN,fiveBinKCN,sixBinKCN) = (0,0,0,0,0,0,0,0,0,0,0)
(negFourBinS,negThreeBinS,negTwoBinS,negOneBinS,zeroBinS,oneBinS,twoBinS,threeBinS,fourBinS,fiveBinS,sixBinS) = (0,0,0,0,0,0,0,0,0,0,0)
(negFourBinCS,negThreeBinCS,negTwoBinCS,negOneBinCS,zeroBinCS,oneBinCS,twoBinCS,threeBinCS,fourBinCS,fiveBinCS,sixBinCS) = (0,0,0,0,0,0,0,0,0,0,0)
(negFourBinKS,negThreeBinKS,negTwoBinKS,negOneBinKS,zeroBinKS,oneBinKS,twoBinKS,threeBinKS,fourBinKS,fiveBinKS,sixBinKS) = (0,0,0,0,0,0,0,0,0,0,0)
(negFourBinKCS,negThreeBinKCS,negTwoBinKCS,negOneBinKCS,zeroBinKCS,oneBinKCS,twoBinKCS,threeBinKCS,fourBinKCS,fiveBinKCS,sixBinKCS) = (0,0,0,0,0,0,0,0,0,0,0)
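# Decade-bin counters for the flux histograms. Suffixes: N/S = north/south,
# C = candidate, K = known, KC = known + candidate combined; negFour..six label
# the log10 flux decade that each counter covers.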
##
## Northern Regions
##
# Simulated Regions in Both the North and South
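# For each simulated region: read its longitude (col 8) and position (cols 1-3; the y-8.5
# offset suggests kpc with the Sun at y = 8.5), convert the tabulated log luminosity (col 5)
# to a predicted 1.4 GHz flux at the Sun, then tally it into the longitude window and
# flux-decade bins defined above.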
while indexS < len(csvFileS) :
lS = float(csvFileS[indexS][8])
x = float(csvFileS[indexS][1])
y = float(csvFileS[indexS][2])
z = float(csvFileS[indexS][3])
d2 = pow(x,2)+pow(y-8.5,2)+pow(z,2) # Distance from sun to source squared
logFl = float(csvFileS[indexS][5])/4+47
fl = pow(10,logFl)
b = .9688 # Eq 7 Tremblin et al. Assume Te=10^4, freq=1.4 GHz
flS = fl/(7.603*pow(10,46))*b/d2
if (logFl >= logFlMinO) and (lS < northMax) and (lS > northMin) :
longN.append(lS)
lumN.append(flS)
srcCountN += 1
if flS <= pow(10,-3) :
negFourBinN += 1
elif flS <= pow(10,-2) :
negThreeBinN += 1
elif flS <= pow(10,-1) :
negTwoBinN += 1
elif flS <= pow(10,0) :
negOneBinN += 1
elif flS <= pow(10,1) :
zeroBinN += 1
elif flS <= pow(10,2) :
oneBinN += 1
elif flS <= pow(10,3) :
twoBinN += 1
elif flS <= pow(10,4) :
threeBinN += 1
elif flS <= pow(10,5) :
fourBinN += 1
elif flS <= pow(10,6) :
fiveBinN += 1
if flS >= northCompleteness :
compN.append(flS)
elif (logFl >= logFlMinO) and (lS < southMax) and (lS > southMin) :
longS.append(lS)
lumS.append(flS)
if flS <= pow(10,-3) :
negFourBinS += 1
elif flS <= pow(10,-2) :
negThreeBinS += 1
elif flS <= pow(10,-1) :
negTwoBinS += 1
elif flS <= pow(10,0) :
negOneBinS += 1
elif flS <= pow(10,1) :
zeroBinS += 1
elif flS <= pow(10,2) :
oneBinS += 1
elif flS <= pow(10,3) :
twoBinS += 1
elif flS <= pow(10,4) :
threeBinS += 1
elif flS <= pow(10,5) :
fourBinS += 1
elif flS <= pow(10,6) :
fiveBinS += 1
if flS >= southCompleteness :
compS.append(flS)
indexS += 1
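# Note: the hand-unrolled decade counters above could be obtained with a single
# numpy call; a rough sketch (assuming lumN already holds the fluxes in Jy):
#   countsN, edges = np.histogram(lumN, bins=np.logspace(-4, 6, 11))
# The histograms plotted further below use the same idea with finer log-spaced bins.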
indexS = 0
(compNB2,longNB2,lumNB2,longSB2,lumSB2,compSB2)=(list(),list(),list(),list(),list(),list())
# Simulated Regions in Both the North and South (O9.5 and above)
while indexS < len(csvFileS) :
lS = float(csvFileS[indexS][8])
x = float(csvFileS[indexS][1])
y = float(csvFileS[indexS][2])
z = float(csvFileS[indexS][3])
d2 = pow(x,2)+pow(y-8.5,2)+pow(z,2) # Distance from sun to source squared
logFl = float(csvFileS[indexS][5])/4+47
fl = pow(10,logFl)
b = .9688 # Eq 7 Tremblin et al. Assume Te=10^4, freq=1.4 GHz
flS = fl/(7.603*pow(10,46))*b/d2
if (logFl >= logFlMinB2) and (lS < northMax) and (lS > northMin) :
longNB2.append(lS)
lumNB2.append(flS)
srcCountN += 1
if flS >= northCompleteness :
compNB2.append(flS)
elif (logFl >= logFlMinB2) and (lS < southMax) and (lS > southMin) :
longSB2.append(lS)
lumSB2.append(flS)
if flS >= southCompleteness :
compSB2.append(flS)
indexS += 1
# Open CSV File from Candidate Regions in North (B2 and above)
datafileCandNorth = open(northCandCSV, 'r')
csvFileCandNorth = []
for row in datafileCandNorth:
csvFileCandNorth.append(row.strip().split(','))
# Save integrated flux values from the northern candidate-region CSV to a new list
lumCandNorth = list()
indexCandNorth = 0
while indexCandNorth < len(csvFileCandNorth) :
lCandNorth = float(csvFileCandNorth[indexCandNorth][2])
if lCandNorth > 0 :
lumCandNorth.append(lCandNorth)
if lCandNorth <= pow(10,-3) :
negFourBinCN += 1
elif lCandNorth <= pow(10,-2) :
negThreeBinCN += 1
elif lCandNorth <= pow(10,-1) :
negTwoBinCN += 1
elif lCandNorth <= pow(10,0) :
negOneBinCN += 1
elif lCandNorth <= pow(10,1) :
zeroBinCN += 1
elif lCandNorth <= pow(10,2) :
oneBinCN += 1
elif lCandNorth <= pow(10,3) :
twoBinCN += 1
elif lCandNorth <= pow(10,4) :
threeBinCN += 1
elif lCandNorth <= pow(10,5) :
fourBinCN += 1
elif lCandNorth <= pow(10,6) :
fiveBinCN += 1
indexCandNorth += 1
# Open CSV File from Known Regions in North
datafileKnownNorth = open(northKnownCSV, 'r')
csvFileKnownNorth = []
for row in datafileKnownNorth:
csvFileKnownNorth.append(row.strip().split(','))
# Save integrated flux values from the northern known-region CSV to a new list
indexKnownNorth = 1
while indexKnownNorth < len(csvFileKnownNorth) :
#lKnownNorth = float(csvFileKnownNorth[indexKnownNorth][2])
lKnownNorth = float(csvFileKnownNorth[indexKnownNorth][89]) # For Zoltan's file
if lKnownNorth > 0 :
lumKnownNorth.append(lKnownNorth)
if lKnownNorth <= pow(10,-3) :
negFourBinKN += 1
elif lKnownNorth <= pow(10,-2) :
negThreeBinKN += 1
elif lKnownNorth <= pow(10,-1) :
negTwoBinKN += 1
elif lKnownNorth <= pow(10,0) :
negOneBinKN += 1
elif lKnownNorth <= pow(10,1) :
zeroBinKN += 1
elif lKnownNorth <= pow(10,2) :
oneBinKN += 1
elif lKnownNorth <= pow(10,3) :
twoBinKN += 1
elif lKnownNorth <= pow(10,4) :
threeBinKN += 1
elif lKnownNorth <= pow(10,5) :
fourBinKN += 1
elif lKnownNorth <= pow(10,6) :
fiveBinKN += 1
indexKnownNorth += 1
lumKnownAndCandNorth = lumCandNorth + lumKnownNorth
negFourBinKCN = negFourBinKN + negFourBinCN
negThreeBinKCN = negThreeBinKN + negThreeBinCN
negTwoBinKCN = negTwoBinKN + negTwoBinCN
negOneBinKCN = negOneBinKN + negOneBinCN
zeroBinKCN = zeroBinKN + zeroBinCN
oneBinKCN = oneBinKN + oneBinCN
twoBinKCN = twoBinKN + twoBinCN
threeBinKCN = threeBinKN + threeBinCN
fourBinKCN = fourBinKN + fourBinCN
fiveBinKCN = fiveBinKN + fiveBinCN
sixBinKCN = sixBinKN + sixBinCN
##
## Southern Regions
##
## NOTE : Simulated regions in South were already accounted for earlier.
# Open CSV File from Candidate Regions in South
datafileCandSouth = open(southCandCSV, 'r')
csvFileCandSouth = []
for row in datafileCandSouth:
csvFileCandSouth.append(row.strip().split(','))
# Save integrated flux values from the southern candidate-region CSV to a new list
lumCandSouth = list()
indexCandSouth = 0
while indexCandSouth < len(csvFileCandSouth) :
lCandSouth = float(csvFileCandSouth[indexCandSouth][2])
if lCandSouth > 0 :
lumCandSouth.append(lCandSouth)
if lCandSouth <= pow(10,-3) :
negFourBinCS += 1
elif lCandSouth<= pow(10,-2) :
negThreeBinCS += 1
elif lCandSouth <= pow(10,-1) :
negTwoBinCS += 1
elif lCandSouth <= pow(10,0) :
negOneBinCS += 1
elif lCandSouth <= pow(10,1) :
zeroBinCS += 1
elif lCandSouth <= pow(10,2) :
oneBinCS += 1
elif lCandSouth <= pow(10,3) :
twoBinCS += 1
elif lCandSouth <= pow(10,4) :
threeBinCS += 1
elif lCandSouth <= pow(10,5) :
fourBinCS += 1
elif lCandSouth <= pow(10,6) :
fiveBinCS += 1
indexCandSouth += 1
# Open CSV File from Known Regions in South
datafileKnownSouth = open(southKnownCSV, 'r')
csvFileKnownSouth = []
for row in datafileKnownSouth:
csvFileKnownSouth.append(row.strip().split(','))
# Save integrated flux values from the southern known-region CSV to a new list
indexKnownSouth = 0
while indexKnownSouth < len(csvFileKnownSouth) :
lKnownSouth = float(csvFileKnownSouth[indexKnownSouth][2])
if lKnownSouth > 0 :
lumKnownSouth.append(lKnownSouth)
if lKnownSouth <= pow(10,-3) :
negFourBinKS += 1
elif lKnownSouth <= pow(10,-2) :
negThreeBinKS += 1
elif lKnownSouth <= pow(10,-1) :
negTwoBinKS += 1
elif lKnownSouth <= pow(10,0) :
negOneBinKS += 1
elif lKnownSouth <= pow(10,1) :
zeroBinKS += 1
elif lKnownSouth <= pow(10,2) :
oneBinKS += 1
elif lKnownSouth <= pow(10,3) :
twoBinKS += 1
elif lKnownSouth <= pow(10,4) :
threeBinKS += 1
elif lKnownSouth <= pow(10,5) :
fourBinKS += 1
elif lKnownSouth <= pow(10,6) :
fiveBinKS += 1
indexKnownSouth += 1
lumKnownAndCandSouth = lumCandSouth + lumKnownSouth
negFourBinKCS = negFourBinKS + negFourBinCS
negThreeBinKCS = negThreeBinKS + negThreeBinCS
negTwoBinKCS = negTwoBinKS + negTwoBinCS
negOneBinKCS = negOneBinKS + negOneBinCS
zeroBinKCS = zeroBinKS + zeroBinCS
oneBinKCS = oneBinKS + oneBinCS
twoBinKCS = twoBinKS + twoBinCS
threeBinKCS = threeBinKS + threeBinCS
fourBinKCS = fourBinKS + fourBinCS
fiveBinKCS = fiveBinKS + fiveBinCS
sixBinKCS = sixBinKS + sixBinCS
print "--------------------"
print "--Northern Regions--"
print "Simulated : " + str(len(lumN))
print "Candidate : " + str(len(lumCandNorth))
print "Known : " + str(len(lumKnownNorth))
print "Known & Cand : " + str(len(lumCandNorth)+len(lumKnownNorth))
print "--------------------"
print "--Southern Regions--"
print "Simulated : " + str(len(lumS))
print "Candidate : " + str(len(lumCandSouth))
print "Known : " + str(len(lumKnownSouth))
print "Known & Cand : " + str(len(lumCandSouth)+len(lumKnownSouth))
print "--------------------"
##
## Here Starts Plotting
##
TxtSize = 16
LnWidth = 2
figWidth = 8
figHeight = 4.5
compColor = "#E6E6E6"
simColor = '#B40404'#'red'
knownColor = '#0174DF'#'#81DAF5'
candColor = '#2E9AFE'
cAndKColor = '#0101DF'
##
## Plotting Northern Data
##
# Produce histogram of data
#plt.title("Continuum Flux versus Galactic Longitude across Galaxy (Simulated)")
#plt.xlabel("Galactic Longitude (deg)")
#plt.ylabel("Radio Continuum Integrated Flux at 1.4 GHz (Jy)")
#plt.scatter(longS,lumS,s=3,facecolor='0',lw=0)
#plt.yscale('log')
#plt.grid(True)
#plt.legend(loc='upper right')
#plt.xticks([-90,-75,-60,-45,-30,-15,0])
#plt.xticks([0,10,20,30,40,50,60,70,80,90])
#plt.xticks([-180,-135,-90,-45,0,45,90,135,180])
#plt.gca().invert_xaxis()
#plt.savefig('FluxRecVsLongitude_FirstQuad.eps', format='eps', dpi=1000)
#plt.show()
#print "Total Regions In Search Area : " + str(srcCount)
#print "Percent of Total Regions : " + str(srcCount*100/len(csvFileS))
# Produce histogram of Northern Data
#Completeness Limit
fig, (ax1,ax2) = plt.subplots(1,2, sharex=True, sharey=True)
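# Two panels with shared axes: ax1 = Quadrant I (northern samples), ax2 = Quadrant IV (southern samples).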
#ax1.hist(compN, bins=np.logspace(-2, 2, 47),histtype='bar',color=compColor,linewidth=0,alpha=0.5)
ax1.hist(lumKnownNorth, bins=np.logspace(-3, 2, 59), histtype='step',color='black',linewidth=LnWidth+1)
ax1.hist(lumN, bins=np.logspace(-3, 2, 59), histtype='step',color='blue',linewidth=LnWidth)
ax1.hist(lumNB2, bins=np.logspace(-3, 2, 59), histtype='step',color='red',linewidth=LnWidth)
#ax1.hist(lumCandNorth, bins=np.logspace(-4,2, 47), histtype='step',color=candColor,linewidth=LnWidth)
#ax1.hist(lumKnownAndCandNorth, bins=np.logspace(-4, 2, 47), histtype='step',color=cAndKColor,linewidth=3)
ax1.tick_params(which='both',width=LnWidth,labelsize=TxtSize)
ax1.set_ylim([pow(10,0),pow(10,2)])
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_title("Quadrant I",fontsize=TxtSize)
ax1.set_ylabel("HII Region Count",fontsize=TxtSize)
#ax1.set_xlabel("Integrated Flux Density",fontsize=TxtSize)
ax1.xaxis.grid(True)
ax1.xaxis.grid(linewidth=1,linestyle='--')
#ax1.yaxis.grid(True)
#ax1.yaxis.grid(linewidth=2)
# Galactic Simulation Histogram Bin Values
#simPosN = pow(10,2.8)
#ax1.text(pow(10,-3.5),simPosN,negFourBinN, ha='center',color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,-2.5),simPosN,negThreeBinN,ha='center', color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,-1.5),simPosN,negTwoBinN,ha='center',color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,-0.5),simPosN,negOneBinN,ha='center', color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,0.5),simPosN,zeroBinN,ha='center',color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,1.5),simPosN,oneBinN, ha='center',color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,2.5),simPosN,twoBinN,ha='center',color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,3.5),simPosN,threeBinN,ha='center', color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,4.5),simPosN,fourBinN,ha='center', color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,5.5),simPosN,fiveBinN,ha='center', color=simColor,fontsize=TxtSize)
# Known Regions Histogram Bin Values
#knownPosN = pow(10,2.65)
#ax1.text(pow(10,-3.5),knownPosN,negFourBinKN,ha='center', color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,-2.5),knownPosN,negThreeBinKN,ha='center',color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,-1.5),knownPosN,negTwoBinKN,ha='center',color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,-0.5),knownPosN,negOneBinKN,ha='center',color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,0.5),knownPosN,zeroBinKN,ha='center',color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,1.5),knownPosN,oneBinKN,ha='center',color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,2.5),knownPosN,twoBinKN,ha='center',color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,3.5),knownPosN,threeBinKN,ha='center',color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,4.5),knownPosN,fourBinKN,ha='center', color=knownColor,fontsize=TxtSize)
#ax1.text(pow(10,5.5),knownPosN,fiveBinKN,ha='center',color=knownColor,fontsize=TxtSize)
# Candidate Histogram Bin Values
#candPosN = pow(10,2.5)
#ax1.text(pow(10,-3.5),candPosN,negFourBinCN,ha='center',color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,-2.5),candPosN,negThreeBinCN,ha='center',color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,-1.5),candPosN,negTwoBinCN,ha='center',color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,-0.5),candPosN,negOneBinCN,ha='center',color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,0.5),candPosN,zeroBinCN,ha='center',color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,1.5),candPosN,oneBinCN,ha='center',color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,2.5),candPosN,twoBinCN,ha='center',color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,3.5),candPosN,threeBinCN,ha='center',color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,4.5),candPosN,fourBinCN,ha='center', color=candColor,fontsize=TxtSize)
#ax1.text(pow(10,5.5),candPosN,fiveBinCN,ha='center',color=candColor,fontsize=TxtSize)
# Candidates Plus Known Histogram Bin Values
#cAndKPosN = pow(10,2.35)
#ax1.text(pow(10,-3.5),cAndKPosN,negFourBinKCN, ha='center',color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,-2.5),cAndKPosN,negThreeBinKCN,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,-1.5),cAndKPosN,negTwoBinKCN,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,-0.5),cAndKPosN,negOneBinKCN,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,0.5),cAndKPosN,zeroBinKCN,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,1.5),cAndKPosN,oneBinKCN,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,2.5),cAndKPosN,twoBinKCN,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,3.5),cAndKPosN,threeBinKCN,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,4.5),cAndKPosN,fourBinKCN,ha='center', color=cAndKColor,fontsize=TxtSize)
#ax1.text(pow(10,5.5),cAndKPosN,fiveBinKCN,ha='center',color=cAndKColor,fontsize=TxtSize)
##
## Plotting Southern Data
##
# Produce histogram of Southern Data
#ax2.hist(compS, bins=np.logspace(-2, 2, 47),histtype='bar',color=compColor,linewidth=0,alpha=0.5)
ax2.hist(lumKnownSouth, bins=np.logspace(-3, 2, 59), histtype='step',color='black',linewidth=LnWidth+1)
ax2.hist(lumS, bins=np.logspace(-3, 2, 59), histtype='step',color='blue',linewidth=LnWidth)
ax2.hist(lumSB2, bins=np.logspace(-3, 2, 59), histtype='step',color='red',linewidth=LnWidth)
#ax2.hist(lumCandSouth, bins=np.logspace(-4, 2, 47), histtype='step',color=candColor,linewidth=LnWidth)
#ax2.hist(lumKnownAndCandSouth, bins=np.logspace(-4, 2, 47), histtype='step',color=cAndKColor,linewidth=3)
ax2.tick_params(which='both',width=LnWidth,labelsize=TxtSize)
ax2.set_xscale('log')
ax2.set_yscale('log')
ax2.set_ylim([pow(10,0),pow(10,3)])
ax2.set_xlim([pow(10,-3),pow(10,2)])
ax2.set_title("Quadrant IV",fontsize=TxtSize)
#ax2.set_xlabel("Integrated Flux Density",fontsize=TxtSize)
ax2.xaxis.grid(True)
ax2.xaxis.grid(linewidth=1,linestyle='--')
#ax2.yaxis.grid(True)
#ax2.yaxis.grid(linewidth=2)
# Galactic Simulation Histogram Bin Values
#simPosS = pow(10,2.8)
#ax2.text(pow(10,-3.5),simPosS,negFourBinS, ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,-2.5),simPosS,negThreeBinS,ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,-1.5),simPosS,negTwoBinS,ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,-0.5),simPosS,negOneBinS,ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,0.5),simPosS,zeroBinS,ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,1.5),simPosS,oneBinS,ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,2.5),simPosS,twoBinS,ha='center', color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,3.5),simPosS,threeBinS,ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,4.5),simPosS,fourBinS,ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,5.5),simPosS,fiveBinS,ha='center',color=simColor,fontsize=TxtSize)
#ax2.text(pow(10,-3.5),simPosS,"Simulated Population", ha='center',color=simColor,fontsize=TxtSize)
# Known Regions Histogram Bin Values
#knownPosS = pow(10,2.65)
#ax2.text(pow(10,-3.5),knownPosS,negFourBinKS,ha='center', color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,-2.5),knownPosS,negThreeBinKS,ha='center',color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,-1.5),knownPosS,negTwoBinKS,ha='center', color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,-0.5),knownPosS,negOneBinKS,ha='center',color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,0.5),knownPosS,zeroBinKS,ha='center',color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,1.5),knownPosS,oneBinKS,ha='center',color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,2.5),knownPosS,twoBinKS,ha='center',color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,3.5),knownPosS,threeBinKS,ha='center', color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,4.5),knownPosS,fourBinKS,ha='center',color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,5.5),knownPosS,fiveBinKS,ha='center',color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,-3.5),knownPosS,"Known Regions",ha='center', color=knownColor,fontsize=TxtSize)
# Candidate Histogram Bin Values
#candPosS = pow(10,2.5)
#ax2.text(pow(10,-3.5),candPosS,negFourBinCS, ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,-2.5),candPosS,negThreeBinCS,ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,-1.5),candPosS,negTwoBinCS,ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,-0.5),candPosS,negOneBinCS,ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,0.5),candPosS,zeroBinCS,ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,1.5),candPosS,oneBinCS,ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,2.5),candPosS,twoBinCS,ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,3.5),candPosS,threeBinCS,ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,4.5),candPosS,fourBinCS,ha='center', color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,5.5),candPosS,fiveBinCS,ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,-3.5),candPosS,"Candidate Regions",ha='center', color=candColor,fontsize=TxtSize)
#Candidates Plus Known Histogram Bin Values
#cAndKPosS = pow(10,2.35)
#ax2.text(pow(10,-3.5),cAndKPosS, negFourBinKCS,ha = 'center', color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,-2.5),cAndKPosS, negThreeBinKCS,ha = 'center',color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,-1.5),cAndKPosS,negTwoBinKCS,ha='center', color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,-0.5),cAndKPosS,negOneBinKCS,ha='center', color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,0.5),cAndKPosS,zeroBinKCS,ha='center', color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,1.5),cAndKPosS,oneBinKCS,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,2.5),cAndKPosS,twoBinKCS,ha='center',color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,3.5),cAndKPosS,threeBinKCS, ha='center',color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,4.5),cAndKPosS,fourBinKCS, ha='center',color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,5.5),cAndKPosS,fiveBinKCS, ha='center',color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,-3.5),cAndKPosS,"Known and Candidate", ha='center',color=cAndKColor,fontsize=TxtSize)
ax1.spines['right'].set_visible(False)
#ax1.spines['left'].set_visible(False)
#ax1.spines['top'].set_visible(False)
#ax1.spines['bottom'].set_visible(False)
ax1.yaxis.set_ticks_position('left')
#ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
#ax2.spines['top'].set_visible(False)
#ax2.spines['bottom'].set_visible(False)
ax2.yaxis.set_ticks_position('right')
simPosLabel = pow(10,1.65)
knownPosLabel = pow(10,1.5)
candPosLabel = pow(10,1.5)
cAndKPosLabel = pow(10,1.25)
#ax1.text(pow(10,1),simPosLabel,"Simulated",ha='center', color=simColor,fontsize=TxtSize)
#ax1.text(pow(10,1),knownPosLabel,"Known",ha='center',color=knownColor,fontsize=TxtSize)
#ax2.text(pow(10,-4),candPosLabel,"Candidate", ha='center',color=candColor,fontsize=TxtSize)
#ax2.text(pow(10,-4),cAndKPosLabel,"Known and", ha='center',color=cAndKColor,fontsize=TxtSize)
#ax2.text(pow(10,-4),pow(10,1.1),"Candidate", ha='center',color=cAndKColor,fontsize=TxtSize)
ax2.plot([pow(10,-3), pow(10,-3)], [pow(10,3), pow(10,0)], 'k-', lw=3)
#ax2.plot([pow(10,-2), pow(10,-2)], [pow(10,1.4), pow(10,0)], 'k-', lw=3)
# Showing completeness
#ax1.plot([2.7*pow(10,-2), 2.7*pow(10,-2)], [pow(10,2), pow(10,0)], 'k-', lw=4,alpha=0.7, color="#0B6138")
#ax2.plot([2.35*pow(10,-0.03), 2.35*pow(10,-0.03)], [pow(10,2), pow(10,0)], 'k-', lw=4,alpha=0.7,color="#0B6138")
##
## Combine North and South Panels into Same Figure
##
plt.setp((ax1,ax2), xticks=[pow(10,-2), pow(10,-1),pow(10,0),pow(10,1)])
fig.text(0.5, 0.04, 'Integrated Flux Density (Jy)', ha='center',fontsize=TxtSize)
fig.set_size_inches(figWidth,figHeight)
fig.subplots_adjust(hspace=0,wspace=0)
fig.subplots_adjust(bottom=0.15)
#Completeness Limits
#plt.line(1,1,1, 10)#, 'k-', lw=3)
#fig.plot([pow(10,0.5), pow(10,0.5)], [1.5*pow(10,1), pow(10,-1)], 'k-', lw=3)
# Curve labels
ax1.text(pow(10,.1),pow(10,2.75),'B2 & Earlier',ha='left', color='red',fontsize=TxtSize-5)
ax1.text(pow(10,.1),pow(10,2.55),'O9.5 & Earlier',ha='left', color='blue',fontsize=TxtSize-5)
ax1.text(pow(10,.1),pow(10,2.35),'Observed',ha='left', color='black',fontsize=TxtSize-5)
fig.savefig('FluxBinned_FirstFourthQuads_compare.eps', format='eps', dpi=1000)
fig.show()
print "Simulated in 4th Quad : " + str(negFourBinS+negThreeBinS+negTwoBinS+negOneBinS+zeroBinS+oneBinS+twoBinS+threeBinS+fourBinS+fiveBinS+sixBinS)
print "Known in 4th Quad : " + str(negFourBinKS+negThreeBinKS+negTwoBinKS+negOneBinKS+zeroBinKS+oneBinKS+twoBinKS+threeBinKS+fourBinKS+fiveBinKS+sixBinKS)
|
WillArmentrout/galSims
|
tests/completeness/Completeness_TwoPanel_Compare_priortoJune4_2018.py
|
Python
|
gpl-2.0
| 25,987
|
[
"Galaxy"
] |
edebf7010b9b2200ee740c7d49b5b20b17a1a43ac5aa5178c348525276af5cd0
|
from setuptools import setup
setup(
name='moltemplate',
packages=['moltemplate',
'moltemplate.nbody_alt_symmetry'],
package_dir={'moltemplate': 'moltemplate'}, #.py files are in "moltemplate/"
package_data={'moltemplate': ['force_fields/*.lt']}, #.lt files are in "moltemplate/force_fields/"
#package_data={'moltemplate/force_fields':['*.lt']}
#
#package_data={'moltemplate/force_fields':
# ['compass_published.lt',
# 'cooke_deserno_lipid.lt',
# 'gaff2.lt',
# 'gaff.lt',
# 'graphene.lt',
# 'graphite.lt',
# 'loplsaa.lt',
# 'martini.lt',
# 'oplsaa.lt',
# 'sdk.lt',
# 'spce_ice_rect16.lt',
# 'spce_ice_rect32.lt',
# 'spce_ice_rect8.lt',
# 'spce.lt',
# 'tip3p_1983_charmm.lt',
# 'tip3p_1983.lt',
# 'tip3p_2004.lt',
# 'tip5p.lt',
# 'trappe1998.lt',
# 'watmw.lt']},
description='A general cross-platform text-based molecule builder for LAMMPS',
long_description='Moltemplate is a general cross-platform text-based molecule builder for LAMMPS and ESPResSo. Moltemplate was intended for building custom coarse-grained molecular models, but it can be used to prepare realistic all-atom simulations as well. It supports a variety of force fields for all-atom and coarse-grained modeling (including many-body forces and non-point-like particles). New force fields and examples are added continually by users. NOTE: Downloading moltemplate from pypi using PIP will omit all examples and documentation. Examples and documentation are available at https://moltemplate.org and https://github.com/jewettaij/moltemplate.',
author='Andrew Jewett',
author_email='jewett.aij@gmail.com',
url='https://github.com/jewettaij/moltemplate',
download_url='https://github.com/jewettaij/moltemplate/archive/v2.17.6.zip',
version='2.17.6',
keywords=['simulation', 'LAMMPS', 'molecule editor', 'molecule builder',
'ESPResSo'],
license='MIT',
classifiers=['Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.7',
'Programming Language :: Unix Shell',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Multimedia :: Graphics :: 3D Modeling',
'Intended Audience :: Science/Research'],
scripts=['moltemplate/scripts/moltemplate.sh',
'moltemplate/scripts/cleanup_moltemplate.sh',
'moltemplate/scripts/pdb2crds.awk',
'moltemplate/scripts/emoltemplate.sh'],
entry_points={
'console_scripts': [
'ttree.py=moltemplate.ttree:main',
'ttree_render.py=moltemplate.ttree_render:main',
'bonds_by_type.py=moltemplate.bonds_by_type:main',
'charge_by_bond.py=moltemplate.charge_by_bond:main',
'dump2data.py=moltemplate.dump2data:main',
'extract_espresso_atom_types.py=moltemplate.extract_espresso_atom_types:main',
'extract_lammps_data.py=moltemplate.extract_lammps_data:main',
'ettree.py=moltemplate.ettree:main',
'genpoly.py=moltemplate.ettree:main',
'interpolate_curve.py=moltemplate.interpolate_curve:main',
'ltemplify.py=moltemplate.ltemplify:main',
'lttree.py=moltemplate.lttree:main',
'lttree_check.py=moltemplate.lttree_check:main',
'lttree_postprocess.py=moltemplate.lttree_postprocess:main',
'nbody_by_type.py=moltemplate.nbody_by_type:main',
'nbody_fix_ttree_assignments.py=moltemplate.nbody_fix_ttree_assignments:main',
'nbody_reorder_atoms.py=moltemplate.nbody_reorder_atoms:main',
'pdbsort.py=moltemplate.pdbsort:main',
'postprocess_input_script.py=moltemplate.postprocess_input_script:main',
'postprocess_coeffs.py=moltemplate.postprocess_coeffs:main',
'raw2data.py=moltemplate.raw2data:main',
'recenter_coords.py=moltemplate.recenter_coords:main',
'remove_duplicate_atoms.py=moltemplate.remove_duplicate_atoms:main',
'remove_duplicates_nbody.py=moltemplate.remove_duplicates_nbody:main',
'renumber_DATA_first_column.py=moltemplate.renumber_DATA_first_column:main']},
install_requires=[
'numpy',
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
zip_safe=True,
include_package_data=True
)
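# Illustrative note (not part of the original setup.py): the console_scripts entry points above
# mean that installing the package (e.g. `pip install .` from the repository root) places each
# listed name on the PATH, so running `ltemplify.py ...` dispatches to moltemplate.ltemplify:main.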
|
smsaladi/moltemplate
|
setup.py
|
Python
|
bsd-3-clause
| 4,935
|
[
"ESPResSo",
"LAMMPS"
] |
2d7a16fffd11676f1d2ecd223aa3b2e0e8d53fec12fb0c9ade9604e895e26f28
|
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import json
import operator
import os
import shutil
import sys
import tarfile
import tempfile
import threading
import time
import yaml
from contextlib import contextmanager
from distutils.version import LooseVersion, StrictVersion
from hashlib import sha256
from io import BytesIO
from yaml.error import YAMLError
try:
import queue
except ImportError:
import Queue as queue # Python 2
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.galaxy import get_collections_galaxy_meta_info
from ansible.galaxy.api import CollectionVersionMetadata, GalaxyError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils import six
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash, secure_hash_s
from ansible.module_utils.urls import open_url
urlparse = six.moves.urllib.parse.urlparse
urllib_error = six.moves.urllib.error
display = Display()
MANIFEST_FORMAT = 1
class CollectionRequirement:
_FILE_MAPPING = [(b'MANIFEST.json', 'manifest_file'), (b'FILES.json', 'files_file')]
def __init__(self, namespace, name, b_path, api, versions, requirement, force, parent=None, metadata=None,
files=None, skip=False):
"""
Represents a collection requirement, the versions that are available to be installed as well as any
dependencies the collection has.
:param namespace: The collection namespace.
:param name: The collection name.
:param b_path: Byte str of the path to the collection tarball if it has already been downloaded.
:param api: The GalaxyAPI to use if the collection is from Galaxy.
:param versions: A list of versions of the collection that are available.
:param requirement: The version requirement string used to verify that the list of versions fits the requirements.
:param force: Whether the force flag is applied to the collection.
:param parent: The name of the parent the collection is a dependency of.
:param metadata: The galaxy.api.CollectionVersionMetadata that has already been retrieved from the Galaxy
server.
:param files: The files that exist inside the collection. This is based on the FILES.json file inside the
collection artifact.
:param skip: Whether to skip installing the collection. Should be set if the collection is already installed
and force is not set.
"""
self.namespace = namespace
self.name = name
self.b_path = b_path
self.api = api
self.versions = set(versions)
self.force = force
self.skip = skip
self.required_by = []
self._metadata = metadata
self._files = files
self.add_requirement(parent, requirement)
def __str__(self):
return to_native("%s.%s" % (self.namespace, self.name))
def __unicode__(self):
return u"%s.%s" % (self.namespace, self.name)
@property
def latest_version(self):
try:
return max([v for v in self.versions if v != '*'], key=LooseVersion)
except ValueError: # ValueError: max() arg is an empty sequence
return '*'
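# Illustrative example: with versions = {'1.0.0', '1.9.0', '1.10.0'} this property returns
# '1.10.0', since the comparison uses LooseVersion ordering rather than plain string ordering.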
@property
def dependencies(self):
if self._metadata:
return self._metadata.dependencies
elif len(self.versions) > 1:
return None
self._get_metadata()
return self._metadata.dependencies
def add_requirement(self, parent, requirement):
self.required_by.append((parent, requirement))
new_versions = set(v for v in self.versions if self._meets_requirements(v, requirement, parent))
if len(new_versions) == 0:
if self.skip:
force_flag = '--force-with-deps' if parent else '--force'
version = self.latest_version if self.latest_version != '*' else 'unknown'
msg = "Cannot meet requirement %s:%s as it is already installed at version '%s'. Use %s to overwrite" \
% (to_text(self), requirement, version, force_flag)
raise AnsibleError(msg)
elif parent is None:
msg = "Cannot meet requirement %s for dependency %s" % (requirement, to_text(self))
else:
msg = "Cannot meet dependency requirement '%s:%s' for collection %s" \
% (to_text(self), requirement, parent)
collection_source = to_text(self.b_path, nonstring='passthru') or self.api.api_server
req_by = "\n".join(
"\t%s - '%s:%s'" % (to_text(p) if p else 'base', to_text(self), r)
for p, r in self.required_by
)
versions = ", ".join(sorted(self.versions, key=LooseVersion))
raise AnsibleError(
"%s from source '%s'. Available versions before last requirement added: %s\nRequirements from:\n%s"
% (msg, collection_source, versions, req_by)
)
self.versions = new_versions
def install(self, path, b_temp_path):
if self.skip:
display.display("Skipping '%s' as it is already installed" % to_text(self))
return
# Install it if it is not already installed
collection_path = os.path.join(path, self.namespace, self.name)
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
display.display("Installing '%s:%s' to '%s'" % (to_text(self), self.latest_version, collection_path))
if self.b_path is None:
download_url = self._metadata.download_url
artifact_hash = self._metadata.artifact_sha256
headers = {}
self.api._add_auth_token(headers, download_url, required=False)
self.b_path = _download_file(download_url, b_temp_path, artifact_hash, self.api.validate_certs,
headers=headers)
if os.path.exists(b_collection_path):
shutil.rmtree(b_collection_path)
os.makedirs(b_collection_path)
with tarfile.open(self.b_path, mode='r') as collection_tar:
files_member_obj = collection_tar.getmember('FILES.json')
with _tarfile_extract(collection_tar, files_member_obj) as files_obj:
files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))
_extract_tar_file(collection_tar, 'MANIFEST.json', b_collection_path, b_temp_path)
_extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)
for file_info in files['files']:
file_name = file_info['name']
if file_name == '.':
continue
if file_info['ftype'] == 'file':
_extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
expected_hash=file_info['chksum_sha256'])
else:
os.makedirs(os.path.join(b_collection_path, to_bytes(file_name, errors='surrogate_or_strict')))
def set_latest_version(self):
self.versions = set([self.latest_version])
self._get_metadata()
def _get_metadata(self):
if self._metadata:
return
self._metadata = self.api.get_collection_version_metadata(self.namespace, self.name, self.latest_version)
def _meets_requirements(self, version, requirements, parent):
"""
Supported version identifiers are '==', '!=', '>', '>=', '<', '<=' and '*'. Multiple requirements are delimited by ','
"""
op_map = {
'!=': operator.ne,
'==': operator.eq,
'=': operator.eq,
'>=': operator.ge,
'>': operator.gt,
'<=': operator.le,
'<': operator.lt,
}
for req in list(requirements.split(',')):
op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
op = op_map.get(req[:op_pos])
requirement = req[op_pos:]
if not op:
requirement = req
op = operator.eq
# In the case we are checking a new requirement on a base requirement (parent != None) we can't accept
# version as '*' (unknown version) unless the requirement is also '*'.
if parent and version == '*' and requirement != '*':
break
elif requirement == '*' or version == '*':
continue
if not op(LooseVersion(version), LooseVersion(requirement)):
break
else:
return True
# The loop was broken early, it does not meet all the requirements
return False
@staticmethod
def from_tar(b_path, force, parent=None):
if not tarfile.is_tarfile(b_path):
raise AnsibleError("Collection artifact at '%s' is not a valid tar file." % to_native(b_path))
info = {}
with tarfile.open(b_path, mode='r') as collection_tar:
for b_member_name, property_name in CollectionRequirement._FILE_MAPPING:
n_member_name = to_native(b_member_name)
try:
member = collection_tar.getmember(n_member_name)
except KeyError:
raise AnsibleError("Collection at '%s' does not contain the required file %s."
% (to_native(b_path), n_member_name))
with _tarfile_extract(collection_tar, member) as member_obj:
try:
info[property_name] = json.loads(to_text(member_obj.read(), errors='surrogate_or_strict'))
except ValueError:
raise AnsibleError("Collection tar file member %s does not contain a valid json string."
% n_member_name)
meta = info['manifest_file']['collection_info']
files = info['files_file']['files']
namespace = meta['namespace']
name = meta['name']
version = meta['version']
meta = CollectionVersionMetadata(namespace, name, version, None, None, meta['dependencies'])
return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
metadata=meta, files=files)
@staticmethod
def from_path(b_path, force, parent=None):
info = {}
for b_file_name, property_name in CollectionRequirement._FILE_MAPPING:
b_file_path = os.path.join(b_path, b_file_name)
if not os.path.exists(b_file_path):
continue
with open(b_file_path, 'rb') as file_obj:
try:
info[property_name] = json.loads(to_text(file_obj.read(), errors='surrogate_or_strict'))
except ValueError:
raise AnsibleError("Collection file at '%s' does not contain a valid json string."
% to_native(b_file_path))
if 'manifest_file' in info:
manifest = info['manifest_file']['collection_info']
namespace = manifest['namespace']
name = manifest['name']
version = manifest['version']
dependencies = manifest['dependencies']
else:
display.warning("Collection at '%s' does not have a MANIFEST.json file, cannot detect version."
% to_text(b_path))
parent_dir, name = os.path.split(to_text(b_path, errors='surrogate_or_strict'))
namespace = os.path.split(parent_dir)[1]
version = '*'
dependencies = {}
meta = CollectionVersionMetadata(namespace, name, version, None, None, dependencies)
files = info.get('files_file', {}).get('files', {})
return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
metadata=meta, files=files, skip=True)
@staticmethod
def from_name(collection, apis, requirement, force, parent=None):
namespace, name = collection.split('.', 1)
galaxy_meta = None
for api in apis:
try:
if not (requirement == '*' or requirement.startswith('<') or requirement.startswith('>') or
requirement.startswith('!=')):
if requirement.startswith('='):
requirement = requirement.lstrip('=')
resp = api.get_collection_version_metadata(namespace, name, requirement)
galaxy_meta = resp
versions = [resp.version]
else:
resp = api.get_collection_versions(namespace, name)
# Galaxy supports semver but ansible-galaxy does not. We ignore any versions that don't match
# StrictVersion (x.y.z) and only support pre-releases if an explicit version was set (done above).
versions = [v for v in resp if StrictVersion.version_re.match(v)]
except GalaxyError as err:
if err.http_code == 404:
display.vvv("Collection '%s' is not available from server %s %s"
% (collection, api.name, api.api_server))
continue
raise
display.vvv("Collection '%s' obtained from server %s %s" % (collection, api.name, api.api_server))
break
else:
raise AnsibleError("Failed to find collection %s:%s" % (collection, requirement))
req = CollectionRequirement(namespace, name, None, api, versions, requirement, force, parent=parent,
metadata=galaxy_meta)
return req
def build_collection(collection_path, output_path, force):
"""
Creates the Ansible collection artifact in a .tar.gz file.
:param collection_path: The path to the collection to build. This should be the directory that contains the
galaxy.yml file.
:param output_path: The path to create the collection build artifact. This should be a directory.
:param force: Whether to overwrite an existing collection build artifact or fail.
:return: The path to the collection build artifact.
"""
b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
b_galaxy_path = os.path.join(b_collection_path, b'galaxy.yml')
if not os.path.exists(b_galaxy_path):
raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))
collection_meta = _get_galaxy_yml(b_galaxy_path)
file_manifest = _build_files_manifest(b_collection_path, collection_meta['namespace'], collection_meta['name'],
collection_meta['build_ignore'])
collection_manifest = _build_manifest(**collection_meta)
collection_output = os.path.join(output_path, "%s-%s-%s.tar.gz" % (collection_meta['namespace'],
collection_meta['name'],
collection_meta['version']))
b_collection_output = to_bytes(collection_output, errors='surrogate_or_strict')
if os.path.exists(b_collection_output):
if os.path.isdir(b_collection_output):
raise AnsibleError("The output collection artifact '%s' already exists, "
"but is a directory - aborting" % to_native(collection_output))
elif not force:
raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
"the collection artifact." % to_native(collection_output))
_build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
def publish_collection(collection_path, api, wait, timeout):
"""
Publish an Ansible collection tarball into an Ansible Galaxy server.
:param collection_path: The path to the collection tarball to publish.
:param api: A GalaxyAPI to publish the collection to.
:param wait: Whether to wait until the import process is complete.
:param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
"""
import_uri = api.publish_collection(collection_path)
if wait:
# Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
# always the task_id, though.
# v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
# v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
task_id = None
for path_segment in reversed(import_uri.split('/')):
if path_segment:
task_id = path_segment
break
if not task_id:
raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)
display.display("Collection has been published to the Galaxy server %s %s" % (api.name, api.api_server))
with _display_progress():
api.wait_import_task(task_id, timeout)
display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
% (api.name, api.api_server))
else:
display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
"completed due to --no-wait being set. Import task results can be found at %s"
% (api.name, api.api_server, import_uri))
def install_collections(collections, output_path, apis, validate_certs, ignore_errors, no_deps, force, force_deps):
"""
Install Ansible collections to the path specified.
:param collections: The collections to install, should be a list of tuples with (name, requirement, Galaxy server).
:param output_path: The path to install the collections to.
:param apis: A list of GalaxyAPIs to query when searching for a collection.
:param validate_certs: Whether to validate the certificates if downloading a tarball.
:param ignore_errors: Whether to ignore any errors when installing the collection.
:param no_deps: Ignore any collection dependencies and only install the base requirements.
:param force: Re-install a collection if it has already been installed.
:param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
"""
existing_collections = _find_existing_collections(output_path)
with _tempdir() as b_temp_path:
display.display("Process install dependency map")
with _display_progress():
dependency_map = _build_dependency_map(collections, existing_collections, b_temp_path, apis,
validate_certs, force, force_deps, no_deps)
display.display("Starting collection install process")
with _display_progress():
for collection in dependency_map.values():
try:
collection.install(output_path, b_temp_path)
except AnsibleError as err:
if ignore_errors:
display.warning("Failed to install collection %s but skipping due to --ignore-errors being set. "
"Error: %s" % (to_text(collection), to_text(err)))
else:
raise
def validate_collection_name(name):
"""
Validates that the collection name, given by the user or a requirements file, fits the required format.
:param name: The input name with optional range specifier split by ':'.
:return: The input value, required for argparse validation.
"""
collection, dummy, dummy = name.partition(':')
if AnsibleCollectionRef.is_valid_collection_name(collection):
return name
raise AnsibleError("Invalid collection name '%s', "
"name must be in the format <namespace>.<collection>. "
"Please make sure namespace and collection name contains "
"characters from [a-zA-Z0-9_] only." % name)
@contextmanager
def _tempdir():
b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
yield b_temp_path
shutil.rmtree(b_temp_path)
@contextmanager
def _tarfile_extract(tar, member):
tar_obj = tar.extractfile(member)
yield tar_obj
tar_obj.close()
@contextmanager
def _display_progress():
config_display = C.GALAXY_DISPLAY_PROGRESS
display_wheel = sys.stdout.isatty() if config_display is None else config_display
if not display_wheel:
yield
return
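# The nested progress() below runs in a daemon thread: it animates a spinner ("|/-\") while
# draining display calls that the main thread pushes onto display_queue, so output is serialised
# through the queue instead of racing with the spinner.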
def progress(display_queue, actual_display):
actual_display.debug("Starting display_progress display thread")
t = threading.current_thread()
while True:
for c in "|/-\\":
actual_display.display(c + "\b", newline=False)
time.sleep(0.1)
# Drain and display any messages queued by the main thread
while True:
try:
method, args, kwargs = display_queue.get(block=False, timeout=0.1)
except queue.Empty:
break
else:
func = getattr(actual_display, method)
func(*args, **kwargs)
if getattr(t, "finish", False):
actual_display.debug("Received end signal for display_progress display thread")
return
class DisplayThread(object):
def __init__(self, display_queue):
self.display_queue = display_queue
def __getattr__(self, attr):
def call_display(*args, **kwargs):
self.display_queue.put((attr, args, kwargs))
return call_display
# Temporarily override the global display class with our own, which adds the calls to a queue for the display thread to process.
global display
old_display = display
try:
display_queue = queue.Queue()
display = DisplayThread(display_queue)
t = threading.Thread(target=progress, args=(display_queue, old_display))
t.daemon = True
t.start()
try:
yield
finally:
t.finish = True
t.join()
except Exception:
# The exception is re-raised so we can be sure the thread is finished and no longer using the display
raise
finally:
display = old_display
def _get_galaxy_yml(b_galaxy_yml_path):
meta_info = get_collections_galaxy_meta_info()
mandatory_keys = set()
string_keys = set()
list_keys = set()
dict_keys = set()
for info in meta_info:
if info.get('required', False):
mandatory_keys.add(info['key'])
key_list_type = {
'str': string_keys,
'list': list_keys,
'dict': dict_keys,
}[info.get('type', 'str')]
key_list_type.add(info['key'])
all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))
try:
with open(b_galaxy_yml_path, 'rb') as g_yaml:
galaxy_yml = yaml.safe_load(g_yaml)
except YAMLError as err:
raise AnsibleError("Failed to parse the galaxy.yml at '%s' with the following error:\n%s"
% (to_native(b_galaxy_yml_path), to_native(err)))
set_keys = set(galaxy_yml.keys())
missing_keys = mandatory_keys.difference(set_keys)
if missing_keys:
raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
% (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))
extra_keys = set_keys.difference(all_keys)
if len(extra_keys) > 0:
display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
% (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))
# Add the defaults if they have not been set
for optional_string in string_keys:
if optional_string not in galaxy_yml:
galaxy_yml[optional_string] = None
for optional_list in list_keys:
list_val = galaxy_yml.get(optional_list, None)
if list_val is None:
galaxy_yml[optional_list] = []
elif not isinstance(list_val, list):
galaxy_yml[optional_list] = [list_val]
for optional_dict in dict_keys:
if optional_dict not in galaxy_yml:
galaxy_yml[optional_dict] = {}
# license is a builtin var in Python, to avoid confusion we just rename it to license_ids
galaxy_yml['license_ids'] = galaxy_yml['license']
del galaxy_yml['license']
return galaxy_yml
def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns):
# We always ignore .pyc and .retry files as well as some well known version control directories. The ignore
# patterns can be extended by the build_ignore key in galaxy.yml
b_ignore_patterns = [
b'galaxy.yml',
b'*.pyc',
b'*.retry',
b'tests/output', # Ignore ansible-test result output directory.
to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)), # Ignores previously built artifacts in the root dir.
]
b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])
entry_template = {
'name': None,
'ftype': None,
'chksum_type': None,
'chksum_sha256': None,
'format': MANIFEST_FORMAT
}
manifest = {
'files': [
{
'name': '.',
'ftype': 'dir',
'chksum_type': None,
'chksum_sha256': None,
'format': MANIFEST_FORMAT,
},
],
'format': MANIFEST_FORMAT,
}
def _walk(b_path, b_top_level_dir):
for b_item in os.listdir(b_path):
b_abs_path = os.path.join(b_path, b_item)
b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
b_rel_path = os.path.join(b_rel_base_dir, b_item)
rel_path = to_text(b_rel_path, errors='surrogate_or_strict')
if os.path.isdir(b_abs_path):
if any(b_item == b_path for b_path in b_ignore_dirs) or \
any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
continue
if os.path.islink(b_abs_path):
b_link_target = os.path.realpath(b_abs_path)
if not b_link_target.startswith(b_top_level_dir):
display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
% to_text(b_abs_path))
continue
manifest_entry = entry_template.copy()
manifest_entry['name'] = rel_path
manifest_entry['ftype'] = 'dir'
manifest['files'].append(manifest_entry)
_walk(b_abs_path, b_top_level_dir)
else:
if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
continue
manifest_entry = entry_template.copy()
manifest_entry['name'] = rel_path
manifest_entry['ftype'] = 'file'
manifest_entry['chksum_type'] = 'sha256'
manifest_entry['chksum_sha256'] = secure_hash(b_abs_path, hash_func=sha256)
manifest['files'].append(manifest_entry)
_walk(b_collection_path, b_collection_path)
return manifest
def _build_manifest(namespace, name, version, authors, readme, tags, description, license_ids, license_file,
dependencies, repository, documentation, homepage, issues, **kwargs):
manifest = {
'collection_info': {
'namespace': namespace,
'name': name,
'version': version,
'authors': authors,
'readme': readme,
'tags': tags,
'description': description,
'license': license_ids,
'license_file': license_file if license_file else None, # Handle galaxy.yml having an empty string (None)
'dependencies': dependencies,
'repository': repository,
'documentation': documentation,
'homepage': homepage,
'issues': issues,
},
'file_manifest_file': {
'name': 'FILES.json',
'ftype': 'file',
'chksum_type': 'sha256',
'chksum_sha256': None, # Filled out in _build_collection_tar
'format': MANIFEST_FORMAT
},
'format': MANIFEST_FORMAT,
}
return manifest
def _build_collection_tar(b_collection_path, b_tar_path, collection_manifest, file_manifest):
files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
with _tempdir() as b_temp_path:
b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))
with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
# Add the MANIFEST.json and FILES.json file to the archive
for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
b_io = BytesIO(b)
tar_info = tarfile.TarInfo(name)
tar_info.size = len(b)
tar_info.mtime = time.time()
tar_info.mode = 0o0644
tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
for file_info in file_manifest['files']:
if file_info['name'] == '.':
continue
# arcname expects a native string, cannot be bytes
filename = to_native(file_info['name'], errors='surrogate_or_strict')
b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))
def reset_stat(tarinfo):
tarinfo.mode = 0o0755 if tarinfo.isdir() else 0o0644
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = ''
return tarinfo
tar_file.add(os.path.realpath(b_src_path), arcname=filename, recursive=False, filter=reset_stat)
shutil.copy(b_tar_filepath, b_tar_path)
collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
collection_manifest['collection_info']['name'])
display.display('Created collection for %s at %s' % (collection_name, to_text(b_tar_path)))
def _find_existing_collections(path):
collections = []
b_path = to_bytes(path, errors='surrogate_or_strict')
for b_namespace in os.listdir(b_path):
b_namespace_path = os.path.join(b_path, b_namespace)
if os.path.isfile(b_namespace_path):
continue
for b_collection in os.listdir(b_namespace_path):
b_collection_path = os.path.join(b_namespace_path, b_collection)
if os.path.isdir(b_collection_path):
req = CollectionRequirement.from_path(b_collection_path, False)
display.vvv("Found installed collection %s:%s at '%s'" % (to_text(req), req.latest_version,
to_text(b_collection_path)))
collections.append(req)
return collections
def _build_dependency_map(collections, existing_collections, b_temp_path, apis, validate_certs, force, force_deps,
no_deps):
dependency_map = {}
# First build the dependency map on the actual requirements
for name, version, source in collections:
_get_collection_info(dependency_map, existing_collections, name, version, source, b_temp_path, apis,
validate_certs, (force or force_deps))
checked_parents = set([to_text(c) for c in dependency_map.values() if c.skip])
while len(dependency_map) != len(checked_parents):
while not no_deps: # Only parse dependencies if no_deps was not set
parents_to_check = set(dependency_map.keys()).difference(checked_parents)
deps_exhausted = True
for parent in parents_to_check:
parent_info = dependency_map[parent]
if parent_info.dependencies:
deps_exhausted = False
for dep_name, dep_requirement in parent_info.dependencies.items():
_get_collection_info(dependency_map, existing_collections, dep_name, dep_requirement,
parent_info.api, b_temp_path, apis, validate_certs, force_deps,
parent=parent)
checked_parents.add(parent)
# No extra dependencies were resolved, exit loop
if deps_exhausted:
break
# Now that the deps are resolved as far as possible, select the latest version for collections with
# multiple candidate versions and go from there
deps_not_checked = set(dependency_map.keys()).difference(checked_parents)
for collection in deps_not_checked:
dependency_map[collection].set_latest_version()
if no_deps or len(dependency_map[collection].dependencies) == 0:
checked_parents.add(collection)
return dependency_map
def _get_collection_info(dep_map, existing_collections, collection, requirement, source, b_temp_path, apis,
validate_certs, force, parent=None):
dep_msg = ""
if parent:
dep_msg = " - as dependency of %s" % parent
display.vvv("Processing requirement collection '%s'%s" % (to_text(collection), dep_msg))
b_tar_path = None
if os.path.isfile(to_bytes(collection, errors='surrogate_or_strict')):
display.vvvv("Collection requirement '%s' is a tar artifact" % to_text(collection))
b_tar_path = to_bytes(collection, errors='surrogate_or_strict')
elif urlparse(collection).scheme.lower() in ['http', 'https']:
display.vvvv("Collection requirement '%s' is a URL to a tar artifact" % collection)
try:
b_tar_path = _download_file(collection, b_temp_path, None, validate_certs)
except urllib_error.URLError as err:
raise AnsibleError("Failed to download collection tar from '%s': %s"
% (to_native(collection), to_native(err)))
if b_tar_path:
req = CollectionRequirement.from_tar(b_tar_path, force, parent=parent)
collection_name = to_text(req)
if collection_name in dep_map:
collection_info = dep_map[collection_name]
collection_info.add_requirement(None, req.latest_version)
else:
collection_info = req
else:
validate_collection_name(collection)
display.vvvv("Collection requirement '%s' is the name of a collection" % collection)
if collection in dep_map:
collection_info = dep_map[collection]
collection_info.add_requirement(parent, requirement)
else:
apis = [source] if source else apis
collection_info = CollectionRequirement.from_name(collection, apis, requirement, force, parent=parent)
existing = [c for c in existing_collections if to_text(c) == to_text(collection_info)]
if existing and not collection_info.force:
# Test that the installed collection fits the requirement
existing[0].add_requirement(to_text(collection_info), requirement)
collection_info = existing[0]
dep_map[to_text(collection_info)] = collection_info
def _download_file(url, b_path, expected_hash, validate_certs, headers=None):
bufsize = 65536
digest = sha256()
urlsplit = os.path.splitext(to_text(url.rsplit('/', 1)[1]))
b_file_name = to_bytes(urlsplit[0], errors='surrogate_or_strict')
b_file_ext = to_bytes(urlsplit[1], errors='surrogate_or_strict')
b_file_path = tempfile.NamedTemporaryFile(dir=b_path, prefix=b_file_name, suffix=b_file_ext, delete=False).name
display.vvv("Downloading %s to %s" % (url, to_text(b_path)))
# Galaxy redirects downloads to S3, which rejects the request if an Authorization header is attached, so do not forward that header on redirect
resp = open_url(to_native(url, errors='surrogate_or_strict'), validate_certs=validate_certs, headers=headers,
unredirected_headers=['Authorization'], http_agent=user_agent())
with open(b_file_path, 'wb') as download_file:
data = resp.read(bufsize)
while data:
digest.update(data)
download_file.write(data)
data = resp.read(bufsize)
if expected_hash:
actual_hash = digest.hexdigest()
display.vvvv("Validating downloaded file hash %s with expected hash %s" % (actual_hash, expected_hash))
if expected_hash != actual_hash:
raise AnsibleError("Mismatch artifact hash with downloaded file")
return b_file_path
def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
n_filename = to_native(filename, errors='surrogate_or_strict')
try:
member = tar.getmember(n_filename)
except KeyError:
raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % (to_native(tar.name),
n_filename))
with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
bufsize = 65536
sha256_digest = sha256()
with _tarfile_extract(tar, member) as tar_obj:
data = tar_obj.read(bufsize)
while data:
tmpfile_obj.write(data)
tmpfile_obj.flush()
sha256_digest.update(data)
data = tar_obj.read(bufsize)
actual_hash = sha256_digest.hexdigest()
if expected_hash and actual_hash != expected_hash:
raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
% (n_filename, to_native(tar.name)))
b_dest_filepath = os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict'))
b_parent_dir = os.path.split(b_dest_filepath)[0]
if not os.path.exists(b_parent_dir):
# Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
# makes sure we create the parent directory even if it wasn't set in the metadata.
os.makedirs(b_parent_dir)
shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)
|
Lujeni/ansible
|
lib/ansible/galaxy/collection.py
|
Python
|
gpl-3.0
| 40,175
|
[
"Galaxy"
] |
eec1f1541776de2114535fea7cb3bcbd548481630eedac52c942910a3222cd75
|
# -*- coding: utf-8 -*-
#
# Copyright (C) University College London, 2013, all rights reserved.
#
# This file is part of FabMD and is CONFIDENTIAL. You may not work
# with, install, use, duplicate, modify, redistribute or share this
# file, or any part thereof, other than as allowed by any agreement
# specifically made by you with University College London.
#
from fab import *
@task
def lammps(config,**args):
"""Submit a LAMMPS job to the remote queue.
The job results will be stored with a name pattern as defined in the environment,
e.g. cylinder-abcd1234-legion-256
config : config directory to use to define geometry, e.g. config=cylinder
Keyword arguments:
cores : number of compute cores to request
images : number of images to take
steering : steering session i.d.
wall_time : wall-time job limit
memory : memory per node
"""
with_config(config)
execute(put_configs,config)
job(dict(script='lammps',
cores=4, wall_time='0:15:0',memory='2G'),args)
#@task
#def lammps_swelling_test(config, **args):
"""Submits a set of LAMMPS jobs to the remote queue, as part of a clay swelling test."""
#let's first try to run the exfoliated one.
#lammps_in_file =
#with_config(config)
#execute(put_configs,config)
#loop over swelling values
#update_environment(dict(job_results, job_config_path))
#job(dict(script='lammps',
#cores=4, wall_time='0:15:0',memory='2G'),args)
### IBI ###
@task
def do_ibi(number, outdir, pressure=1, config_name="peg", copy="yes", ibi_script="ibi.sh", atom_dir=os.path.join(env.localroot,'python')):
""" Copy the obtained output to a work directory, do an IBI iteration and make a new config file from the resulting data. """
ibi_in_dir = os.path.join(env.localroot,'results',outdir)
ibi_out_dir = os.path.join(env.localroot,'output_blackbox',os.path.basename(ibi_script),outdir)
local("mkdir -p %s" % (ibi_out_dir))
# if copy=="yes":
# blackbox("copy_lammps_results.sh", "%s %s %d" % (os.path.join(env.localroot,'results',outdir), os.path.join(env.localroot,'python'), int(number)))
blackbox(ibi_script, "%s %s %s %s %s" % (atom_dir, number, pressure, ibi_in_dir, ibi_out_dir))
if copy=="yes":
blackbox("prepare_lammps_config.sh", "%s %s %s %d %s" % (ibi_out_dir, os.path.join(env.localroot,'config_files'), config_name, int(number)+1, atom_dir))
@task
def ibi_analysis_multi(start_iter, num_iters, outdir_prefix, outdir_suffix, ibi_script="ibi.sh", pressure=1, atom_dir=os.path.join(env.localroot,'python')):
""" Recreate IBI analysis results based on the output files provided.
Example use: fab hector ibi_analysis_multi:start_iter=7,num_iters=3,outdir_prefix=peg_,outdir_suffix=_hector_32 """
si = int(start_iter)
ni = int(num_iters)
for i in xrange(si,si+ni):
outdir = "%s%d%s" % (outdir_prefix,i,outdir_suffix)
do_ibi(i, outdir, pressure, outdir_prefix, "no", ibi_script, atom_dir)
# ibi_in_dir = os.path.join(env.localroot,'results',outdir)
# ibi_out_dir = os.path.join(env.localroot,'ibi_output',outdir)
# local("mkdir -p %s" % (ibi_out_dir))
# blackbox("copy_lammps_results.sh", "%s %s %d" % (os.path.join(env.localroot,'results',"%s%d%s" % (outdir_prefix,i,outdir_suffix)), os.path.join(env.localroot,'python'), i))
# blackbox(ibi_script, "%s %s %s %s" % (i, pressure, ibi_in_dir, ibi_out_dir))
@task
def full_ibi(config, number, outdir, config_name, pressure=0.3, ibi_script="ibi.sh", atom_dir=os.path.join(env.localroot,'python'), **args):
""" Performs both do_ibi and runs lammps with the newly created config file.
Example use: fab hector full_ibi:config=2peg4,number=3,outdir=2peg3_hector_32,config_name=2peg,cores=32,wall_time=3:0:0 """
do_ibi(number, outdir, pressure, config_name, "yes", ibi_script, atom_dir)
lammps(config, **args)
wait_complete()
fetch_results(regex="*%s*" % (config_name))
@task
def full_ibi_multi(start_iter, num_iters, config_name, outdir_suffix, pressure=0.3, script="ibi.sh", atom_dir=os.path.join(env.localroot,'python'), **args):
""" Do multiple IBI iterations in one command.
Example use: fab hector full_ibi_multi:start_iter=7,num_iters=3,config_name=2peg,outdir_suffix=_hector_32,cores=32,wall_time=3:0:0 """
si = int(start_iter)
ni = int(num_iters)
pressure_changed = 0
for i in xrange(si,si+ni):
full_ibi("%s%d" % (config_name,i+1), i, "%s%d%s" % (config_name,i,outdir_suffix), config_name, pressure, script, atom_dir, **args)
p_ave, p_std = lammps_get_pressure(os.path.join(env.localroot,"results","%s%d%s" % (config_name,i,outdir_suffix)), i)
print "Average pressure is now", p_ave, "after iteration", i, "completed."
#if(i >= 10 and p_ave < p_std):
# if pressure_changed == 0:
# pressure = float(pressure)/3.0
# pressure_changed = 1
# print "(FabMD:) Pressure factor now set to", pressure, "after iteration", i
# if abs(p_ave) - (p_std*0.5) < 0: # We have converged, let's not waste further CPU cycles!
# print "(FabMD:) Pressure has converged. OPTIMIZATION COMPLETE"
# break
### Utility Functions
def lammps_get_pressure(log_dir,number):
steps = []
pressures = []
LIST_IN = open(os.path.join(log_dir, "new_CG.prod%d.log" % (number)), 'r')
for line in LIST_IN:
NewRow = (line.strip()).split()
if len(NewRow) > 0:
if NewRow[0] == "Press":
pressures.append(float(NewRow[2]))
d1 = np.array(pressures[5:])
print "READ: new_CG.prod%d.log" % (number)
return np.average(d1), np.std(d1) #average and stdev
|
uschille/FabSim
|
deploy/fabNanoMD.py
|
Python
|
lgpl-3.0
| 5,847
|
[
"LAMMPS"
] |
921ea6c18cd3008394a08f3b1892baac86c28769c9c122e20e779eec391bf9fe
|
from os.path import join
import os
import numpy as n
import glob
import sys
import time
import astropy.io.fits as fits
import astropy.cosmology as co
cosmo = co.Planck13
# for one galaxy spectrum
import GalaxySpectrumFIREFLY as gs
import StellarPopulationModel as spm
init_cat=join(os.environ['VVDS_DIR'], "catalogs", "VVDS_WIDE_summary.v1.fits")
plate_catalog = join(os.environ['VVDS_DIR'], "catalogs", "VVDS_WIDE_summary.v1.spm.fits")
hdu_orig_table = fits.open(init_cat)
orig_table = hdu_orig_table[1].data
orig_cols = orig_table.columns
kroupaFolder = join( os.environ['VVDS_DIR'], 'stellarpop-m11-kroupa', 'stellarpop')
salpeterFolder = join( os.environ['VVDS_DIR'], 'stellarpop-m11-salpeter', 'stellarpop')
dV=-9999.99
def get_table_entry_full(hduSPM):
return n.array([ 10**hduSPM.header['age_lightW_mean'], 10**hduSPM.header['age_lightW_mean_up']-10**hduSPM.header['age_lightW_mean'], 10**hduSPM.header['age_lightW_mean']-10**hduSPM.header['age_lightW_mean_low'], hduSPM.header['metallicity_lightW_mean'], hduSPM.header['metallicity_lightW_mean_up'] - hduSPM.header['metallicity_lightW_mean'], hduSPM.header['metallicity_lightW_mean'] - hduSPM.header['metallicity_lightW_mean_low'], hduSPM.header['stellar_mass_mean'], hduSPM.header['stellar_mass_mean_up'] - hduSPM.header['stellar_mass_mean'], hduSPM.header['stellar_mass_mean'] - hduSPM.header['stellar_mass_mean_low'], hduSPM.header['EBV'], hduSPM.header['ssp_number']])
headers =" age_lightW_mean_kroupa age_lightW_err_plus_kroupa age_lightW_err_minus_kroupa metallicity_lightW_mean_kroupa metallicity_lightW_mean_err_plus_kroupa metallicity_lightW_mean_err_minus_kroupa stellar_mass_kroupa stellar_mass_err_plus_kroupa stellar_mass_err_minus_kroupa spm_EBV_kroupa nComponentsSSP_kroupa age_lightW_mean_salpeter age_lightW_err_plus_salpeter age_lightW_err_minus_salpeter metallicity_lightW_mean_salpeter metallicity_lightW_mean_err_plus_salpeter metallicity_lightW_mean_err_minus_salpeter stellar_mass_salpeter stellar_mass_err_plus_salpeter stellar_mass_err_minus_salpeter spm_EBV_salpeter nComponentsSSP_salpeter"
table_all = []
for catalog_entry in orig_table:
krF = join(kroupaFolder, 'spFly-vvdswide-' + str(catalog_entry['NUM']) + "-kr.fits")
ssF = join(salpeterFolder, 'spFly-vvdswide-' + str(catalog_entry['NUM']) + "-ss.fits")
if os.path.isfile(krF) and os.path.isfile(ssF):
#print "gets info"
table_entry_kr = get_table_entry_full( hduSPM=fits.open(krF)[1] )
#print table_entry_kr.shape
table_entry_ss = get_table_entry_full( hduSPM=fits.open(ssF)[1] )
#print table_entry_ss.shape
table_entry = n.hstack((table_entry_kr, table_entry_ss))
table_all.append(table_entry)
else:
table_all.append(n.ones(22)*dV)
newDat = n.transpose(table_all)
all_cols = []
for data_array, head in zip(newDat, headers.split()):
all_cols.append(fits.Column(name=head, format='D', array=data_array))
new_cols = fits.ColDefs(all_cols)
hdu = fits.BinTableHDU.from_columns(orig_cols + new_cols)
if os.path.isfile(plate_catalog):
os.remove(plate_catalog)
hdu.writeto(plate_catalog)
init_cat=join(os.environ['VVDS_DIR'], "catalogs", "VVDS_DEEP_summary.v1.fits")
summary_catalog = join(os.environ['VVDS_DIR'], "catalogs", "VVDS_DEEP_summary.v1.spm.fits")
hdu_orig_table = fits.open(init_cat)
orig_table = hdu_orig_table[1].data
orig_cols = orig_table.columns
table_all = []
for catalog_entry in orig_table:
krF = join(kroupaFolder,'spFly-vvdsdeep-'+str(catalog_entry['NUM'])+"-kr.fits")
ssF = join(salpeterFolder, 'spFly-vvdsdeep-'+str(catalog_entry['NUM'])+"-ss.fits")
if os.path.isfile(krF) and os.path.isfile(ssF):
#print "gets info"
table_entry_kr = get_table_entry_full( hduSPM=fits.open(krF)[1] )
#print table_entry_kr.shape
table_entry_ss = get_table_entry_full( hduSPM=fits.open(ssF)[1] )
#print table_entry_ss.shape
table_entry = n.hstack((table_entry_kr, table_entry_ss))
table_all.append(table_entry)
else:
table_all.append(n.ones(22)*dV)
newDat = n.transpose(table_all)
all_cols = []
for data_array, head in zip(newDat, headers.split()):
all_cols.append(fits.Column(name=head, format='D', array=data_array))
new_cols = fits.ColDefs(all_cols)
hdu = fits.BinTableHDU.from_columns(orig_cols + new_cols)
if os.path.isfile(summary_catalog):
os.remove(summary_catalog)
hdu.writeto(summary_catalog)
|
JohanComparat/pySU
|
spm/bin_deep_surveys/create_summary_table_VVDS.py
|
Python
|
cc0-1.0
| 4,372
|
[
"Galaxy"
] |
8c0e1b97505fe726f26194f1beaad2fd604745c52f614c523408284124aac062
|
from collections import OrderedDict
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = OrderedDict((
('{{first_name_female}} {{last_name}}', 0.97),
('{{prefix_female}} {{first_name_female}} {{last_name}}', 0.015),
('{{first_name_female}} {{last_name}} {{suffix_female}}', 0.02),
('{{prefix_female}} {{first_name_female}} {{last_name}} {{suffix_female}}', 0.005),
))
formats_male = OrderedDict((
('{{first_name_male}} {{last_name}}', 0.97),
('{{prefix_male}} {{first_name_male}} {{last_name}}', 0.015),
('{{first_name_male}} {{last_name}} {{suffix_male}}', 0.02),
('{{prefix_male}} {{first_name_male}} {{last_name}} {{suffix_male}}', 0.005),
))
# Because random_element weights entries via a dictionary, the usual
#   formats = formats_male + formats_female
# concatenation has to be replaced with a dict-based merge that is also Python 2.x compatible
formats = formats_male.copy()
formats.update(formats_female)
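# Illustrative usage (assumption, not from this file): BaseProvider.random_element accepts such an
# OrderedDict of value -> weight pairs, so a weighted format can be drawn with, e.g.,
#   self.random_element(self.formats_female)   # ~97% of draws give '{{first_name_female}} {{last_name}}'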
# Top 200 names of the decade from the 60's-90's from:
# https://www.ssa.gov/OACT/babynames/decades/names1960s.html
# Weightings derived from total number on each name
first_names_female = OrderedDict((
('April', 0.004529083),
('Abigail', 0.002043839),
('Adriana', 0.000488767),
('Adrienne', 0.000622931),
('Aimee', 0.000424727),
('Alejandra', 0.000415754),
('Alexa', 0.000663005),
('Alexandra', 0.002835711),
('Alexandria', 0.000964993),
('Alexis', 0.003446735),
('Alice', 0.000589904),
('Alicia', 0.003766845),
('Alisha', 0.000475942),
('Alison', 0.001506047),
('Allison', 0.003740866),
('Alyssa', 0.00324341),
('Amanda', 0.015360768),
('Amber', 0.006928794),
('Amy', 0.012860314),
('Ana', 0.000853679),
('Andrea', 0.006747028),
('Angel', 0.001161117),
('Angela', 0.011954085),
('Angelica', 0.001102746),
('Angie', 0.00030166),
('Anita', 0.001383767),
('Ann', 0.002627483),
('Anna', 0.004691502),
('Anne', 0.002089582),
('Annette', 0.001487399),
('Ariana', 0.000412668),
('Ariel', 0.000615774),
('Ashlee', 0.000696534),
('Ashley', 0.014773009),
('Audrey', 0.001139165),
('Autumn', 0.000918594),
('Bailey', 0.000691916),
('Barbara', 0.004839169),
('Becky', 0.000960944),
('Belinda', 0.000502227),
('Beth', 0.002246113),
('Bethany', 0.001249385),
('Betty', 0.000840241),
('Beverly', 0.000990272),
('Bianca', 0.000624835),
('Bonnie', 0.001351901),
('Brandi', 0.002077216),
('Brandy', 0.002177499),
('Breanna', 0.000876003),
('Brenda', 0.005737124),
('Briana', 0.00093665),
('Brianna', 0.002543549),
('Bridget', 0.000787232),
('Brittany', 0.007258404),
('Brittney', 0.001566147),
('Brooke', 0.002410152),
('Caitlin', 0.001808319),
('Caitlyn', 0.000481194),
('Candace', 0.000550662),
('Candice', 0.000653199),
('Carla', 0.00195185),
('Carly', 0.000498725),
('Carmen', 0.000891783),
('Carol', 0.002972719),
('Caroline', 0.001198127),
('Carolyn', 0.002647225),
('Carrie', 0.002934659),
('Casey', 0.001177707),
('Cassandra', 0.002501243),
('Cassidy', 0.000452129),
('Cassie', 0.000344886),
('Catherine', 0.004460622),
('Cathy', 0.001413248),
('Charlene', 0.000538865),
('Charlotte', 0.000530417),
('Chelsea', 0.00280043),
('Chelsey', 0.000368501),
('Cheryl', 0.004166447),
('Cheyenne', 0.000696907),
('Chloe', 0.000565807),
('Christie', 0.000397873),
('Christina', 0.008735669),
('Christine', 0.007488758),
('Christy', 0.00141861),
('Cindy', 0.003360109),
('Claire', 0.000553835),
('Claudia', 0.00096055),
('Colleen', 0.001836203),
('Connie', 0.001821845),
('Courtney', 0.00484939),
('Cristina', 0.000328734),
('Crystal', 0.006365045),
('Cynthia', 0.007655379),
('Daisy', 0.000437443),
('Dana', 0.003395805),
('Danielle', 0.006671783),
('Darlene', 0.000952737),
('Dawn', 0.005014983),
('Deanna', 0.002049026),
('Debbie', 0.001842922),
('Deborah', 0.005386088),
('Debra', 0.004123572),
('Denise', 0.004592291),
('Desiree', 0.000991497),
('Destiny', 0.001055515),
('Diamond', 0.000331732),
('Diana', 0.003699348),
('Diane', 0.003058996),
('Dominique', 0.000847857),
('Donna', 0.00570819),
('Doris', 0.000398026),
('Dorothy', 0.000722426),
('Ebony', 0.000399624),
('Eileen', 0.000544271),
('Elaine', 0.000601175),
('Elizabeth', 0.014954075),
('Ellen', 0.000747267),
('Emily', 0.009100581),
('Emma', 0.001272059),
('Erica', 0.004344471),
('Erika', 0.002105537),
('Erin', 0.005450719),
('Evelyn', 0.000825095),
('Faith', 0.000427113),
('Felicia', 0.001717294),
('Frances', 0.000546897),
('Gabriela', 0.000526937),
('Gabriella', 0.00044123),
('Gabrielle', 0.001090096),
('Gail', 0.00071934),
('Gina', 0.002841095),
('Glenda', 0.000384982),
('Gloria', 0.001155623),
('Grace', 0.00087202),
('Gwendolyn', 0.000407831),
('Hailey', 0.000662917),
('Haley', 0.001557939),
('Hannah', 0.004189822),
('Hayley', 0.000478305),
('Heather', 0.010945254),
('Heidi', 0.002239941),
('Helen', 0.000636675),
('Holly', 0.003487028),
('Isabel', 0.000352305),
('Isabella', 0.000410282),
('Jackie', 0.000566748),
('Jaclyn', 0.00047708),
('Jacqueline', 0.004811242),
('Jade', 0.000446264),
('Jaime', 0.000853175),
('Jamie', 0.005067663),
('Jane', 0.0009486),
('Janet', 0.002489993),
('Janice', 0.001593308),
('Jasmin', 0.000333374),
('Jasmine', 0.003025422),
('Jean', 0.000815969),
('Jeanette', 0.000767293),
('Jeanne', 0.000515381),
('Jenna', 0.001804052),
('Jennifer', 0.029218839),
('Jenny', 0.000932667),
('Jessica', 0.020047608),
('Jill', 0.003253018),
('Jillian', 0.000988587),
('Jo', 0.000442083),
('Joan', 0.000802793),
('Joann', 0.000544336),
('Joanna', 0.001176284),
('Joanne', 0.000729824),
('Jocelyn', 0.000456878),
('Jodi', 0.001252405),
('Jody', 0.000741861),
('Jordan', 0.001653057),
('Joy', 0.000916515),
('Joyce', 0.001009488),
('Judith', 0.000870706),
('Judy', 0.001101586),
('Julia', 0.003301891),
('Julie', 0.008211731),
('Kaitlin', 0.000674473),
('Kaitlyn', 0.001478623),
('Kara', 0.001549119),
('Karen', 0.009643845),
('Kari', 0.000794323),
('Karina', 0.000494764),
('Karla', 0.000387696),
('Katelyn', 0.001476128),
('Katherine', 0.006581479),
('Kathleen', 0.00503549),
('Kathryn', 0.004177806),
('Kathy', 0.002710214),
('Katie', 0.003056216),
('Katrina', 0.001565446),
('Kayla', 0.004621465),
('Kaylee', 0.000551734),
('Kelli', 0.000932163),
('Kellie', 0.000299187),
('Kelly', 0.009342929),
('Kelsey', 0.002470383),
('Kendra', 0.001401079),
('Kerri', 0.000316215),
('Kerry', 0.000352984),
('Kiara', 0.000390037),
('Kim', 0.002518642),
('Kimberly', 0.015594077),
('Kirsten', 0.000369486),
('Krista', 0.001266872),
('Kristen', 0.004345587),
('Kristi', 0.001022926),
('Kristie', 0.000380189),
('Kristin', 0.003613728),
('Kristina', 0.002316281),
('Kristine', 0.000977709),
('Kristy', 0.001097734),
('Krystal', 0.001238113),
('Kylie', 0.00049739),
('Lacey', 0.00045469),
('Latasha', 0.00032904),
('Latoya', 0.000646371),
('Laura', 0.010815096),
('Lauren', 0.007015421),
('Laurie', 0.002200786),
('Leah', 0.001997571),
('Leslie', 0.003606134),
('Linda', 0.006437751),
('Lindsay', 0.002185466),
('Lindsey', 0.002646153),
('Lisa', 0.01872729),
('Loretta', 0.000482945),
('Lori', 0.006040316),
('Lorraine', 0.000486753),
('Lydia', 0.000370274),
('Lynn', 0.001522308),
('Mackenzie', 0.000761056),
('Madeline', 0.000808921),
('Madison', 0.002011184),
('Makayla', 0.000439391),
('Mallory', 0.000688633),
('Mandy', 0.000355566),
('Marcia', 0.000403213),
('Margaret', 0.003839968),
('Maria', 0.006593123),
('Mariah', 0.00097598),
('Marie', 0.001520229),
('Marilyn', 0.000590889),
('Marisa', 0.000339983),
('Marissa', 0.001582627),
('Martha', 0.001290028),
('Mary', 0.014288466),
('Maureen', 0.000753855),
('Mckenzie', 0.000334512),
('Meagan', 0.000729999),
('Megan', 0.007686786),
('Meghan', 0.001481578),
('Melanie', 0.003400117),
('Melinda', 0.002078113),
('Melissa', 0.014890692),
('Melody', 0.000404264),
('Mercedes', 0.000334643),
('Meredith', 0.000766987),
('Mia', 0.000319935),
('Michaela', 0.000506998),
('Michele', 0.003519551),
('Michelle', 0.01527423),
('Mikayla', 0.000410195),
('Mindy', 0.000306891),
('Miranda', 0.001421193),
('Misty', 0.001564614),
('Molly', 0.001710641),
('Monica', 0.004324095),
('Monique', 0.001272125),
('Morgan', 0.002527025),
('Nancy', 0.005023343),
('Natalie', 0.003658398),
('Natasha', 0.001739815),
('Nichole', 0.001001237),
('Nicole', 0.011156655),
('Nina', 0.000298115),
('Norma', 0.000470754),
('Olivia', 0.001967609),
('Paige', 0.001106313),
('Pam', 0.000374454),
('Pamela', 0.005816222),
('Patricia', 0.008349353),
('Patty', 0.000383493),
('Paula', 0.002478284),
('Peggy', 0.000810606),
('Penny', 0.000836564),
('Phyllis', 0.000562437),
('Priscilla', 0.000350226),
('Rachael', 0.001098128),
('Rachel', 0.00876108),
('Raven', 0.000404855),
('Rebecca', 0.010563161),
('Rebekah', 0.000858581),
('Regina', 0.001941739),
('Renee', 0.00257883),
('Rhonda', 0.002879221),
('Rita', 0.000719187),
('Roberta', 0.000461715),
('Robin', 0.00409199),
('Robyn', 0.00032138),
('Rose', 0.000697125),
('Ruth', 0.001041946),
('Sabrina', 0.001920969),
('Sally', 0.000532912),
('Samantha', 0.008186124),
('Sandra', 0.006473426),
('Sandy', 0.000497106),
('Sara', 0.005619879),
('Sarah', 0.014434273),
('Savannah', 0.000978344),
('Selena', 0.000329106),
('Shannon', 0.005952552),
('Shari', 0.000449043),
('Sharon', 0.004796469),
('Shawna', 0.000354209),
('Sheena', 0.000355763),
('Sheila', 0.00220129),
('Shelby', 0.001575601),
('Shelia', 0.000403673),
('Shelley', 0.000922227),
('Shelly', 0.001339469),
('Sheri', 0.000913166),
('Sherri', 0.001285038),
('Sherry', 0.002445235),
('Sheryl', 0.00057025),
('Shirley', 0.000833259),
('Sierra', 0.000954816),
('Sonia', 0.000332739),
('Sonya', 0.000914085),
('Sophia', 0.000535976),
('Stacey', 0.002836761),
('Stacie', 0.0003903),
('Stacy', 0.00311717),
('Stefanie', 0.00034644),
('Stephanie', 0.013595762),
('Sue', 0.000472877),
('Summer', 0.000411508),
('Susan', 0.0088973),
('Suzanne', 0.001943577),
('Sydney', 0.001220101),
('Sylvia', 0.000625798),
('Tabitha', 0.000428404),
('Tamara', 0.00212948),
('Tami', 0.000403651),
('Tammie', 0.00042337),
('Tammy', 0.006493584),
('Tanya', 0.002039024),
('Tara', 0.00316834),
('Tasha', 0.000355807),
('Taylor', 0.003996871),
('Teresa', 0.005060003),
('Terri', 0.001823903),
('Terry', 0.00060494),
('Theresa', 0.003492762),
('Tiffany', 0.006594283),
('Tina', 0.005186419),
('Toni', 0.000891695),
('Tonya', 0.002404133),
('Tracey', 0.001511146),
('Traci', 0.00086193),
('Tracie', 0.000301901),
('Tracy', 0.00498572),
('Tricia', 0.000449196),
('Valerie', 0.003218022),
('Vanessa', 0.003779189),
('Veronica', 0.003017805),
('Vicki', 0.00088653),
('Vickie', 0.000695199),
('Victoria', 0.005237677),
('Virginia', 0.001496482),
('Wanda', 0.001336186),
('Wendy', 0.004058263),
('Whitney', 0.001690768),
('Yesenia', 0.000331951),
('Yolanda', 0.001213819),
('Yvette', 0.000483427),
('Yvonne', 0.001005483),
('Zoe', 0.000367407),
))
first_names_male = OrderedDict((
('Aaron', 0.006741589),
('Adam', 0.007124922),
('Adrian', 0.001521889),
('Alan', 0.002344657),
('Albert', 0.001316595),
('Alec', 0.000442958),
('Alejandro', 0.000862489),
('Alex', 0.002111833),
('Alexander', 0.005215733),
('Alexis', 0.000277915),
('Alfred', 0.000318919),
('Allen', 0.001679613),
('Alvin', 0.00024794),
('Andre', 0.001400621),
('Andres', 0.000335574),
('Andrew', 0.013475074),
('Angel', 0.000902262),
('Anthony', 0.013783357),
('Antonio', 0.002392535),
('Arthur', 0.001342637),
('Austin', 0.003785615),
('Barry', 0.001102751),
('Benjamin', 0.006535474),
('Bernard', 0.000298691),
('Bill', 0.000430013),
('Billy', 0.001749806),
('Blake', 0.001218155),
('Bob', 0.000235731),
('Bobby', 0.001666977),
('Brad', 0.000984544),
('Bradley', 0.003845018),
('Brady', 0.000277522),
('Brandon', 0.009518346),
('Brendan', 0.000736758),
('Brent', 0.001889131),
('Brett', 0.002248371),
('Brian', 0.01597677),
('Bruce', 0.001883335),
('Bryan', 0.00456454),
('Bryce', 0.000457406),
('Caleb', 0.001485861),
('Calvin', 0.001168738),
('Cameron', 0.00180755),
('Carl', 0.002011802),
('Carlos', 0.00266638),
('Casey', 0.001440035),
('Cesar', 0.000304898),
('Chad', 0.003858817),
('Charles', 0.010889881),
('Chase', 0.000971942),
('Chris', 0.001389507),
('Christian', 0.003097779),
('Christopher', 0.02783596),
('Clarence', 0.000299289),
('Clayton', 0.000662222),
('Clifford', 0.00053078),
('Clinton', 0.000579307),
('Cody', 0.00353482),
('Cole', 0.000578811),
('Colin', 0.00078508),
('Collin', 0.000406057),
('Colton', 0.000520845),
('Connor', 0.000981073),
('Corey', 0.002476612),
('Cory', 0.001813005),
('Craig', 0.00338161),
('Cristian', 0.000333847),
('Curtis', 0.002140235),
('Dakota', 0.000797614),
('Dale', 0.001171354),
('Dalton', 0.000615113),
('Damon', 0.00034308),
('Dan', 0.000388496),
('Daniel', 0.018881874),
('Danny', 0.001873879),
('Darin', 0.000234962),
('Darius', 0.000336189),
('Darrell', 0.001218582),
('Darren', 0.001253738),
('Darryl', 0.00067019),
('Daryl', 0.000260918),
('Dave', 0.000269673),
('David', 0.031073833),
('Dean', 0.000965375),
('Dennis', 0.003318992),
('Derek', 0.003095299),
('Derrick', 0.001955921),
('Devin', 0.001312474),
('Devon', 0.000485877),
('Dillon', 0.000558361),
('Dominic', 0.000438221),
('Don', 0.000378322),
('Donald', 0.005689572),
('Douglas', 0.004513687),
('Drew', 0.000596868),
('Duane', 0.00061855),
('Dustin', 0.003088938),
('Dwayne', 0.000711382),
('Dylan', 0.002329096),
('Earl', 0.000348347),
('Eddie', 0.0007944),
('Edgar', 0.000379536),
('Eduardo', 0.000465358),
('Edward', 0.005702242),
('Edwin', 0.001117833),
('Elijah', 0.000592183),
('Eric', 0.012024659),
('Erik', 0.001997096),
('Ernest', 0.000746556),
('Ethan', 0.001143978),
('Eugene', 0.000784243),
('Evan', 0.001570691),
('Fernando', 0.000557608),
('Francis', 0.000330837),
('Francisco', 0.001084335),
('Frank', 0.003276449),
('Franklin', 0.000237561),
('Fred', 0.000396618),
('Frederick', 0.001104188),
('Gabriel', 0.001906504),
('Garrett', 0.001124861),
('Gary', 0.005023109),
('Gavin', 0.000295373),
('Gene', 0.00023426),
('Geoffrey', 0.000425978),
('George', 0.004423984),
('Gerald', 0.00165841),
('Gilbert', 0.000246726),
('Glen', 0.000374338),
('Glenn', 0.001111421),
('Gordon', 0.00027075),
('Grant', 0.00068322),
('Greg', 0.000623492),
('Gregg', 0.000235885),
('Gregory', 0.007676443),
('Guy', 0.000262645),
('Harold', 0.000929467),
('Harry', 0.000586934),
('Hayden', 0.000279454),
('Hector', 0.000798691),
('Henry', 0.001856232),
('Herbert', 0.000234226),
('Howard', 0.000712921),
('Hunter', 0.001034679),
('Ian', 0.001863192),
('Isaac', 0.001001951),
('Isaiah', 0.000625441),
('Ivan', 0.000350433),
('Jack', 0.001839748),
('Jackson', 0.000403253),
('Jacob', 0.007845384),
('Jaime', 0.000421378),
('Jake', 0.000565782),
('James', 0.029601617),
('Jamie', 0.00093552),
('Jared', 0.002538802),
('Jason', 0.01520513),
('Javier', 0.000625202),
('Jay', 0.001411462),
('Jeff', 0.001271436),
('Jeffery', 0.002627873),
('Jeffrey', 0.01225709),
('Jeremiah', 0.001209605),
('Jeremy', 0.006336079),
('Jermaine', 0.000450156),
('Jerome', 0.000634299),
('Jerry', 0.003150273),
('Jesse', 0.003884552),
('Jesus', 0.001628965),
('Jim', 0.000567714),
('Jimmy', 0.001607489),
('Joe', 0.001621544),
('Joel', 0.002537742),
('John', 0.028683008),
('Johnathan', 0.000840448),
('Johnny', 0.002117065),
('Jon', 0.001561184),
('Jonathan', 0.009963971),
('Jonathon', 0.000701157),
('Jordan', 0.003451546),
('Jorge', 0.001180553),
('Jose', 0.005368207),
('Joseph', 0.018604763),
('Joshua', 0.014808101),
('Juan', 0.003233598),
('Julian', 0.000693736),
('Justin', 0.010197889),
('Karl', 0.000362437),
('Keith', 0.004622866),
('Kelly', 0.000775283),
('Kenneth', 0.008318145),
('Kent', 0.000329418),
('Kerry', 0.000261448),
('Kevin', 0.014324157),
('Kirk', 0.0003801),
('Kristopher', 0.000580692),
('Kurt', 0.000716375),
('Kyle', 0.006350049),
('Lance', 0.001048495),
('Larry', 0.003658807),
('Lawrence', 0.001670294),
('Lee', 0.001223883),
('Leon', 0.000236347),
('Leonard', 0.000756713),
('Leroy', 0.000260234),
('Leslie', 0.000234637),
('Levi', 0.000347184),
('Logan', 0.001325812),
('Lonnie', 0.000258576),
('Louis', 0.001212255),
('Lucas', 0.001098237),
('Luis', 0.002427777),
('Luke', 0.001221455),
('Malik', 0.000306813),
('Manuel', 0.001331369),
('Marc', 0.001431947),
('Marco', 0.000290586),
('Marcus', 0.002604122),
('Mario', 0.001229337),
('Mark', 0.014382277),
('Martin', 0.002085226),
('Marvin', 0.000732962),
('Mason', 0.000562037),
('Mathew', 0.000605555),
('Matthew', 0.020425018),
('Maurice', 0.000777078),
('Max', 0.000311276),
('Maxwell', 0.000357478),
('Melvin', 0.00061932),
('Michael', 0.045602241),
('Micheal', 0.001273847),
('Miguel', 0.001416267),
('Mike', 0.001221797),
('Mitchell', 0.001747788),
('Nathan', 0.005039405),
('Nathaniel', 0.001887558),
('Neil', 0.000240331),
('Nicholas', 0.010021219),
('Nicolas', 0.000362522),
('Noah', 0.000960947),
('Norman', 0.000389043),
('Omar', 0.000639052),
('Oscar', 0.000946583),
('Parker', 0.000277522),
('Patrick', 0.007153255),
('Paul', 0.009272953),
('Pedro', 0.000275726),
('Perry', 0.000258644),
('Peter', 0.004340385),
('Philip', 0.002262956),
('Phillip', 0.00280273),
('Preston', 0.000292022),
('Ralph', 0.000836891),
('Randall', 0.001614722),
('Randy', 0.003021926),
('Ray', 0.000379451),
('Raymond', 0.003493952),
('Reginald', 0.00095108),
('Ricardo', 0.001197276),
('Richard', 0.014131961),
('Rick', 0.000440016),
('Rickey', 0.00023833),
('Ricky', 0.001856882),
('Riley', 0.000322031),
('Robert', 0.026938092),
('Roberto', 0.000906024),
('Rodney', 0.002180555),
('Roger', 0.002038032),
('Ronald', 0.00576775),
('Ronnie', 0.000905938),
('Ross', 0.00026863),
('Roy', 0.001311346),
('Ruben', 0.000774821),
('Russell', 0.002096221),
('Ryan', 0.01128178),
('Samuel', 0.00498019),
('Scott', 0.010580999),
('Sean', 0.005593456),
('Sergio', 0.000568518),
('Seth', 0.001537416),
('Shane', 0.002530218),
('Shannon', 0.000421583),
('Shaun', 0.000748761),
('Shawn', 0.004474546),
('Spencer', 0.000912094),
('Stanley', 0.000739032),
('Stephen', 0.007675365),
('Steve', 0.001407564),
('Steven', 0.013292898),
('Stuart', 0.000238826),
('Tanner', 0.000639292),
('Taylor', 0.00133036),
('Terrance', 0.000203311),
('Terrence', 0.000203704),
('Terry', 0.002873624),
('Theodore', 0.000596561),
('Thomas', 0.0143364),
('Tim', 0.000711126),
('Timothy', 0.012632608),
('Todd', 0.00414612),
('Tom', 0.000499283),
('Tommy', 0.000778737),
('Tony', 0.002511563),
('Tracy', 0.000728259),
('Travis', 0.004022458),
('Trevor', 0.001692523),
('Tristan', 0.000408759),
('Troy', 0.002695415),
('Tyler', 0.005962323),
('Tyrone', 0.000587207),
('Vernon', 0.000246401),
('Victor', 0.002340621),
('Vincent', 0.002494515),
('Walter', 0.001525891),
('Warren', 0.000317414),
('Wayne', 0.00160966),
('Wesley', 0.001733835),
('William', 0.020025989),
('Willie', 0.001379247),
('Wyatt', 0.000306591),
('Xavier', 0.000415222),
('Zachary', 0.005918634),
))
first_names = first_names_male.copy()
first_names.update(first_names_female)
# Top 1000 US surnames from US Census data
# Weighted by number of occurrences
# By way of http://names.mongabay.com/data/1000.html on 2/10/2016
last_names = OrderedDict((
('Smith', 0.021712045),
('Johnson', 0.01696938),
('Williams', 0.014016962),
('Brown', 0.012610763),
('Jones', 0.012451866),
('Miller', 0.010305045),
('Davis', 0.009798219),
('Garcia', 0.007842422),
('Rodriguez', 0.007348561),
('Wilson', 0.007154951),
('Martinez', 0.007082045),
('Anderson', 0.006966203),
('Taylor', 0.006582218),
('Thomas', 0.006493824),
('Hernandez', 0.006454314),
('Moore', 0.006383948),
('Martin', 0.006146745),
('Jackson', 0.006086567),
('Thompson', 0.005887767),
('White', 0.005843424),
('Lopez', 0.005679145),
('Lee', 0.005535909),
('Gonzalez', 0.005461513),
('Harris', 0.005423356),
('Clark', 0.005010598),
('Lewis', 0.00465937),
('Robinson', 0.004596305),
('Walker', 0.004580579),
('Perez', 0.00446375),
('Hall', 0.004327121),
('Young', 0.004257495),
('Allen', 0.00423392),
('Sanchez', 0.004031749),
('Wright', 0.004023754),
('King', 0.004011135),
('Scott', 0.003838487),
('Green', 0.003778053),
('Baker', 0.003776901),
('Adams', 0.00377448),
('Nelson', 0.003766713),
('Hill', 0.003762455),
('Ramirez', 0.003554281),
('Campbell', 0.003398636),
('Mitchell', 0.003357336),
('Roberts', 0.003346207),
('Carter', 0.0033127),
('Phillips', 0.003214932),
('Evans', 0.003127113),
('Turner', 0.003067045),
('Torres', 0.002971158),
('Parker', 0.002962725),
('Collins', 0.002904264),
('Edwards', 0.002897155),
('Stewart', 0.002859044),
('Flores', 0.002856449),
('Morris', 0.002848582),
('Nguyen', 0.002833697),
('Murphy', 0.00274576),
('Rivera', 0.002736275),
('Cook', 0.002693623),
('Rogers', 0.002690041),
('Morgan', 0.002525543),
('Peterson', 0.002513125),
('Cooper', 0.00246795),
('Reed', 0.0024437),
('Bailey', 0.002429747),
('Bell', 0.002419112),
('Gomez', 0.002408494),
('Kelly', 0.002379209),
('Howard', 0.002327986),
('Ward', 0.002321973),
('Cox', 0.002318775),
('Diaz', 0.00230051),
('Richardson', 0.002280051),
('Wood', 0.002259639),
('Watson', 0.002215168),
('Brooks', 0.002199808),
('Bennett', 0.002184311),
('Gray', 0.002162912),
('James', 0.002131032),
('Reyes', 0.002124517),
('Cruz', 0.002111304),
('Hughes', 0.002095999),
('Price', 0.002090206),
('Myers', 0.002054278),
('Long', 0.002042126),
('Foster', 0.002019703),
('Sanders', 0.002018442),
('Ross', 0.002009844),
('Morales', 0.001988655),
('Powell', 0.001978704),
('Sullivan', 0.001970362),
('Russell', 0.001968461),
('Ortiz', 0.001961617),
('Jenkins', 0.001952974),
('Gutierrez', 0.001945371),
('Perry', 0.001942986),
('Butler', 0.001926859),
('Barnes', 0.00192272),
('Fisher', 0.001921377),
('Henderson', 0.001919686),
('Coleman', 0.001906255),
('Simmons', 0.001842531),
('Patterson', 0.00181427),
('Jordan', 0.00180198),
('Reynolds', 0.001787233),
('Hamilton', 0.001775656),
('Graham', 0.001773307),
('Kim', 0.001773243),
('Gonzales', 0.001772028),
('Alexander', 0.001767542),
('Ramos', 0.001764371),
('Wallace', 0.001743026),
('Griffin', 0.001741893),
('West', 0.001722047),
('Cole', 0.001715916),
('Hayes', 0.001712992),
('Chavez', 0.001698299),
('Gibson', 0.001685096),
('Bryant', 0.001679075),
('Ellis', 0.001662381),
('Stevens', 0.001657657),
('Murray', 0.001630218),
('Ford', 0.001630062),
('Marshall', 0.001619244),
('Owens', 0.001611212),
('Mcdonald', 0.001609019),
('Harrison', 0.001604295),
('Ruiz', 0.001602943),
('Kennedy', 0.001568285),
('Wells', 0.001559139),
('Alvarez', 0.001542527),
('Woods', 0.0015425),
('Mendoza', 0.001540243),
('Castillo', 0.001511972),
('Olson', 0.001493963),
('Webb', 0.001493771),
('Washington', 0.001489705),
('Tucker', 0.001488763),
('Freeman', 0.001486507),
('Burns', 0.001481636),
('Henry', 0.001474683),
('Vasquez', 0.001461863),
('Snyder', 0.001456143),
('Simpson', 0.001445891),
('Crawford', 0.001444795),
('Jimenez', 0.001438892),
('Porter', 0.001433163),
('Mason', 0.0014207),
('Shaw', 0.001417849),
('Gordon', 0.001415674),
('Wagner', 0.001411855),
('Hunter', 0.001410886),
('Romero', 0.001405057),
('Hicks', 0.00140365),
('Dixon', 0.001389003),
('Hunt', 0.001388738),
('Palmer', 0.00137431),
('Robertson', 0.001373323),
('Black', 0.001372291),
('Holmes', 0.001372108),
('Stone', 0.001368782),
('Meyer', 0.001367521),
('Boyd', 0.001365803),
('Mills', 0.001351485),
('Warren', 0.001351458),
('Fox', 0.001346441),
('Rose', 0.001342485),
('Rice', 0.001338062),
('Moreno', 0.001334846),
('Schmidt', 0.001330067),
('Patel', 0.001325508),
('Ferguson', 0.001299832),
('Nichols', 0.001296908),
('Herrera', 0.0012864),
('Medina', 0.001273307),
('Ryan', 0.001273142),
('Fernandez', 0.001272841),
('Weaver', 0.001268354),
('Daniels', 0.001268034),
('Stephens', 0.001267724),
('Gardner', 0.001266974),
('Payne', 0.0012612),
('Kelley', 0.001256878),
('Dunn', 0.001251395),
('Pierce', 0.001247393),
('Arnold', 0.001245547),
('Tran', 0.001243537),
('Spencer', 0.001228443),
('Peters', 0.001226505),
('Hawkins', 0.001224998),
('Grant', 0.001224705),
('Hansen', 0.001219589),
('Castro', 0.001217578),
('Hoffman', 0.001212014),
('Hart', 0.001210378),
('Elliott', 0.001210296),
('Cunningham', 0.00120517),
('Knight', 0.001204841),
('Bradley', 0.001199624),
('Carroll', 0.001197166),
('Hudson', 0.001195091),
('Duncan', 0.001191674),
('Armstrong', 0.001187681),
('Berry', 0.001182409),
('Andrews', 0.001181632),
('Johnston', 0.001178114),
('Ray', 0.001176826),
('Lane', 0.001176214),
('Riley', 0.001169206),
('Carpenter', 0.001161101),
('Perkins', 0.001159986),
('Aguilar', 0.001154942),
('Silva', 0.001152795),
('Richards', 0.001148126),
('Willis', 0.001147888),
('Matthews', 0.001140688),
('Chapman', 0.001138632),
('Lawrence', 0.001135955),
('Garza', 0.00113421),
('Vargas', 0.001132583),
('Watkins', 0.001118832),
('Wheeler', 0.00111186),
('Larson', 0.001106195),
('Carlson', 0.001097606),
('Harper', 0.001095267),
('George', 0.001094444),
('Greene', 0.001092855),
('Burke', 0.001088935),
('Guzman', 0.001081762),
('Morrison', 0.001077641),
('Munoz', 0.001076133),
('Jacobs', 0.001055721),
('Obrien', 0.001054304),
('Lawson', 0.001052486),
('Franklin', 0.001049498),
('Lynch', 0.001045743),
('Bishop', 0.00104196),
('Carr', 0.001040662),
('Salazar', 0.001036788),
('Austin', 0.001033974),
('Mendez', 0.0010301),
('Gilbert', 0.001027084),
('Jensen', 0.001026408),
('Williamson', 0.001025348),
('Montgomery', 0.00102469),
('Harvey', 0.001024617),
('Oliver', 0.001020094),
('Howell', 0.001001756),
('Dean', 0.000998064),
('Hanson', 0.000996685),
('Weber', 0.000985601),
('Garrett', 0.000984788),
('Sims', 0.000979918),
('Burton', 0.000979132),
('Fuller', 0.000974783),
('Soto', 0.000974317),
('Mccoy', 0.000972946),
('Welch', 0.00096676),
('Chen', 0.000964384),
('Schultz', 0.000959067),
('Walters', 0.000952844),
('Reid', 0.00095034),
('Fields', 0.00094335),
('Walsh', 0.000943113),
('Little', 0.000938563),
('Fowler', 0.000937667),
('Bowman', 0.000934186),
('Davidson', 0.000932404),
('May', 0.000929498),
('Day', 0.000929041),
('Schneider', 0.00091878),
('Newman', 0.000918214),
('Brewer', 0.000917976),
('Lucas', 0.000917538),
('Holland', 0.000912677),
('Wong', 0.000908172),
('Banks', 0.000907276),
('Santos', 0.000904526),
('Curtis', 0.000904206),
('Pearson', 0.000902105),
('Delgado', 0.000901621),
('Valdez', 0.000901027),
('Pena', 0.000898605),
('Rios', 0.000882377),
('Douglas', 0.000881062),
('Sandoval', 0.000879947),
('Barrett', 0.000876228),
('Hopkins', 0.000864414),
('Keller', 0.000861645),
('Guerrero', 0.000860293),
('Stanley', 0.000857232),
('Bates', 0.000856555),
('Alvarado', 0.000856373),
('Beck', 0.000851238),
('Ortega', 0.000850963),
('Wade', 0.00084825),
('Estrada', 0.000848222),
('Contreras', 0.00084666),
('Barnett', 0.000843252),
('Caldwell', 0.00083458),
('Santiago', 0.00083119),
('Lambert', 0.000828001),
('Powers', 0.000826019),
('Chambers', 0.000825324),
('Nunez', 0.000824255),
('Craig', 0.000818618),
('Leonard', 0.000815027),
('Lowe', 0.000814844),
('Rhodes', 0.000812459),
('Byrd', 0.00081149),
('Gregory', 0.000811481),
('Shelton', 0.000807059),
('Frazier', 0.00080705),
('Becker', 0.000805122),
('Maldonado', 0.000804226),
('Fleming', 0.000803614),
('Vega', 0.000801595),
('Sutton', 0.000798351),
('Cohen', 0.000797008),
('Jennings', 0.00079529),
('Parks', 0.000788967),
('Mcdaniel', 0.000788702),
('Watts', 0.000787889),
('Barker', 0.000778688),
('Norris', 0.000778605),
('Vaughn', 0.000777006),
('Vazquez', 0.000775992),
('Holt', 0.000774018),
('Schwartz', 0.000773918),
('Steele', 0.000770756),
('Benson', 0.00076966),
('Neal', 0.000766151),
('Dominguez', 0.000765073),
('Horton', 0.000763173),
('Terry', 0.000762387),
('Wolfe', 0.000759417),
('Hale', 0.000757983),
('Lyons', 0.000751614),
('Graves', 0.000750892),
('Haynes', 0.000749595),
('Miles', 0.000748644),
('Park', 0.000748251),
('Warner', 0.000747648),
('Padilla', 0.000747475),
('Bush', 0.000744907),
('Thornton', 0.000741864),
('Mccarthy', 0.000740439),
('Mann', 0.00074032),
('Zimmerman', 0.000739608),
('Erickson', 0.000739534),
('Fletcher', 0.000739498),
('Mckinney', 0.00073661),
('Page', 0.000735487),
('Dawson', 0.000732718),
('Joseph', 0.000731256),
('Marquez', 0.000730534),
('Reeves', 0.00072931),
('Klein', 0.000728104),
('Espinoza', 0.000724787),
('Baldwin', 0.000723224),
('Moran', 0.000717696),
('Love', 0.000715659),
('Robbins', 0.000713996),
('Higgins', 0.000713685),
('Ball', 0.000708696),
('Cortez', 0.000708066),
('Le', 0.000707709),
('Griffith', 0.00070749),
('Bowen', 0.000704283),
('Sharp', 0.000702364),
('Cummings', 0.000700893),
('Ramsey', 0.000700144),
('Hardy', 0.000699988),
('Swanson', 0.000699358),
('Barber', 0.000699038),
('Acosta', 0.000698791),
('Luna', 0.000695593),
('Chandler', 0.000695474),
('Daniel', 0.000686529),
('Blair', 0.000686529),
('Cross', 0.00068652),
('Simon', 0.000683824),
('Dennis', 0.000683322),
('Oconnor', 0.000683066),
('Quinn', 0.00068101),
('Gross', 0.000678762),
('Navarro', 0.000675884),
('Moss', 0.000673874),
('Fitzgerald', 0.000671791),
('Doyle', 0.000671754),
('Mclaughlin', 0.000668191),
('Rojas', 0.00066767),
('Rodgers', 0.000667213),
('Stevenson', 0.000666034),
('Singh', 0.00066375),
('Yang', 0.000663613),
('Figueroa', 0.000662754),
('Harmon', 0.000661667),
('Newton', 0.000660881),
('Paul', 0.00066015),
('Manning', 0.000658514),
('Garner', 0.000658359),
('Mcgee', 0.000657198),
('Reese', 0.000655636),
('Francis', 0.000655353),
('Burgess', 0.000654265),
('Adkins', 0.000653571),
('Goodman', 0.000653151),
('Curry', 0.00065189),
('Brady', 0.000650345),
('Christensen', 0.000650062),
('Potter', 0.000649688),
('Walton', 0.000648719),
('Goodwin', 0.000642652),
('Mullins', 0.000642222),
('Molina', 0.000641537),
('Webster', 0.000640733),
('Fischer', 0.000640477),
('Campos', 0.000639152),
('Avila', 0.000638175),
('Sherman', 0.000638147),
('Todd', 0.000637873),
('Chang', 0.00063738),
('Blake', 0.000633021),
('Malone', 0.00063282),
('Wolf', 0.000629604),
('Hodges', 0.000629266),
('Juarez', 0.000628507),
('Gill', 0.000627722),
('Farmer', 0.000624158),
('Hines', 0.00062266),
('Gallagher', 0.00062202),
('Duran', 0.000621755),
('Hubbard', 0.000621527),
('Cannon', 0.000620631),
('Miranda', 0.0006181),
('Wang', 0.000617406),
('Saunders', 0.000614116),
('Tate', 0.000614098),
('Mack', 0.000613604),
('Hammond', 0.000612773),
('Carrillo', 0.000612691),
('Townsend', 0.000610854),
('Wise', 0.000609803),
('Ingram', 0.000609136),
('Barton', 0.000608743),
('Mejia', 0.000607939),
('Ayala', 0.000607766),
('Schroeder', 0.000606825),
('Hampton', 0.000606514),
('Rowe', 0.000604933),
('Parsons', 0.000604915),
('Frank', 0.000602311),
('Waters', 0.000601388),
('Strickland', 0.000601361),
('Osborne', 0.000601251),
('Maxwell', 0.000601041),
('Chan', 0.000600493),
('Deleon', 0.000599387),
('Norman', 0.000596381),
('Harrington', 0.00059512),
('Casey', 0.000592232),
('Patton', 0.00059184),
('Logan', 0.000590049),
('Bowers', 0.000589318),
('Mueller', 0.000587572),
('Glover', 0.00058643),
('Floyd', 0.000586074),
('Hartman', 0.000583205),
('Buchanan', 0.000583187),
('Cobb', 0.000582401),
('French', 0.00057701),
('Kramer', 0.000575858),
('Mccormick', 0.000572569),
('Clarke', 0.0005715),
('Tyler', 0.00057139),
('Gibbs', 0.000571208),
('Moody', 0.000569654),
('Conner', 0.000569572),
('Sparks', 0.000568649),
('Mcguire', 0.000567571),
('Leon', 0.000566822),
('Bauer', 0.000566319),
('Norton', 0.000564729),
('Pope', 0.000564227),
('Flynn', 0.000564199),
('Hogan', 0.000563322),
('Robles', 0.00056303),
('Salinas', 0.000562692),
('Yates', 0.000561029),
('Lindsey', 0.000559192),
('Lloyd', 0.000558781),
('Marsh', 0.000557365),
('Mcbride', 0.000556222),
('Owen', 0.000552449),
('Solis', 0.000548648),
('Pham', 0.00054777),
('Lang', 0.000546802),
('Pratt', 0.000546418),
('Lara', 0.000545779),
('Brock', 0.000545331),
('Ballard', 0.00054513),
('Trujillo', 0.000544664),
('Shaffer', 0.000541173),
('Drake', 0.000539602),
('Roman', 0.000539282),
('Aguirre', 0.00053835),
('Morton', 0.000537162),
('Stokes', 0.000536239),
('Lamb', 0.000535033),
('Pacheco', 0.000534841),
('Patrick', 0.00053231),
('Cochran', 0.000532091),
('Shepherd', 0.000529368),
('Cain', 0.000528801),
('Burnett', 0.000528674),
('Hess', 0.000528335),
('Li', 0.000528007),
('Cervantes', 0.000527084),
('Olsen', 0.000524087),
('Briggs', 0.000523538),
('Ochoa', 0.000522743),
('Cabrera', 0.000522387),
('Velasquez', 0.000522314),
('Montoya', 0.00052151),
('Roth', 0.000521099),
('Meyers', 0.000518485),
('Cardenas', 0.000517334),
('Fuentes', 0.000515717),
('Weiss', 0.000513085),
('Wilkins', 0.000512309),
('Hoover', 0.000512309),
('Nicholson', 0.000511559),
('Underwood', 0.000511441),
('Short', 0.000510801),
('Carson', 0.000510052),
('Morrow', 0.000508617),
('Colon', 0.000507228),
('Holloway', 0.000506808),
('Summers', 0.000506123),
('Bryan', 0.000505008),
('Petersen', 0.00050424),
('Mckenzie', 0.000503318),
('Serrano', 0.000503071),
('Wilcox', 0.000502431),
('Carey', 0.000501856),
('Clayton', 0.000501408),
('Poole', 0.000499864),
('Calderon', 0.000499727),
('Gallegos', 0.000499553),
('Greer', 0.000498996),
('Rivas', 0.000498786),
('Guerra', 0.000498667),
('Decker', 0.000497525),
('Collier', 0.000497196),
('Wall', 0.000497077),
('Whitaker', 0.000496547),
('Bass', 0.000496117),
('Flowers', 0.000495944),
('Davenport', 0.000495295),
('Conley', 0.000495185),
('Houston', 0.00049365),
('Huff', 0.000492426),
('Copeland', 0.00049132),
('Hood', 0.00049101),
('Monroe', 0.000488616),
('Massey', 0.00048847),
('Roberson', 0.000486085),
('Combs', 0.00048592),
('Franco', 0.000485747),
('Larsen', 0.000483937),
('Pittman', 0.000481434),
('Randall', 0.000479661),
('Skinner', 0.000479616),
('Wilkinson', 0.000479552),
('Kirby', 0.00047946),
('Cameron', 0.00047915),
('Bridges', 0.000477514),
('Anthony', 0.000476472),
('Richard', 0.000476399),
('Kirk', 0.00047565),
('Bruce', 0.000475175),
('Singleton', 0.000473283),
('Mathis', 0.000473274),
('Bradford', 0.000472635),
('Boone', 0.000472205),
('Abbott', 0.000471666),
('Charles', 0.000470734),
('Allison', 0.000470606),
('Sweeney', 0.00047057),
('Atkinson', 0.000470469),
('Horn', 0.000469473),
('Jefferson', 0.0004693),
('Rosales', 0.000469071),
('York', 0.000469053),
('Christian', 0.000467618),
('Phelps', 0.000467408),
('Farrell', 0.000466869),
('Castaneda', 0.000466814),
('Nash', 0.000466193),
('Dickerson', 0.000466156),
('Bond', 0.000465818),
('Wyatt', 0.00046485),
('Foley', 0.000464649),
('Chase', 0.000463963),
('Gates', 0.000463698),
('Vincent', 0.000462602),
('Mathews', 0.000462419),
('Hodge', 0.000462136),
('Garrison', 0.000461268),
('Trevino', 0.000461012),
('Villarreal', 0.000460071),
('Heath', 0.000459669),
('Dalton', 0.00045838),
('Valencia', 0.000457101),
('Callahan', 0.000456178),
('Hensley', 0.000455566),
('Atkins', 0.000454616),
('Huffman', 0.000454461),
('Roy', 0.000454351),
('Boyer', 0.000453218),
('Shields', 0.000452807),
('Lin', 0.000451016),
('Hancock', 0.000450742),
('Grimes', 0.000449965),
('Glenn', 0.000449929),
('Cline', 0.000449252),
('Delacruz', 0.00044917),
('Camacho', 0.000447726),
('Dillon', 0.0004462),
('Parrish', 0.000446109),
('Oneill', 0.000444583),
('Melton', 0.000444017),
('Booth', 0.000443889),
('Kane', 0.000443404),
('Berg', 0.000442975),
('Harrell', 0.000442893),
('Pitts', 0.000442811),
('Savage', 0.000441943),
('Wiggins', 0.000441833),
('Brennan', 0.000441294),
('Salas', 0.000441166),
('Marks', 0.000441157),
('Russo', 0.00043974),
('Sawyer', 0.000438397),
('Baxter', 0.000437283),
('Golden', 0.000437118),
('Hutchinson', 0.000436844),
('Liu', 0.000435528),
('Walter', 0.000435071),
('Mcdowell', 0.000434258),
('Wiley', 0.000434048),
('Rich', 0.00043381),
('Humphrey', 0.000433746),
('Johns', 0.000432093),
('Koch', 0.000432065),
('Suarez', 0.000431599),
('Hobbs', 0.000431462),
('Beard', 0.000430621),
('Gilmore', 0.000429909),
('Ibarra', 0.000428492),
('Keith', 0.00042714),
('Macias', 0.000427067),
('Khan', 0.000426829),
('Andrade', 0.000426729),
('Ware', 0.000426546),
('Stephenson', 0.000426363),
('Henson', 0.000425879),
('Wilkerson', 0.000425843),
('Dyer', 0.000425559),
('Mcclure', 0.000424929),
('Blackwell', 0.000424838),
('Mercado', 0.000424308),
('Tanner', 0.000424079),
('Eaton', 0.000423997),
('Clay', 0.000422727),
('Barron', 0.000422106),
('Beasley', 0.00042195),
('Oneal', 0.000421786),
('Small', 0.000418944),
('Preston', 0.000418944),
('Wu', 0.000418624),
('Zamora', 0.000418542),
('Macdonald', 0.000418323),
('Vance', 0.000418149),
('Snow', 0.000417473),
('Mcclain', 0.000416294),
('Stafford', 0.000414366),
('Orozco', 0.000413818),
('Barry', 0.000411579),
('English', 0.00041147),
('Shannon', 0.000410282),
('Kline', 0.000410264),
('Jacobson', 0.000410026),
('Woodard', 0.000409624),
('Huang', 0.000408573),
('Kemp', 0.000408445),
('Mosley', 0.000408418),
('Prince', 0.000407888),
('Merritt', 0.00040776),
('Hurst', 0.000407404),
('Villanueva', 0.000407248),
('Roach', 0.000406188),
('Nolan', 0.000405887),
('Lam', 0.000405558),
('Yoder', 0.000404279),
('Mccullough', 0.000403164),
('Lester', 0.0004013),
('Santana', 0.000400898),
('Valenzuela', 0.000399938),
('Winters', 0.000399865),
('Barrera', 0.000399482),
('Orr', 0.000398988),
('Leach', 0.000398988),
('Berger', 0.000397983),
('Mckee', 0.000397974),
('Strong', 0.000396832),
('Conway', 0.000396512),
('Stein', 0.000395927),
('Whitehead', 0.000395735),
('Bullock', 0.000393095),
('Escobar', 0.000392492),
('Knox', 0.000392327),
('Meadows', 0.000391843),
('Solomon', 0.000391432),
('Velez', 0.000391258),
('Odonnell', 0.000391094),
('Kerr', 0.000390692),
('Stout', 0.000389878),
('Blankenship', 0.000389824),
('Browning', 0.000389632),
('Kent', 0.00038922),
('Lozano', 0.000388946),
('Bartlett', 0.000388444),
('Pruitt', 0.000387996),
('Buck', 0.000387795),
('Barr', 0.000387713),
('Gaines', 0.000387137),
('Durham', 0.000387101),
('Gentry', 0.000387028),
('Mcintyre', 0.000386826),
('Sloan', 0.000386333),
('Rocha', 0.000385036),
('Melendez', 0.000385036),
('Herman', 0.000384597),
('Sexton', 0.000384496),
('Moon', 0.000384332),
('Hendricks', 0.00038266),
('Rangel', 0.000382559),
('Stark', 0.000382514),
('Lowery', 0.00038075),
('Hardin', 0.000380695),
('Hull', 0.000380622),
('Sellers', 0.000379754),
('Ellison', 0.000378822),
('Calhoun', 0.000378758),
('Gillespie', 0.000378219),
('Mora', 0.000377808),
('Knapp', 0.000377068),
('Mccall', 0.000376739),
('Morse', 0.000375652),
('Dorsey', 0.000375579),
('Weeks', 0.000375113),
('Nielsen', 0.000374692),
('Livingston', 0.000374299),
('Leblanc', 0.000373925),
('Mclean', 0.00037345),
('Bradshaw', 0.000372746),
('Glass', 0.000372106),
('Middleton', 0.00037196),
('Buckley', 0.000371942),
('Schaefer', 0.000371549),
('Frost', 0.000370809),
('Howe', 0.000370562),
('House', 0.000369849),
('Mcintosh', 0.00036963),
('Ho', 0.000369265),
('Pennington', 0.000368588),
('Reilly', 0.000368324),
('Hebert', 0.000368077),
('Mcfarland', 0.00036772),
('Hickman', 0.000367538),
('Noble', 0.000367474),
('Spears', 0.000367346),
('Conrad', 0.000366423),
('Arias', 0.000366277),
('Galvan', 0.000365911),
('Velazquez', 0.000365765),
('Huynh', 0.000365591),
('Frederick', 0.000364659),
('Randolph', 0.000363134),
('Cantu', 0.000361845),
('Fitzpatrick', 0.000360931),
('Mahoney', 0.000360374),
('Peck', 0.000360301),
('Villa', 0.000360027),
('Michael', 0.000359725),
('Donovan', 0.000358821),
('Mcconnell', 0.000358209),
('Walls', 0.00035787),
('Boyle', 0.000357642),
('Mayer', 0.000357368),
('Zuniga', 0.000356875),
('Giles', 0.000356372),
('Pineda', 0.000356345),
('Pace', 0.000356125),
('Hurley', 0.000356089),
('Mays', 0.000355568),
('Mcmillan', 0.000355403),
('Crosby', 0.000354928),
('Ayers', 0.000354855),
('Case', 0.000354152),
('Bentley', 0.00035374),
('Shepard', 0.000353658),
('Everett', 0.000353631),
('Pugh', 0.00035353),
('David', 0.000353238),
('Mcmahon', 0.000352306),
('Dunlap', 0.000351931),
('Bender', 0.000351456),
('Hahn', 0.000350451),
('Harding', 0.000350323),
('Acevedo', 0.000349336),
('Raymond', 0.00034866),
('Blackburn', 0.000348468),
('Duffy', 0.000346869),
('Landry', 0.00034686),
('Dougherty', 0.00034633),
('Bautista', 0.000345818),
('Shah', 0.00034569),
('Potts', 0.000344356),
('Arroyo', 0.000344274),
('Valentine', 0.000344192),
('Meza', 0.000344128),
('Gould', 0.00034411),
('Vaughan', 0.000343479),
('Fry', 0.000343032),
('Rush', 0.000342374),
('Avery', 0.0003421),
('Herring', 0.000341305),
('Dodson', 0.000340802),
('Clements', 0.000340245),
('Sampson', 0.000340217),
('Tapia', 0.000339916),
('Bean', 0.000339404),
('Lynn', 0.000339221),
('Crane', 0.000339203),
('Farley', 0.000339139),
('Cisneros', 0.000338536),
('Benton', 0.000338372),
('Ashley', 0.000338271),
('Mckay', 0.000337604),
('Finley', 0.000336928),
('Best', 0.000336818),
('Blevins', 0.000336626),
('Friedman', 0.000336553),
('Moses', 0.00033638),
('Sosa', 0.00033637),
('Blanchard', 0.000335923),
('Huber', 0.000335603),
('Frye', 0.000335484),
('Krueger', 0.000335283),
('Bernard', 0.000333931),
('Rosario', 0.000333867),
('Rubio', 0.000333794),
('Mullen', 0.000332981),
('Benjamin', 0.000332953),
('Haley', 0.000332898),
('Chung', 0.000332798),
('Moyer', 0.000332789),
('Choi', 0.000332505),
('Horne', 0.000331573),
('Yu', 0.000331546),
('Woodward', 0.000331153),
('Ali', 0.000329664),
('Nixon', 0.00032928),
('Hayden', 0.000329161),
('Rivers', 0.000328759),
('Estes', 0.000327471),
('Mccarty', 0.000326365),
('Richmond', 0.000326338),
('Stuart', 0.00032621),
('Maynard', 0.000325726),
('Brandt', 0.000325433),
('Oconnell', 0.000325378),
('Hanna', 0.000325278),
('Sanford', 0.000324967),
('Sheppard', 0.000324867),
('Church', 0.00032473),
('Burch', 0.000324565),
('Levy', 0.000324044),
('Rasmussen', 0.000323944),
('Coffey', 0.000323843),
('Ponce', 0.000323459),
('Faulkner', 0.000323359),
('Donaldson', 0.000323341),
('Schmitt', 0.000322783),
('Novak', 0.000322381),
('Costa', 0.000321879),
('Montes', 0.000321595),
('Booker', 0.000320727),
('Cordova', 0.000320481),
('Waller', 0.000319814),
('Arellano', 0.000319795),
('Maddox', 0.00031953),
('Mata', 0.000318781),
('Bonilla', 0.000318196),
('Stanton', 0.000318087),
('Compton', 0.000317867),
('Kaufman', 0.000317849),
('Dudley', 0.000317703),
('Mcpherson', 0.000317639),
('Beltran', 0.000317392),
('Dickson', 0.000317045),
('Mccann', 0.00031699),
('Villegas', 0.000316917),
('Proctor', 0.000316899),
('Hester', 0.000316835),
('Cantrell', 0.000316826),
('Daugherty', 0.000316607),
('Cherry', 0.000316287),
('Bray', 0.000315921),
('Davila', 0.000315611),
('Rowland', 0.000315218),
('Madden', 0.00031498),
('Levine', 0.00031498),
('Spence', 0.000314642),
('Good', 0.000314596),
('Irwin', 0.000314085),
('Werner', 0.000313884),
('Krause', 0.00031382),
('Petty', 0.000313207),
('Whitney', 0.000312961),
('Baird', 0.000312796),
('Hooper', 0.000311435),
('Pollard', 0.000311389),
('Zavala', 0.000311289),
('Jarvis', 0.000311124),
('Holden', 0.000311042),
('Hendrix', 0.00031096),
('Haas', 0.00031096),
('Mcgrath', 0.000310951),
('Bird', 0.00031032),
('Lucero', 0.000309955),
('Terrell', 0.000309882),
('Riggs', 0.000309461),
('Joyce', 0.000309233),
('Rollins', 0.000308812),
('Mercer', 0.000308812),
('Galloway', 0.000308593),
('Duke', 0.000308337),
('Odom', 0.000308081),
('Andersen', 0.000306172),
('Downs', 0.000306044),
('Hatfield', 0.00030577),
('Benitez', 0.00030556),
('Archer', 0.000305285),
('Huerta', 0.00030471),
('Travis', 0.000304628),
('Mcneil', 0.000303714),
('Hinton', 0.00030344),
('Zhang', 0.000303376),
('Hays', 0.000303303),
('Mayo', 0.000302681),
('Fritz', 0.000302151),
('Branch', 0.000301896),
('Mooney', 0.000301101),
('Ewing', 0.000300845),
('Ritter', 0.000300287),
('Esparza', 0.000299447),
('Frey', 0.000299109),
('Braun', 0.00029857),
('Gay', 0.000298533),
('Riddle', 0.000298369),
('Haney', 0.000298277),
('Kaiser', 0.000297574),
('Holder', 0.000296651),
('Chaney', 0.000296349),
('Mcknight', 0.00029592),
('Gamble', 0.000295838),
('Vang', 0.000295435),
('Cooley', 0.000295015),
('Carney', 0.000294969),
('Cowan', 0.000294604),
('Forbes', 0.000294476),
('Ferrell', 0.000293983),
('Davies', 0.0002939),
('Barajas', 0.000293736),
('Shea', 0.000293023),
('Osborn', 0.000292795),
('Bright', 0.000292777),
('Cuevas', 0.00029253),
('Bolton', 0.000292347),
('Murillo', 0.000292064),
('Lutz', 0.000291845),
('Duarte', 0.000291442),
('Kidd', 0.000291351),
('Key', 0.000291315),
('Cooke', 0.000291114),
))
prefixes_female = OrderedDict((
('Mrs.', 0.5),
('Ms.', 0.1),
('Miss', 0.1),
('Dr.', 0.3),
))
prefixes_male = OrderedDict((
('Mr.', 0.7),
('Dr.', 0.3),
))
suffixes_female = OrderedDict((
('MD', 0.5),
('DDS', 0.3),
('PhD', 0.1),
('DVM', 0.2),
))
# Removed Sr and I as they'd almost never be part of legal names.
suffixes_male = OrderedDict((
('Jr.', 0.2),
('II', 0.05),
('III', 0.03),
('IV', 0.015),
('V', 0.005),
('MD', 0.3),
('DDS', 0.2),
('PhD', 0.1),
('DVM', 0.1),
))
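# Illustrative sketch only (not part of the provider API): each OrderedDict
# above maps a name or prefix/suffix to its relative weight, so a generator
# can draw values with realistic frequencies. A minimal stand-alone version of
# that weighted draw, assuming nothing beyond the standard library:
import random

def weighted_choice(table):
    """Pick one key from an OrderedDict mapping value -> relative weight."""
    return random.choices(list(table.keys()), weights=list(table.values()), k=1)[0]

# e.g. weighted_choice(first_names_female), weighted_choice(last_names),
# or weighted_choice(suffixes_male) for a weighted suffix.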
|
danhuss/faker
|
faker/providers/person/en_US/__init__.py
|
Python
|
mit
| 58,191
|
[
"Amber",
"Brian",
"CRYSTAL",
"Dalton"
] |
78ce1a771922f819461c3a2e5ab451f511e1d10fa7506c2abe9022d1c68924fc
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""Developer tools for MooseDocs."""
import argparse
import os
import re
import collections
import logging
import MooseDocs
import moosesqa
import moosetree
import mooseutils
from .. import common
#from ..common import exceptions
#from ..tree import syntax
from ..extensions import template
LOG = logging.getLogger(__name__)
def command_line_options(subparser, parent):
"""Define the 'check' command."""
parser = subparser.add_parser('check',
parents=[parent],
help="Tool for performing SQA error checking and "
"creating/updating documentation stub pages.")
parser.add_argument('--config', type=str, default='sqa_reports.yml',
help="The YAML config file for performing SQA checks.")
parser.add_argument('--reports', nargs='+', default=['doc', 'req', 'app'], choices=['doc', 'req', 'app'],
help='Select the reports to produce.')
parser.add_argument('--show-warnings', action='store_true',
help='Display all report warnings.')
parser.add_argument('--generate', nargs='+', default=None, help='Deprecated')
parser.add_argument('--dump', nargs='+', default=None, help='Deprecated')
parser.add_argument('--app-reports', nargs='+', default=None,
help='Limit to the following application reports (e.g. --app-reports navier_stokes)')
parser.add_argument('--req-reports', nargs='+', default=None,
help='Limit to the following requirement reports (e.g. --req-reports navier_stokes)')
def _print_reports(title, reports, status):
"""Helper for printing SQAReport objects and propagating status"""
if reports:
print(mooseutils.colorText('\n{0}\n{1} REPORT(S):\n{0}\n'.format('-'*80, title.upper()), 'MAGENTA'), end='', flush=True)
for report in reports:
print(report.getReport(), '\n')
status = report.status if status < report.status else status
return status
def _enable_warnings(reports):
"""Helper for enabling all warnings"""
if reports:
for rep in reports:
rep.show_warning = True
def main(opt):
"""./moosedocs check"""
# Enable/disable different reports
kwargs = {'app_report':'app' in opt.reports,
'doc_report':'doc' in opt.reports,
'req_report':'req' in opt.reports}
# Change to a silent handler
logger = logging.getLogger('MooseDocs')
logger.handlers = list()
logger.addHandler(moosesqa.SilentRecordHandler())
# Create the SQAReport objects
doc_reports, req_reports, app_reports = moosesqa.get_sqa_reports(opt.config, **kwargs)
# Limit the reports
if opt.app_reports and app_reports:
app_reports = [report for report in app_reports if report.title in opt.app_reports]
if opt.req_reports and req_reports:
req_reports = [report for report in req_reports if report.title in opt.req_reports]
# Apply --generate option
if opt.generate:
print("The --generate option has been replaced by./moosedocs.py generate.")
# Apply --dump option
if opt.dump:
print("The --dump option has been replaced by./moosedocs.py syntax.")
# Apply 'show_warnings' option
if opt.show_warnings:
_enable_warnings(app_reports)
_enable_warnings(doc_reports)
_enable_warnings(req_reports)
# Execute and display reports
status = _print_reports('MooseApp', app_reports, 0)
status = _print_reports('Document', doc_reports, status)
status = _print_reports('Requirement', req_reports, status)
return status > 1 # 0 - PASS; 1-WARNING; 2-ERROR (Only ERROR is a failure)
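# Illustrative sketch only: how the status propagation above behaves.
# _StubReport is a made-up stand-in for moosesqa.SQAReport with just the two
# members _print_reports touches (status and getReport); the real report
# objects carry far more state.
class _StubReport(object):
    def __init__(self, title, status):
        self.title = title
        self.status = status          # 0 - PASS, 1 - WARNING, 2 - ERROR
        self.show_warning = False
    def getReport(self):
        return '{}: status={}'.format(self.title, self.status)

def _demo_status_propagation():
    """A warning-only run keeps the overall status at 1, so the check still passes."""
    status = _print_reports('Demo', [_StubReport('doc', 0), _StubReport('req', 1)], 0)
    return status > 1   # False: only an ERROR (status 2) fails the check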
|
harterj/moose
|
python/MooseDocs/commands/check.py
|
Python
|
lgpl-2.1
| 4,072
|
[
"MOOSE"
] |
48b9920dae42308955cd23b6bcc2cb91083f7285607514ca9611a6041121e03c
|
"""
Performs depth-first search on the graph specified by infile and outputs the biconnected and strongly-connected
components.
"""
__author__ = 'Tom'
TREE, FORWARD, BACK, CROSS = 0, 1, 2, 3
class Node:
def __init__(self, value):
self.value = value
self.d = None
self.f = None
self.parent = None
self.adjacent_nodes = list()
self.visited = False
self.low = None
self.articulation = False
self.separators = 0
self.descendants = 0
def __repr__(self):
return repr(self.value)
class DFS:
def __init__(self, nodes):
self.nodes = nodes
self.edges = dict()
def search(self):
for u in self.nodes:
u.visited = False
u.parent = None
self.time = 0
self.scc = dict()
self.num_components = 0
self.bcc_edges = list()
self.bccs = dict()
self.num_bcc = 0
for u in sorted(self.nodes, key=lambda x: x.f, reverse=True):
if not u.visited:
self.scc[self.num_components] = list((u,))
self.visit(u)
self.num_components += 1
if u.separators < 2:
u.articulation = False
def visit(self, u):
u.visited = True
self.time += 1
u.d = self.time
u.low = u.d
for v in u.adjacent_nodes:
if not v.visited:
self.edges[(u, v)] = TREE
self.scc[self.num_components].append(v)
self.bcc_edges.append((u, v,))
v.parent = u
self.visit(v)
u.descendants += 1 + v.descendants
if v.low >= u.d:
self.bccs[self.num_bcc] = list()
done = False
while not done:
e = self.bcc_edges.pop()
self.bccs[self.num_bcc].append(e)
if e == (u, v,):
done = True
self.num_bcc += 1
u.articulation = True
u.separators += 1
u.low = min(u.low, v.low)
elif u.parent != v and v.d < u.d:
# (u, v) is a back edge from u to its ancestor v
self.bcc_edges.append((u, v,))
u.low = min(u.low, v.d)
self.time += 1
u.f = self.time
if __name__ == "__main__":
import argparse
from algorithms.graph_algorithms.readgraph import Graph
# create the top-level parser
parser = argparse.ArgumentParser(description='Performs depth-first search amongst nodes in a graph.')
# add arguments
parser.add_argument('infile', type=argparse.FileType())
# parse arguments
args = parser.parse_args()
# read in edge weights from file
g = Graph()
g.read_graph(args.infile)
nodes = [Node(chr(i)) for i in range(ord('a'), ord('a') + g.num_nodes)]
for i, j in g.edge_weights.keys():
nodes[i].adjacent_nodes.append(nodes[j])
dfs = DFS(nodes)
dfs.search()
# output node discovery and finish times
d, f = "", ""
for node in nodes:
d += str(node.d).ljust(10)
f += str(node.f).ljust(10)
print "V".ljust(10) + "".join(map(lambda x: x.value.ljust(10), nodes))
print "d[V]".ljust(10) + d
print "f[V]".ljust(10) + f
# output articulation pts.
print "\nArticulation Pts.: %s" % ", ".join(map(lambda u: u.value, filter(lambda u: u.articulation, nodes)))
# output biconnected components
print "\nNo. of biconnected components: %d" % dfs.num_bcc
for i, edges in dfs.bccs.iteritems():
vertices = list()
for u, v in edges:
if u.value not in vertices:
vertices.append(u.value)
if v.value not in vertices:
vertices.append(v.value)
print "%d: %s" % (i + 1, ", ".join(vertices),)
# output classification of edges
print "\nClassification of edges:"
for u in nodes:
for v in u.adjacent_nodes:
if (u, v,) not in dfs.edges or dfs.edges[(u, v)] != TREE:
if u.d < v.d <= u.d + u.descendants:
dfs.edges[(u, v)] = FORWARD
elif v.d < u.d <= v.d + v.descendants:
dfs.edges[(u, v)] = BACK
else:
dfs.edges[(u, v)] = CROSS
for edge, classification in dfs.edges.iteritems():
if not classification:
classification = "Tree"
elif classification < 2:
classification = "Forward"
elif classification < 3:
classification = "Back"
else:
classification = "Cross"
print "%s is a %s edge." % (edge, classification,)
# output strongly-connected components
for node in nodes:
node.adjacent_nodes = list()
for i, j in g.edge_weights.keys():
nodes[j].adjacent_nodes.append(nodes[i])
dfs.search()
print "\nNo. of strongly-connected components: %d" % dfs.num_components
for i, vertices in dfs.scc.iteritems():
print "%d: %s" % (i + 1, ", ".join(map(lambda x: x.value, vertices)),)
|
tjtrebat/algorithms
|
algorithms/graph_algorithms/dfs/dfs.py
|
Python
|
gpl-2.0
| 5,205
|
[
"VisIt"
] |
304f0744f839b802690f04b15acc780569a2bfd13ec9fa703af358fde331cf40
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lints the docstring of an API symbol."""
import ast
import inspect
import re
import textwrap
from typing import Optional, Any, List, Tuple
import astor
from tensorflow_docs.api_generator import parser
from tensorflow_docs.api_generator.pretty_docs import base_page
from tensorflow_docs.api_generator.report.schema import api_report_generated_pb2 as api_report_pb2
def _get_source(py_object: Any) -> Optional[str]:
if py_object is not None:
try:
source = textwrap.dedent(inspect.getsource(py_object))
return source
except Exception: # pylint: disable=broad-except
return None
return None
def _count_empty_param(items: List[Tuple[str, Optional[str]]]) -> int:
count = 0
for name, description in items:
del name
if description is None or description.strip() == '':
count += 1
return count
def lint_params(page_info: base_page.PageInfo) -> api_report_pb2.ParameterLint:
"""Lints the parameters of a docstring.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `DescriptionLint` proto object.
"""
param_lint = api_report_pb2.ParameterLint()
reserved_keywords = frozenset(['self', 'cls', '_cls'])
if page_info.py_object is not None:
try:
sig = inspect.signature(page_info.py_object)
args_in_code = sig.parameters.keys()
num_args_in_code = len(args_in_code)
for arg in args_in_code:
if arg in reserved_keywords:
num_args_in_code -= 1
break
param_lint.num_args_in_code = num_args_in_code
except (ValueError, TypeError):
param_lint.num_args_in_code = 0
else:
param_lint.num_args_in_code = 0
for part in page_info.doc.docstring_parts:
if isinstance(part, parser.TitleBlock):
if part.title.lower().startswith('arg'):
param_lint.num_args_in_doc = len(part.items)
param_lint.num_empty_param_desc_args = _count_empty_param(part.items)
if part.title.lower().startswith('attr'):
param_lint.total_attr_param = len(part.items)
param_lint.num_empty_param_desc_attr = _count_empty_param(part.items)
return param_lint
def lint_description(
page_info: base_page.PageInfo) -> api_report_pb2.DescriptionLint:
"""Lints the description of a docstring.
If a field in the proto is assigned 0, then it means that that field doesn't
exist.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `DescriptionLint` proto object.
"""
len_brief = 0
if page_info.doc.brief:
len_brief = len(page_info.doc.brief.split())
len_long_desc = 0
for part in page_info.doc.docstring_parts:
if not isinstance(part, parser.TitleBlock):
len_long_desc += len(part.split())
return api_report_pb2.DescriptionLint(
len_brief=len_brief, len_long_desc=len_long_desc)
_EXAMPLE_RE = re.compile(
r"""
(?P<indent>\ *)(?P<content>```.*?\n\s*?```)
""", re.VERBOSE | re.DOTALL)
def lint_usage_example(
page_info: base_page.PageInfo) -> api_report_pb2.UsageExampleLint:
"""Counts the number of doctests and untested examples in a docstring.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `UsageExampleLint` proto object.
"""
description = []
for part in page_info.doc.docstring_parts:
if isinstance(part, parser.TitleBlock):
description.append(str(part))
else:
description.append(part)
desc_str = ''.join(description)
num_doctest = 0
num_untested_examples = 0
# The doctests are wrapped in backticks (```).
for match in _EXAMPLE_RE.finditer(desc_str):
if '>>>' in match.groupdict()['content']:
num_doctest += 1
else:
num_untested_examples += 1
return api_report_pb2.UsageExampleLint(
num_doctest=num_doctest, num_untested_examples=num_untested_examples)
class ReturnVisitor(ast.NodeVisitor):
"""Visits the Returns node in an AST."""
def __init__(self) -> None:
self.total_returns = []
def visit_Return(self, node) -> None: # pylint: disable=invalid-name
if node.value is None:
self.total_returns.append('None')
else:
self.total_returns.append(astor.to_source(node.value))
def lint_returns(
page_info: base_page.PageInfo) -> Optional[api_report_pb2.ReturnLint]:
""""Lints the returns/yields block in the docstring.
This linter only checks if a `Returns`/`Yields` block exists in the docstring
if it finds `return`/`yield` keyword in the source code.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `ReturnLint` proto object.
"""
source = _get_source(page_info.py_object)
return_visitor = ReturnVisitor()
if source is not None:
try:
return_visitor.visit(ast.parse(source))
except Exception: # pylint: disable=broad-except
pass
keywords = ('return', 'yield')
if source is not None and any(word in source for word in keywords):
for item in page_info.doc.docstring_parts:
if isinstance(item, parser.TitleBlock):
if item.title.lower().startswith(keywords):
return api_report_pb2.ReturnLint(returns_defined=True)
# If "Returns"/"Yields" word is present in the brief docstring then having
# a separate `Returns`/`Yields` section is not needed.
if page_info.doc.brief.lower().startswith(keywords):
return api_report_pb2.ReturnLint(returns_defined=True)
# If the code only returns None then `Returns` section in the docstring is
# not required.
if all(return_val == 'None' for return_val in return_visitor.total_returns):
return None
return api_report_pb2.ReturnLint(returns_defined=False)
return None
class RaiseVisitor(ast.NodeVisitor):
"""Visits the Raises node in an AST."""
def __init__(self) -> None:
self.total_raises = []
def visit_Raise(self, node) -> None: # pylint: disable=invalid-name
# This `if` block means that there is a bare raise in the code.
if node.exc is None:
return
self.total_raises.append(astor.to_source(node.exc.func).strip())
def lint_raises(page_info: base_page.PageInfo) -> api_report_pb2.RaisesLint:
"""Lints the raises block in the docstring.
The total raises in code are extracted via an AST and compared against those
extracted from the docstring.
Args:
page_info: A `PageInfo` object containing the information of a page
generated via the api generation.
Returns:
A filled `RaisesLint` proto object.
"""
raises_lint = api_report_pb2.RaisesLint()
# Extract the raises from the source code.
raise_visitor = RaiseVisitor()
source = _get_source(page_info.py_object)
if source is not None:
try:
raise_visitor.visit(ast.parse(source))
except Exception: # pylint: disable=broad-except
pass
raises_lint.total_raises_in_code = len(raise_visitor.total_raises)
# Extract the raises defined in the docstring.
raises_defined_in_doc = []
for part in page_info.doc.docstring_parts:
if isinstance(part, parser.TitleBlock):
if part.title.lower().startswith('raises'):
raises_lint.num_raises_defined = len(part.items)
if part.items:
raises_defined_in_doc.extend(list(zip(*part.items))[0])
break
else:
raises_lint.num_raises_defined = 0
return raises_lint
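# Illustrative sketch only (not part of the linter API): how _EXAMPLE_RE
# separates doctests from untested examples -- a fenced block containing
# '>>>' counts as a doctest, any other fenced block as an untested example.
def _demo_example_counts() -> Tuple[int, int]:
    """Return (num_doctest, num_untested_examples) for a made-up docstring."""
    desc = textwrap.dedent('''\
        Adds two values.

        ```
        >>> add(1, 2)
        3
        ```

        ```python
        result = add(a, b)
        ```
        ''')
    num_doctest = 0
    num_untested = 0
    for match in _EXAMPLE_RE.finditer(desc):
        if '>>>' in match.groupdict()['content']:
            num_doctest += 1
        else:
            num_untested += 1
    return num_doctest, num_untested  # (1, 1) for the docstring above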
|
tensorflow/docs
|
tools/tensorflow_docs/api_generator/report/linter.py
|
Python
|
apache-2.0
| 8,253
|
[
"VisIt"
] |
069343620e66c931ca4421be29f96d30102cc1ec9cad5597da6e52941b440561
|
#!/usr/bin/env python
import os
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
reader = vtk.vtkMINCImageReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/t3_grid_0.mnc")
reader.RescaleRealValuesOn()
attributes = vtk.vtkMINCImageAttributes()
image = reader
# The current directory must be writeable.
#
try:
channel = open("minc1.mnc", "wb")
channel.close()
minc1 = vtk.vtkMINCImageWriter()
minc1.SetInputConnection(reader.GetOutputPort())
minc1.SetFileName("minc1.mnc")
attributes.ShallowCopy(reader.GetImageAttributes())
attributes.SetAttributeValueAsString(
"patient", "full_name", "DOE^JOHN DAVID")
minc2 = vtk.vtkMINCImageWriter()
minc2.SetImageAttributes(attributes)
minc2.SetInputConnection(reader.GetOutputPort())
minc2.SetFileName("minc2.mnc")
minc3 = vtk.vtkMINCImageWriter()
minc3.SetImageAttributes(attributes)
minc3.AddInputConnection(reader.GetOutputPort())
minc3.AddInputConnection(reader.GetOutputPort())
minc3.SetFileName("minc3.mnc")
minc1.Write()
minc2.Write()
minc3.Write()
reader2 = vtk.vtkMINCImageReader()
reader2.SetFileName("minc3.mnc")
reader2.RescaleRealValuesOn()
reader2.SetTimeStep(1)
reader2.Update()
image = reader2
# cleanup
#
try:
os.remove("minc1.mnc")
except OSError:
pass
try:
os.remove("minc2.mnc")
except OSError:
pass
try:
os.remove("minc3.mnc")
except OSError:
pass
# Write out the file header for coverage
attributes.PrintFileHeader()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(image.GetOutputPort())
viewer.SetColorWindow(100)
viewer.SetColorLevel(0)
viewer.Render()
except IOError:
print("Unable to test the writer/reader.")
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/IO/MINC/Testing/Python/TestMINCImageWriter.py
|
Python
|
bsd-3-clause
| 1,971
|
[
"VTK"
] |
2fbf378152fe563910070a74394d1f4e1bfef24eda6c18b6922461d7d8e3d626
|
#!/usr/bin/env python
# RegCM postprocessing tool
# Copyright (C) 2014 Aliou, Addisu, Kanhu, Andrey
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from read import RegCMReader, CRUReader
from plot import Plotter
usage = """usage: ./app.py 'model_file_pattern' model_nc_variable 'observ_glob_pattern' observ_nc_variable
Parameters:
'model_file_pattern': a glob pattern for one or more netCDF files produced by the RegCM program (in apostrophes)
model_nc_variable: a variable from the RegCM netCDF files
'observ_glob_pattern': a glob pattern for one or more CRU netCDF files (also in apostrophes)
observ_nc_variable: a variable from the CRU netCDF files
Example:
./app.py 'data/Africa_SRF.1970*.nc' t2m 'obs/CRUTMP.CDF' TMP
"""
if __name__ == "__main__":
if len(sys.argv) != 5:
print usage
sys.exit(1)
pattern = sys.argv[1]
nc_var = sys.argv[2]
obs_pattern = sys.argv[3]
obs_nc_var = sys.argv[4]
r = RegCMReader(pattern)
value = r.get_value(nc_var).mean()
time_limits = value.get_limits('time')
crd_limits = value.get_latlonlimits()
obs_r = CRUReader(obs_pattern)
obs_value = obs_r.get_value(obs_nc_var, imposed_limits={'time': time_limits}, latlon_limits=crd_limits).mean()
if obs_nc_var == "TMP":
obs_value.to_K()
value.regrid(obs_value.latlon)
diff = obs_value - value
plt = Plotter(diff)
plt.plot(levels = (-5, 5))
plt.show()
plt.save('image', format='png')
plt.close()
|
ansobolev/regCMPostProc
|
src/app.py
|
Python
|
gpl-3.0
| 2,109
|
[
"NetCDF"
] |
66b14c7141840354d703580a4db499d88347e0880ce189f73706760d39088314
|
from questionnaire.models import Questionnaire, Section, SubSection, Question, QuestionGroup, QuestionOption, \
QuestionGroupOrder
questionnaire = Questionnaire.objects.get(name="JRF Core English", description="From dropbox as given by Rouslan")
section_1 = Section.objects.create(order=4, questionnaire=questionnaire, name="Routine Coverage",
title="Immunization and Vitamin A Coverage <br/> National Administrative Coverage for the Year 2013")
sub_section = SubSection.objects.create(order=1, section=section_1, title="Administrative coverage")
question1 = Question.objects.create(text="Vaccine/Supplement", export_label='Vaccine or supplement name',
is_primary=True,
UID='C00048', answer_type='MultiChoice',
instructions="Please complete separately for each vaccine, even if they are given in combination (e.g., if Pentavalent vaccine DTP-HepB-Hib is used, fill in the data for DTP3, HepB3 and Hib3)")
QuestionOption.objects.create(text="BCG", question=question1)
QuestionOption.objects.create(text="HepB, birth dose (given within 24 hours of birth)", question=question1,
instructions="Provide ONLY hepatitis B vaccine doses given within 24 hours of birth. If time of birth is unknown, please provide doses of hepatitis B vaccine given within first day of life. (For example, if the infant is born on day 0, include all HepB does given on days 0 and 1.) This indicator is NOT equivalent to HepB1")
QuestionOption.objects.create(text="DTP1", question=question1)
QuestionOption.objects.create(text="DTP3", question=question1)
QuestionOption.objects.create(text="Polio3 (OPV or IPV)", question=question1,
instructions="This refers to the third dose of polio vaccine, excluding polio 0 (zero), if such a dose is included in the national schedule.")
QuestionOption.objects.create(text="HepB3", question=question1,
instructions="""In countries using monovalent vaccine for all doses, this refers to the third dose of hepatitis B vaccine, including the birth dose, if such a dose is included in the national schedule.<br/>
In countries that are using monovalent vaccine for the birth dose and combination vaccine for the subsequent doses, HepB3 will refer to the third dose of the combination vaccine in addition to the birth dose.""")
QuestionOption.objects.create(text="Hib3", question=question1)
QuestionOption.objects.create(text="Pneumococcal conjugate vaccine 1st dose", question=question1)
QuestionOption.objects.create(text="Pneumococcal conjugate vaccine 2nd dose", question=question1)
QuestionOption.objects.create(text="Pneumococcal conjugate vaccine 3rd dose", question=question1)
QuestionOption.objects.create(text="Rotavirus 1st dose", question=question1)
QuestionOption.objects.create(text="Rotavirus last dose (2nd or 3rd depending on schedule)", question=question1)
QuestionOption.objects.create(text="MCV1 (measles-containing vaccine, 1st dose)", question=question1,
instructions="Measles-containing vaccine (MCV) includes measles vaccine, measles-rubella vaccine, measles-mumps-rubella vaccine, etc. Fill in the rows for both MCV and rubella vaccines even if they were given in combination.")
QuestionOption.objects.create(text="Rubella 1 (rubella-containing vaccine)", question=question1,
instructions="Measles-containing vaccine (MCV) includes measles vaccine, measles-rubella vaccine, measles-mumps-rubella vaccine, etc. Fill in the rows for both MCV and rubella vaccines even if they were given in combination.")
QuestionOption.objects.create(text="MCV2 (measles-containing vaccine, 2nd dose)", question=question1,
instructions="Measles-containing vaccine (MCV) includes measles vaccine, measles-rubella vaccine, measles-mumps-rubella vaccine, etc. Fill in the rows for both MCV and rubella vaccines even if they were given in combination.")
QuestionOption.objects.create(text="Vitamin A, 1st dose", question=question1)
QuestionOption.objects.create(text="Japanese encephalitis vaccine", question=question1)
QuestionOption.objects.create(text="Tetanus toxoid-containing vaccine (TT2+) ", question=question1)
QuestionOption.objects.create(text="Protection at birth (PAB) against neonatal tetanus", question=question1,
instructions="This refers to children who are protected at birth (PAB) against neonatal tetanus by their mother's TT status; this information is collected during the DTP1 visit - a child is deemed protected if the mother has received 2 doses of TT in the last pregnancy or at-least 3 doses of TT in previous years. If the country does not calculate PAB, leave the cells blank.")
question2 = Question.objects.create(text="Description of the denominator used in coverage calculation",
UID='C00049', answer_type='MultiChoice')
QuestionOption.objects.create(text="live birth", question=question2)
QuestionOption.objects.create(text="surviving infants", question=question2)
QuestionOption.objects.create(text="less than 59 months", question=question2)
QuestionOption.objects.create(text="12 - 59 months", question=question2)
QuestionOption.objects.create(text="6 - 59 months", question=question2)
QuestionOption.objects.create(text="pregnant women", question=question2,
instructions="The number of live births can be used as a proxy for the total number of pregnant women.")
question3 = Question.objects.create(text="Number in target group(denominator)",
export_label='Number in target group (denominator)',
UID='C00050', answer_type='Number', )
question4 = Question.objects.create(text="Number of doses administered through routine services (numerator)",
export_label='Number of doses administered through routine services (numerator)',
UID='C00051', answer_type='Number')
question5 = Question.objects.create(text="Percent coverage (=C/B*100)", UID='C00052', answer_type='Number',
export_label='Percent coverage')
parent1 = QuestionGroup.objects.create(subsection=sub_section, order=1, display_all=True, grid=True)
parent1.question.add(question1, question2, question3, question4, question5)
QuestionGroupOrder.objects.create(question=question1, question_group=parent1, order=1)
QuestionGroupOrder.objects.create(question=question2, question_group=parent1, order=2)
QuestionGroupOrder.objects.create(question=question3, question_group=parent1, order=3)
QuestionGroupOrder.objects.create(question=question4, question_group=parent1, order=4)
QuestionGroupOrder.objects.create(question=question5, question_group=parent1, order=5)
sub_section2 = SubSection.objects.create(order=2, section=section_1, title="Accuracy of administrative coverage",
description="Administrative coverage estimates can be biased by inaccurate numerators and/or denominators. Use this space to describe any factors limiting the accuracy of the coverage estimates entered in the table above. Some common problems are listed here. Numerators may be underestimated because of incomplete reporting from reporting units or the exclusion of other vaccinating sources, such as the private sector and NGOs; or overestimated because of over-reporting from reporting units, for example, when other target groups are included. Denominators may have problems arising from population movements, inaccurate census estimations or projections, or multiple sources of data.")
question21 = Question.objects.create(text="Describe any factors limiting the accuracy of the numerator: ",
export_label='Factors limiting the accuracy of the numerator',
UID='C00053', answer_type='Text')
question22 = Question.objects.create(
text="Describe any factors limiting the accuracy of the denominator: (denominator = number in target group)",
export_label='Factors limiting the accuracy of the denominator',
UID='C00054', answer_type='Text')
parent2 = QuestionGroup.objects.create(subsection=sub_section2, order=1)
parent2.question.add(question21)
QuestionGroupOrder.objects.create(question=question21, question_group=parent2, order=1)
parent3 = QuestionGroup.objects.create(subsection=sub_section2, order=2)
parent3.question.add(question22)
QuestionGroupOrder.objects.create(question=question22, question_group=parent3, order=1)
sub_section3 = SubSection.objects.create(order=3, section=section_1, title="Completeness of district level reporting",
description="This table collects information about the completeness of district reporting, i.e., the main reporting system which produced the numbers in the previous table on vaccine coverage. The number of expected reports is equal to the number of districts multiplied by the number of reporting periods in the year")
question31 = Question.objects.create(
text="Total number of district reports expected at the national level from all districts across repording periods in 2013 (e.g., # districts x 12 months)",
export_label='Total number of district reports expected at the national level from all districts across repording periods in report year',
UID='C00055', answer_type='Number')
question32 = Question.objects.create(
text="Total number of district reports actually received at the national level from all districts across reporting periods in 2013",
export_label='Total number of district reports actually received at the national level from all districts across reporting periods in report year',
UID='C00056', answer_type='Number')
parent4 = QuestionGroup.objects.create(subsection=sub_section3, order=1)
parent4.question.add(question31)
QuestionGroupOrder.objects.create(question=question31, question_group=parent4, order=1)
parent5 = QuestionGroup.objects.create(subsection=sub_section3, order=2)
parent5.question.add(question32)
QuestionGroupOrder.objects.create(question=question32, question_group=parent5, order=1)
sub_section4 = SubSection.objects.create(order=4, section=section_1, title="HPV Vaccine Doses administered: 2013",
description="Report the number of HPV vaccinations given to females by their age at time of administration for each of the three recommended doses of HPV vaccine. If age is unknown but can be estimated, report for the estimated age. For example, if vaccination is offered exclusively to girls in the 6th school form and most girls in the 6th school form are eleven years of age, vaccinations by dose may be reported as vaccinations for girls eleven years of age.")
question41 = Question.objects.create(text="Vaccine administered (age in years)", UID='C00057',
answer_type='MultiChoice', is_primary=True)
QuestionOption.objects.create(text="9", question=question41)
QuestionOption.objects.create(text="10", question=question41)
QuestionOption.objects.create(text="11", question=question41)
QuestionOption.objects.create(text="12", question=question41)
QuestionOption.objects.create(text="13", question=question41)
QuestionOption.objects.create(text="14", question=question41)
QuestionOption.objects.create(text="15+", question=question41)
QuestionOption.objects.create(text="unknown age", question=question41)
question42 = Question.objects.create(text="1st dose", UID='C00058', answer_type='Number', export_label='1st dose')
question43 = Question.objects.create(text="2d dose", UID='C00059', answer_type='Number', export_label=' 2nd dose')
question44 = Question.objects.create(text="3d dose", UID='C00060', answer_type='Number', export_label='3rd dose')
parent7 = QuestionGroup.objects.create(subsection=sub_section4, order=1, grid=True, display_all=True)
parent7.question.add(question41, question42, question43, question44)
QuestionGroupOrder.objects.create(question=question41, question_group=parent7, order=1)
QuestionGroupOrder.objects.create(question=question42, question_group=parent7, order=2)
QuestionGroupOrder.objects.create(question=question43, question_group=parent7, order=3)
QuestionGroupOrder.objects.create(question=question44, question_group=parent7, order=4)
sub_section5 = SubSection.objects.create(order=5, section=section_1, title="Accuracy of reported HPV Vaccine Doses")
question51 = Question.objects.create(text="Describe any factors limiting the accuracy of the administered doses",
UID='C00061', answer_type='Text')
parent8 = QuestionGroup.objects.create(subsection=sub_section5, order=1)
parent8.question.add(question51)
QuestionGroupOrder.objects.create(question=question51, question_group=parent8, order=1)
sub_section6 = SubSection.objects.create(order=6, section=section_1,
title="Seasonal Influenza Vaccine Doses Administered",
description="In an updated position paper (2012), WHO recommends that countries considering the initiation or expansion of seasonal influenza vaccination programmes give the highest priority to pregnant women. Additional risk groups to be considered for vaccination, in no particular order of priority, are: children aged 6-59 months; the elderly; individuals with specific chronic medical conditions; and healthcare workers. Report immunization coverage in this table using data collected from vaccination clinics/sites on the number of doses administered for each of the risk groups that are included in the country-specific policy for seasonal influenza vaccination. ")
question61 = Question.objects.create(text="Description of target population", UID='C00062', answer_type='MultiChoice',
export_label='target population', is_primary=True)
QuestionOption.objects.create(text="Children 6-23 months", question=question61)
QuestionOption.objects.create(text="Children >=24 months up to 9 years", question=question61)
QuestionOption.objects.create(text="Elderly (please specify minimum age under explanatory comments)",
question=question61)
QuestionOption.objects.create(text="Pregnant women", question=question61)
QuestionOption.objects.create(text="Health care workers", question=question61)
QuestionOption.objects.create(text="Persons with chronic diseases ", question=question61)
# instruction = (e.g. respiratory, cardiac, liver and renal diseases; neurodevelopmental, immunological and haematological disorders, diabetes; obesity etc.)
QuestionOption.objects.create(text="Others)", question=question61)
#instruction = (may include various other groups: poultry workers, subnational levels, government officials, adults, etc
question62 = Question.objects.create(text="Number in target group (denominator)", UID='C00063', answer_type='Number',
export_label='Number in target group (denominator)')
question63 = Question.objects.create(text="Number of doses administered through routine services (numerator)",
UID='C00064', answer_type='Number',
export_label='Doses administered through routine services (numerator)')
question64 = Question.objects.create(text="Percent coverage (=C/B*100)", UID='C00065', answer_type='Number',
export_label='Percent coverage')
parent6 = QuestionGroup.objects.create(subsection=sub_section6, order=1, grid=True, display_all=True)
parent6.question.add(question61, question62, question63, question64)
QuestionGroupOrder.objects.create(question=question61, question_group=parent6, order=1)
QuestionGroupOrder.objects.create(question=question62, question_group=parent6, order=2)
QuestionGroupOrder.objects.create(question=question63, question_group=parent6, order=3)
QuestionGroupOrder.objects.create(question=question64, question_group=parent6, order=4)
############################################ GENERATE FIXTURES
# questionnaires = Questionnaire.objects.all()
# sections = Section.objects.all()
# subsections = SubSection.objects.all()
# questions = Question.objects.all()
# question_groups = QuestionGroup.objects.all()
# options = QuestionOption.objects.all()
# orders = QuestionGroupOrder.objects.all()
# data = serializers.serialize("json", [questionnaires])
# print data
# data = serializers.serialize("json", [sections])
# print data
# data = serializers.serialize("json", [subsections])
# print data
#
# data = serializers.serialize("json", [questions])
# print data
#
# data = serializers.serialize("json", [question_groups])
# print data
#
# data = serializers.serialize("json", [options, orders])
# print data
|
eJRF/ejrf
|
questionnaire/fixtures/questionnaire/section_4a.py
|
Python
|
bsd-3-clause
| 17,043
|
[
"VisIt"
] |
fb60a74be97373e1f41e50125d610177785a72f89d1195f3fe9664b29b3c6571
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pymatgen.analysis.elasticity.elastic import ElasticTensor
from pymatgen.analysis.interfaces.substrate_analyzer import SubstrateAnalyzer
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.testing import PymatgenTest
class SubstrateAnalyzerTest(PymatgenTest):
# Clean up test to be based on test structures
def test_init(self):
# Film VO2
film = SpacegroupAnalyzer(self.get_structure("VO2"), symprec=0.1).get_conventional_standard_structure()
# Substrate TiO2
substrate = SpacegroupAnalyzer(self.get_structure("TiO2"), symprec=0.1).get_conventional_standard_structure()
film_elac = ElasticTensor.from_voigt(
[
[324.32, 187.3, 170.92, 0.0, 0.0, 0.0],
[187.3, 324.32, 170.92, 0.0, 0.0, 0.0],
[170.92, 170.92, 408.41, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 150.73, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 150.73, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 238.74],
]
)
s = SubstrateAnalyzer()
matches = list(s.calculate(film, substrate, film_elac))
self.assertEqual(len(matches), 192)
for match in matches:
assert match is not None
assert isinstance(match.match_area, float)
if __name__ == "__main__":
unittest.main()
|
gmatteo/pymatgen
|
pymatgen/analysis/interfaces/tests/test_substrate_analyzer.py
|
Python
|
mit
| 1,490
|
[
"pymatgen"
] |
e5f14b55d9a7ffb360087f08f9e0a215a7bd147e993d89f684264d14dd77d558
|
usage="""a module storing the methods to build priors.
This includes:
priors on strain decomposed into gaussian terms used in the analytic marginalization over all possible signals
priors on angular position, such as the galactic plane
"""
print """WARNING:
helpstrings are not necessarily accurate. UPDATE THESE
implement a few other priors on h?
Jeffrey's prior?
truncated pareto amplitudes
"""
#=================================================
import utils
np = utils.np
linalg = utils.linalg
hp = utils.hp
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.rcParams.update({"text.usetex":True})
#=================================================
#
# Prior Classes
#
#=================================================
#================================================
# prior on strain
#=================================================
class hPrior(object):
"""
An object representing the prior on strain.
We analytically marginalize over all possible signals with gaussian integrals, and therefore decompose the prior into a sum of gaussians.
This appears to work well for Pareto distributions with lower bounds.
"""
###
def __init__(self, freqs=None, means=None, covariance=None, amplitudes=None, n_freqs=1, n_gaus=1, n_pol=2, byhand=False):
"""
Priors are assumed to have form \sum_over N{
C_N(f) * exp( - conj( h_k(f) - mean_k(f)_N ) * Z_kj(f)_N * ( h_j(f) - mean_j(f)_N ) ) }
We require:
*freqs is a 1-D array
np.shape(freqs) = (num_freqs,)
*means is a 3-D array or a scalar
np.shape(means) = (num_freqs, num_pol, num_gaus)
*covariance is a 4-D array or a scalar
np.shape(covariance) = (num_freqs, num_pol, num_pol, num_gaus)
*amplitudes is a 1-D array or a scalar
np.shape(amplitudes) = (num_gaus,)
if any of the above are a scalar (except freqs, which must be an array), they are cast to the correct shape.
If not enough information is provided to determine the shape of these arrays, we default to the optional arguments
n_gaus, n_pol, n_freqs
otherwise these are ignored
"""
### set up placeholders that will be filled
self.n_freqs = None
self.n_pol = None
self.n_gaus = None
self.freqs = None
self.means = None
self.covariance = None
self.amplitudes = None
### set data
if freqs != None:
self.set_freqs(freqs)
if means != None:
self.set_means(means, n_freqs=n_freqs, n_pol=n_pol, n_gaus=n_gaus)
if covariance != None:
self.set_covariance(covariance, n_freqs=n_freqs, n_pol=n_pol, n_gaus=n_gaus, byhand=byhand)
if amplitudes != None:
self.set_amplitudes(amplitudes, n_gaus=n_gaus)
###
def set_freqs(self, freqs):
""" check and set freqs """
if not isinstance(freqs, (np.ndarray)):
freqs = np.array(freqs)
if len(np.shape(freqs)) != 1:
raise ValueError, "bad shape for freqs"
n_freqs = len(freqs)
if not n_freqs:
raise ValueError, "freqs must have at least 1 entry"
if self.n_freqs and (n_freqs != self.n_freqs):
raise ValueError, "inconsistent n_freqs"
self.n_freqs = n_freqs
self.freqs = freqs
self.df = freqs[1]-freqs[0]
###
def set_means(self, means, n_freqs=1, n_pol=2, n_gaus=1):
""" check and set means. n_freqs, n_gaus, n_pol are only used if they are not already defined within the object """
if isinstance(means, (int, float)): ### scalar means
if self.n_freqs:
n_freqs = self.n_freqs
if self.n_gaus:
n_gaus = self.n_gaus
if self.n_pol:
n_pol = self.n_pol
self.means = means * np.ones((n_freqs, n_pol, n_gaus), complex)
self.n_freqs = n_freqs
self.n_gaus = n_gaus
self.n_pol = n_pol
else: ### vector means
if not isinstance(means, np.ndarray):
means = np.array(means)
if len(np.shape(means)) != 3:
raise ValueError, "bad shape for means"
n_freqs, n_pol, n_gaus = np.shape(means)
if self.n_freqs and (n_freqs != self.n_freqs):
raise ValueError, "inconsistent n_freqs"
if self.n_pol and (n_pol != self.n_pol):
raise ValueError, "inconsistent n_pol"
elif n_pol <= 0:
raise ValueError, "must have a positive definite number of polarizations"
if self.n_gaus and (n_gaus != self.n_gaus):
raise ValueError, "inconsistent n_gaus"
self.means = means
self.n_gaus = n_gaus
self.n_pol = n_pol
self.n_freqs = n_freqs
###
def set_covariance(self, covariance, n_freqs=1, n_pol=2, n_gaus=1, byhand=False):
""" check and set covariance. n_freqs, n_gaus, n_pol are only used if they are not already defined within the object """
if isinstance(covariance, (int,float)): ### scalar covariances
if self.n_freqs:
n_freqs = self.n_freqs
if self.n_gaus:
n_gaus = self.n_gaus
if self.n_pol:
n_pol = self.n_pol
self.covariance = np.zeros((n_freqs, n_pol, n_pol, n_gaus), complex)
for i in xrange(n_pol):
self.covariance[:,i,i,:] = covariance
self.n_freqs = n_freqs
self.n_gaus = n_gaus
self.n_pol = n_pol
else: ### vector covariances
if not isinstance(covariance, np.ndarray):
covariance = np.array(covariance)
if len(np.shape(covariance)) != 4:
raise ValueError, "bad shape for covariance"
n_freqs, n_pol, n_p, n_gaus = np.shape(covariance)
if self.n_freqs and (n_freqs != self.n_freqs):
raise ValueError, "shape mismatch between freqs and covariance"
if n_pol != n_p:
raise ValueError, "inconsistent shape within covariance"
if self.n_pol and (n_pol != self.n_pol):
raise ValueError, "inconsistent n_pol"
if self.n_gaus and (n_gaus != self.n_gaus):
raise ValueError, "inconsistent n_gaus"
self.covariance = covariance
self.n_freqs = n_freqs
self.n_gaus = n_gaus
self.n_pol = n_pol
### set up inverse-covariance and det_invcovariance
self.invcovariance = np.zeros_like(covariance, dtype=complex)
self.detinvcovariance = np.zeros((n_freqs, n_gaus), dtype=complex)
for n in xrange(n_gaus):
if byhand:
a = self.covariance[:,0,0,n]
b = self.covariance[:,0,1,n]
c = self.covariance[:,1,0,n]
d = self.covariance[:,1,1,n]
det = a*d-b*c
self.detinvcovariance[:,n] = 1.0/det
self.invcovariance[:,0,0,n] = d/det
self.invcovariance[:,0,1,n] = -b/det
self.invcovariance[:,1,0,n] = -c/det
self.invcovariance[:,1,1,n] = a/det
else:
invc = linalg.inv(self.covariance[:,:,:,n])
self.invcovariance[:,:,:,n] = invc
self.detinvcovariance[:,n] = linalg.det(invc)
###
def set_amplitudes(self, amplitudes, n_gaus=1):
""" check and set amplitudes """
if isinstance(amplitudes, (int, float)):
if self.n_gaus:
n_gaus = self.n_gaus
self.amplitudes = amplitudes * np.ones((n_gaus,), float)
self.n_gaus = n_gaus
else:
if not isinstance(amplitudes, np.ndarray):
amplitudes = np.array(amplitudes)
if len(np.shape(amplitudes)) != 1:
raise ValueError, "bad shape for amplitudes"
n_gaus = len(amplitudes)
if self.n_gaus and (n_gaus != self.n_gaus):
raise ValueError, "inconsistent n_gaus"
self.amplitudes = amplitudes
self.n_gaus = n_gaus
###
def get_amplitudes(self, **kwargs):
"""
simply returns self.amplitudes. Only here to allow backwards compatibility with child classes that will do more complicated things (via **kwargs)
"""
return self.amplitudes
###
def lognorm(self, freq_truth):
"""
computes the proper normalization for this prior assuming a model (freq_truth)
return log(norm)
WARNING: this factor will act as an overall scale on the posterior (constant for all pixels) and is only important for the evidence
==> getting this wrong will produce the wrong evidence
"""
return np.log( self.norm(freq_truth) ) ### assumes individual kernals are normalized.
# # det|Z| df**n_pol sum over freqs use amplitudes
# return -utils.sum_logs(np.sum(np.log(self.detinvcovariance[freq_truth]) + self.n_pol*np.log(self.df), axis=0), coeffs=self.amplitudes)
###
def norm(self, freq_truth):
"""
computes the proper normalization for this prior assuming a model (freq_truth)
"""
return np.sum(self.amplitudes)
# return np.exp(self.lognorm(freq_truth))
###
def __call__(self, h):
"""
evaluates the prior for the strain "h"
this call sums h into h_rss and uses the univariate decomposition
we expect this to hold for a marginalized distribution on the vector {h}
returns prior
We require:
*h is a 2-D array
np.shape(h) = (self.n_freqs, self.n_pol)
if h is a 1-D array, we check to see if the shape matches either n_freqs or n_pol.
if it does, we broadcast it to the correct 2-D array
if h is a scalar, we broadcast it to the correct 2-D array
computes:
sum C[n] * np.exp( - np.conj(h-means) * incovariance * (h-means) )
"""
# print "WARNING: normalizations for this prior are all messed up. Take these plots with a grain of salt."
### make sure h has the expected shape
if isinstance(h, (int, float)): ### h is a scalar
h = h * np.ones((self.n_freqs, self.n_pol), float)
elif not isinstance(h, np.ndarray):
h = np.array(h)
h_shape = np.shape(h)
nD = len(h_shape)
if nD == 1: ### h is a 1-D array
len_h = len(h)
if len_h == self.n_pol: ### broadcast to n_freq x n_pol
h = np.outer(np.ones((self.n_freqs,),float), h)
elif len_h == self.n_freqs: ### broadcast to n_freq x n_pol
h = np.outer(h, np.ones((self.n_pol,),float))
else:
raise ValueError, "bad shape for h"
elif nD == 2: ### h is a 2-D array
if (self.n_freqs, self.n_pol) != h_shape:
raise ValueError, "bad shape for h"
else:
raise ValueError, "bad shape for h"
### compute prior evaluated for this strain
p = 0.0
for n in xrange(self.n_gaus): ### sum over all gaussian terms
d = h - self.means[:,:,n] ### difference from mean values
dc = np.conj(d)
m = self.invcovariance[:,:,:,n] ### covariance matricies
### compute exponential term
e = np.zeros_like(self.freqs, float)
for i in xrange(self.n_pol): ### sum over all polarizations
for j in xrange(self.n_pol):
e -= np.real(dc[:,i] * m[:,i,j] * d[:,j]) ### we expect this to be a real number, so we cast it to reals
### insert into prior array
mean_variance = np.exp( -np.mean( np.log( self.detinvcovariance[:,n] )*(1.0/self.n_pol) ) )
p += self.amplitudes[n] * np.exp( np.sum(e)*self.df ) / (2*np.pi*mean_variance)**0.5
return p
###
def plot(self, figname, xmin=1, xmax=10, npts=1001, ymin=None, ymax=None, grid=False):
"""
generate a plot of the prior and save it to figname
"""
### generate plot
fig_ind = 0
fig = plt.figure(fig_ind)
ax = plt.subplot(1,1,1)
x = np.logspace(np.log10(xmin),np.log10(xmax),npts)/self.df
p = np.array([self(X/(self.n_freqs*self.n_pol)**0.5) for X in x])
ax.loglog(x, p )
ax.set_xlabel("$\log_{10}(h_{rss})$")
ax.set_ylabel("$p(h)$")
ax.grid(grid, which="both")
ax.set_xlim(xmin=xmin, xmax=xmax)
if ymin:
ax.set_ylim(ymin=ymin)
if ymax:
ax.set_ylim(ymax=ymax)
fig.savefig(figname)
plt.close(fig)
###
def __repr__(self):
return self.__str__()
###
def __str__(self):
s = """priors.hPrior object
min{freqs}=%.5f
max{freqs}=%.5f
No. freqs =%d
No. polarizations =%d
No. gaussians =%d"""%(np.min(self.freqs), np.max(self.freqs), self.n_freqs, self.n_pol, self.n_gaus)
return s
#=================================================
class hPrior_pareto(hPrior):
"""
an extension of hPrior that is built around a pareto decomposition
automatically finds the best amplitudes, etc
"""
###
def __init__(self, a, variances, freqs=None, n_freqs=1, n_gaus=1, n_pol=2, byhand=False):
self.a = a
self.variances = variances
means, covariances, amplitudes = pareto(a, n_freqs, n_pol, variances, exact=False)
super(hPrior_pareto, self).__init__(freqs=freqs, means=means, covariance=covariances, amplitudes=amplitudes, n_freqs=n_freqs, n_gaus=n_gaus, n_pol=n_pol, byhand=byhand)
###
def get_amplitudes(self, freq_truth=np.array([True]), n_pol_eff=2):
"""
finds the pareto amplitudes using n_freqs, n_pol
this method always finds the exact decomposition for the specified freq_truth and n_pol_eff
if that behavior is not desired, then you should use the base hPrior object and reset amplitudes by hand
"""
n_freqs = np.sum(freq_truth)
return pareto_amplitudes(self.a, self.variances, n_freqs=n_freqs, n_pol=n_pol_eff, exact=True)
###
def __repr__(self):
return self.__str__()
###
def __str__(self):
s = """priors.hPrior_pareto object
a = %.3f
min{freqs}=%.5f
max{freqs}=%.5f
No. freqs =%d
No. polarizations =%d
No. gaussians =%d"""%(self.a, np.min(self.freqs), np.max(self.freqs), self.n_freqs, self.n_pol, self.n_gaus)
return s
#=================================================
# prior on sky location
#=================================================
class angPrior(object):
"""
An object representing the prior on sky location.
this prior is stored in terms of the standard polar coordinates
*theta : polar angle
*phi : azimuthal angle
both theta and phi are measured in radians, and refer to standard Earth-Fixed coordinates
"""
known_prior_type = ["uniform", "galactic_plane", "antenna_pattern"]
# right now the default option is uniform over the sky
# will want to add some sort of beam pattern option
# eventually should add galaxy catalogues option
###
def __init__(self, nside_exp=7, prior_type='uniform', coord_sys="E", **kwargs):
"""
Initializes a prior with HEALPix decomposition defined by
nside = 2**nside_exp
prior_type defines the prior used:
uniform [DEFAULT]
constant independent of theta,phi
galactic_plane
a gaussian in galactic latitude
antenna_pattern
the maximum eigenvalue of the sensitivity matrix
kwargs can be:
gmst : float [GreenwichMeanSiderealTime]
used to convert between Earth-fixed and Galactic coordinates
network : instance of utils.Network object
used to compute antenna_pattern prior
frequency : float [Hz]
used to compute eigenvalues of sensitivity matrix for antenna pattern prior
exp : float
if prior_type=="galactic_plane":
width of gaussian in galactic latitude (in radians)
DEFAULT = np.pi/8
if prior_type=="antenna_pattern":
p ~ (F+^2 + Fx^2)**(exp/2.0)
DEFAULT = 3.0
we may want to add:
galaxy catalogs (this is why gmst is an optional argument)
"""
### delegate to set methods
self.set_nside(nside_exp)
self.set_prior_type(prior_type, **kwargs)
self.set_theta_phi(coord_sys="E", **kwargs)
###
def set_nside(self, nside_exp):
""" check and set nside """
if not isinstance(nside_exp, int):
raise ValueError, "nside_exp must be an integer"
nside = 2**nside_exp
self.nside = nside
self.npix = hp.nside2npix(nside)
###
def set_prior_type(self, prior_type, **kwargs):
""" check and set prior type """
# Initialize prior type
if not (prior_type in self.known_prior_type):
raise ValueError, "Unknown prior_type=%s"%prior_type
self.prior_type = prior_type
# store information needed to calculate prior for specific types
if prior_type == "galactic_plane":
if kwargs.has_key("gmst"):
self.gmst = kwargs["gmst"]
else:
raise ValueError, "must supply \"gmst\" with prior_type=\"galactic_plane\""
if kwargs.has_key("exp"):
self.exp = kwargs["exp"]
else:
self.exp = np.pi/8
elif prior_type == "antenna_pattern":
if kwargs.has_key("network"):
network = kwargs["network"]
if not isinstance(network, utils.Network):
raise ValueError, "network must be an instance of utils.Network"
self.network = network
else:
raise ValueError, "must supply \"network\" with prior_type=\"antenna_pattern\""
if kwargs.has_key("frequency"):
self.frequency = kwargs["frequency"]
else:
raise ValueError, "must supply \"frequency\" with prior_type=\"antenna_pattern\""
if kwargs.has_key("exp"):
self.exp = kwargs["exp"]
else:
self.exp = 3.0
###
def set_theta_phi(self, coord_sys="E", **kwargs):
"""
compute and store theta, phi
delegates to utils.set_theta_phi
"""
if not self.nside:
raise ValueError, "set_angPrior() first"
self.coord_sys=coord_sys
theta, phi = utils.set_theta_phi(self.nside, coord_sys=coord_sys, **kwargs)
self.theta = theta
self.phi = phi
###
def angprior(self, normalize=False):
"""
builds the normalized prior over the entire sky.
if normalize:
we ensure the prior is normalized by directly computing the sum
"""
if not self.nside:
raise ValueError, "set_nside() first"
#Pixelate the sky
npix = hp.nside2npix(self.nside) #number of pixels
### an array for sky positions
if self.theta == None:
raise ValueError, "set_theta_phi() first"
### compute prior for all points in the sky
angprior = self(self.theta, self.phi)
if normalize:
angprior /= np.sum(angprior) ### ensure normalization
return angprior
###
def __call__(self, theta, phi, degrees=False):
"""
evalute the prior at the point defined by
theta, phi
"""
if isinstance(theta, (int,float)):
theta = np.array([theta])
elif not isinstance(theta, np.ndarray):
theta = np.array(theta)
if isinstance(phi, (int,float)):
phi = np.array([phi])
if not isinstance(phi, np.ndarray):
phi = np.array(phi)
if len(phi) != len(theta):
raise ValueError, "theta, phi must have the same length"
if degrees: ### convert to radians
theta *= np.pi/180
phi *= np.pi/180
### check theta, phi for sanity
if np.any((theta<0.0)+(theta>np.pi)):
raise ValueError, "theta must be between 0 and pi"
if np.any((phi<0.0)+(phi>2*np.pi)):
raise ValueError, "phi must be between 0 and 2*pi"
### compute prior
if self.prior_type == "uniform":
return np.ones_like(theta)/hp.nside2npix(self.nside)
elif self.prior_type == "galactic_plane":
### need to convert from Earth-fixed -> galactic and apply gaussian prior on galactic latitude
### expect self.gmst, self.exp to exist
###
# need to define a way to convert from earth-fixed to galactic coordinate
# maybe astropy is a good solution?
###
print "WARNING: prior_type=\"galactic_plane\" is not implemented. Defaulting to \"uniform\""
return np.ones_like(theta)/hp.nside2npix(self.nside)
elif self.prior_type == "antenna_pattern":
### need to compute max eigenvalue of sensitivity matrix (at some nominal frequency) and raise it to a given power
### expect self.network, self.exp, self.frequency to exist
###
# WARNING: this implementation is likely to be slow! It can probably be optimized
###
prior = np.empty((len(theta),),float)
A = self.network.A(theta, phi, 0.0, no_psd=False)
evals = np.max(np.linalg.eigvals(A), axis=2)
for ipix in xrange(len(theta)):
prior[ipix] = np.interp(self.frequency, self.network.freqs, evals[ipix])**(self.exp/2.0)
return prior
else:
raise ValueError, "unknown prior_type=%s"%self.prior_type
###
def plot(self, figname, title=None, unit=None, inj=None, est=None, graticule=False):
"""
generate a plot of the prior and save it to figname
if inj != None:
(theta,phi) = inj
plot marker at theta,phi
"""
### generate plot
fig_ind = 0
fig = plt.figure(fig_ind)
hp.mollview(self.angprior(normalize=True)/hp.nside2pixarea(self.nside), title=title, unit=unit, flip="geo", fig=fig_ind)
if graticule:
hp.graticule()
### plot point if supplied
if inj:
ax = fig.gca()
marker = ax.projplot(inj, "wx", alpha=0.5)[0]
marker.set_markersize(10)
marker.set_markeredgewidth(2)
if est:
ax = fig.gca()
marker = ax.projplot(est, "wo", alpha=0.5)[0]
marker.set_markersize(10)
marker.set_markeredgewidth(2)
### save
fig.savefig(figname)
plt.close()
###
def __repr__(self):
return self.__str__()
###
def __str__(self):
s = """priors.angPrior object
nside = %d
npix = %d
prior_type = %s"""%(self.nside, self.npix, self.prior_type)
if self.prior_type == "galactic_plane":
s += """
exp = %.3f
gmst = %.5f"""%(self.exp, self.gmst)
elif self.prior_type == "antenna_pattern":
s += """
exp = %.3f
frequency = %.5f
network = %s"""%(self.exp, self.frequency, str(self.network))
return s
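# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original module): a brief, hedged usage
# example of angPrior with the default uniform prior. nside_exp=5 is an
# illustrative assumption.
#
#   ang = angPrior(nside_exp=5, prior_type="uniform")
#   skymap = ang.angprior(normalize=True)   # HEALPix map that sums to 1
# ---------------------------------------------------------------------------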
#=================================================
#
# Polarization constraints
#
#=================================================
#=================================================
# methods for generic constraints
#=================================================
def isotropic_to_constrained(covariances, alpha, psi, theta, r=1e-10):
"""
converts isotropic (in polarization space) covariances to covariances with polarization constraints defined by
h1 = tan(alpha)*e**(i*psi) * h2 = P * h2
for example,
linearly polarized in known frame :
alpha = alpha_o
psi = 0.0
elliptically polarized in known frame :
alpha = alpha_o
psi = np.pi/2
if we know there are such constraints, but do not know the correct frame, etc, we can numerically marginalize over (alpha, psi)
we work in the orthogonal basis
/ a \ = / cos(alpha) sin(alpha)*e**(i*psi) \ / h1 \
| | = (1+|P|)**-0.5 * | | | |
\ b / \ -sin(alpha)*e**(-i*phi) cos(alpha) / \ h2 /
and expect the inverse-covariance in this basis to be
/ 1/v 0 \
invcov = | |
\ 0 1/(r*v) /
where v is the isotropic covariance : cov[i,i] = v
In the h1,h2 basis, this means our inverse-covariance becomes
/ cos(alpha)**2/v + sin(alpha)**2/(r*v) sin(alpha)*cos(alpha)*e**(i*psi)*(1 - 1/r)/v \
invcov = | |
\ sin(alpha)*cos(alpha)*e**(-i*psi)*(1 - 1/r)/v sin(alpha)**2/v + cos(alpha)**2/(r*v) /
we also apply the rotation matrix
/ sin(theta) cos(theta) \
R = | | = transpose(R)
\ cos(theta) -sin(theta) /
to rotate h_1,h_2 into some other frame, which results in the final invcov matrix
rotated_invcov = R * invcov * R
returns constrained_covariances
"""
### check covariances
if not isinstance(covariances, np.ndarray):
covariances = np.array(covariances)
if len(np.shape(covariances)) != 4:
raise ValueError, "bad shape for covariances"
n_freqs, n_pol, n_p, n_gaus = np.shape(covariances)
if n_pol != n_p:
raise ValueError, "inconsistent shape within covariances"
if n_pol != 2:
raise ValueError, "We only support polarization constrains for n_pol=2"
### construce constrained covariances
constrained_covariances = np.zeros_like(covariances, complex)
cosalpha = np.cos(alpha)
sinalpha = np.sin(alpha)
cospsi = np.cos(psi)
sinpsi = np.sin(psi)
costheta = np.cos(theta)
sintheta = np.sin(theta)
### iterate over all covariance matricies and convert
for f in xrange(n_freqs):
for g in xrange(n_gaus):
cov = covariances[f,:,:,g]
### check that cov is diagonal and isotropic
if (cov[0,0] != cov[1,1]) or cov[0,1] or cov[1,0]:
raise ValueError, "we only support conversion of diagonal, isotropic covariance matrices"
v = cov[1,1] ### pull out variance
### compute constrained inverse-covariance
constrained_invcov = np.empty_like(cov)
a = cosalpha**2/v + sinalpha**2/(r*v)
b = sinalpha*cosalpha*(cospsi + 1.0j*sinpsi)*(1-1.0/r)/v
c = sinalpha*cosalpha*(cospsi - 1.0j*sinpsi)*(1-1.0/r)/v
d = sinalpha**2/v + cosalpha**2/(r*v)
### apply rotation matrix
constrained_invcov[0,0] = a*sintheta**2 + b*sintheta*costheta +c*sintheta*costheta + d*costheta**2
constrained_invcov[0,1] = a*sintheta*costheta - b*sintheta**2 + c*costheta**2 - d*sintheta*costheta
constrained_invcov[1,0] = a*sintheta*costheta + b*costheta**2 - c*sintheta**2 - d*sintheta*costheta
constrained_invcov[1,1] = a*costheta**2 - b*sintheta*costheta - c*sintheta*costheta + d*sintheta**2
### fill in constrained_covariance
constrained_covariances[f,:,:,g] = linalg.inv(constrained_invcov)
return constrained_covariances
###
def isotropic_to_marginalized(covariances, alpha, psi, theta, r=1e-10):
"""
expands covariances to numerically marginalize over (alpha,psi,theta)
n_gaus -> n_gaus*len(alpha)*len(psi)*len(theta)
constrained_covariances are computed and stored appropriately
"""
### check covariances
if not isinstance(covariances, np.ndarray):
covariances = np.array(covariances)
if len(np.shape(covariances)) != 4:
raise ValueError, "bad shape for covariances"
n_freqs, n_pol, n_p, n_g = np.shape(covariances)
if n_pol != n_p:
raise ValueError, "inconsistent shape within covariances"
if n_pol != 2:
raise ValueError, "We only support polarization constrains for n_pol=2"
### check alpha
if isinstance(alpha, (int,float)):
alpha = np.array([alpha])
elif not isinstance(alpha, np.ndarray):
alpha = np.array(alpha)
if len(np.shape(alpha)) != 1:
raise ValueError, "bad shape for alpha"
n_alpha = len(alpha)
### check psi
if isinstance(psi, (int,float)):
psi = np.array([psi])
elif not isinstance(psi, np.ndarray):
psi = np.array(psi)
if len(np.shape(psi)) != 1:
raise ValueError, "bad shape for psi"
n_psi = len(psi)
### check theta
if isinstance(theta, (int,float)):
theta = np.array([theta])
elif not isinstance(theta, np.ndarray):
theta = np.array(theta)
if len(np.shape(theta)) != 1:
raise ValueError, "bad shape for theta"
n_theta = len(theta)
### new number of gaussians
n_gaus = n_g*n_alpha*n_psi*n_theta
### construct constrained_covariances
constrained_covariances = np.empty((n_freqs, n_pol, n_pol, n_gaus), complex)
### iterate, compute, and fill in constrained_covariances
ind = 0
for a in alpha:
for p in psi:
for t in theta:
constrained_covariances[:,:,:,ind*n_g:(ind+1)*n_g] = isotropic_to_constrained(covariances, a, p, t, r=r) ### fill in appropriately
ind += 1
return constrained_covariances
###
def marginalized_amplitudes(amplitudes, alpha, psi, theta):
"""
returns amplitudes appropriately broadcast for isotropic_to_marginalized()
"""
### check amplitudes
if not isinstance(amplitudes, np.ndarray):
amplitudes = np.array(amplitudes)
if len(np.shape(amplitudes)) != 1:
raise ValueError, "bad shape for amplitudes"
n_g = len(amplitudes)
### check alpha
if isinstance(alpha, (int,float)):
alpha = np.array([alpha])
elif not isinstance(alpha, np.ndarray):
alpha = np.array(alpha)
if len(np.shape(alpha)) != 1:
raise ValueError, "bad shape for alpha"
n_alpha = len(alpha)
### check psi
if isinstance(psi, (int,float)):
psi = np.array([psi])
elif not isinstance(psi, np.ndarray):
psi = np.array(psi)
if len(np.shape(psi)) != 1:
raise ValueError, "bad shape for psi"
n_psi = len(psi)
### check theta
if isinstance(theta, (int,float)):
theta = np.array([theta])
elif not isinstance(theta, np.ndarray):
theta = np.array(theta)
if len(np.shape(theta)) != 1:
raise ValueError, "bad shape for theta"
n_theta = len(theta)
### define new amplitudes
marginalized_amplitudes = np.outer( amplitudes, np.ones((n_alpha*n_psi*n_theta),float)/(n_alpha*n_psi*n_theta) ).flatten()
return marginalized_amplitudes
###
def marginalized_means(means, alpha, psi, theta):
"""
returns means appropriately broadcast for isotropic_to_marginalized()
"""
### check means
if not isinstance(means, np.ndarray):
means = np.array(means)
if len(np.shape(means)) != 3:
raise ValueError, "bad shape for means"
n_freqs, n_pol, n_g = np.shape(means)
if n_pol != 2:
raise ValueError, "We only support polarization constrains for n_pol=2"
### check alpha
if isinstance(alpha, (int,float)):
alpha = np.array([alpha])
elif not isinstance(alpha, np.ndarray):
alpha = np.array(alpha)
if len(np.shape(alpha)) != 1:
raise ValueError, "bad shape for alpha"
n_alpha = len(alpha)
### check psi
if isinstance(psi, (int,float)):
psi = np.array([psi])
elif not isinstance(psi, np.ndarray):
psi = np.array(psi)
if len(np.shape(psi)) != 1:
raise ValueError, "bad shape for psi"
n_psi = len(psi)
### check theta
if isinstance(theta, (int,float)):
theta = np.array([theta])
elif not isinstance(theta, np.ndarray):
theta = np.array(theta)
if len(np.shape(theta)) != 1:
raise ValueError, "bad shape for theta"
n_theta = len(theta)
### new number of gaussians
n_gaus = n_g*n_alpha*n_psi*n_theta
### define new means
marginalized_means = np.empty((n_freqs,n_pol,n_gaus),complex)
### iterate and fill in
ind = 0
for a in alpha:
for p in psi:
for t in theta:
marginalized_means[:,:,ind*n_g:(ind+1)*n_g] = means
ind += 1
return marginalized_means
#=================================================
# methods for specific constraints
#=================================================
###
def isotropic_to_circular(means, covariances, amplitudes, r=1e-10, n_theta_marge=60):
"""
converts isotropic covariances to circularly polarized covariances
delegates to isotropic_to_constrained() with
alpha = np.pi/4
psi = np.pi/2
delegates to marginalized_means(), isotropic_to_constrained(), marginalized_amplitudes()
returns means, covariances, amplitudes
"""
theta = np.arange(n_theta_marge)*2*np.pi/n_theta_marge
return marginalized_means(means, np.pi/4, np.pi/2, theta), isotropic_to_constrained(covariances, np.pi/4, np.pi/2, theta, r=r), marginalized_amplitudes(amplitudes, np.pi/4, np.pi/2, theta)
###
def isotropic_to_elliptical(means, covariances, amplitudes, r=1e-10, n_alpha_marge=60, n_theta_marge=60):
"""
converts isotropic covariances to elliptical covariances
numerically marginalizes over alpha with n_marge samples
n_gaus -> n_gaus*n_marge
delegates to marginalized_means(), isotropic_to_marginalized(), marginalized_amplitudes()
returns means, covariances, amplitudes
"""
theta = np.arange(n_theta_marge)*2*np.pi/n_theta_marge
alpha = np.arange(n_alpha_marge)*2*np.pi/n_alpha_marge
return marginalized_means(means, alpha, np.pi/2, theta), isotropic_to_marginalized(covariances, alpha, np.pi/2, theta, r=r), marginalized_amplitudes(amplitudes, alpha, np.pi/2, theta)
###
def isotropic_to_linear(means, covariances, amplitudes, r=1e-10, n_alpha_marge=60, n_theta_marge=60):
"""
converts isotropic covariances to linear covariances
numerically marginalizes over alpha with n_marge samples
n_gaus -> n_gaus*n_marge
delegates to marginalized_means(), isotropic_to_marginalized(), marginalized_amplitudes()
returns means, covariances, amplitudes
"""
theta = np.arange(n_theta_marge)*2*np.pi/n_theta_marge
alpha = np.arange(n_alpha_marge)*2*np.pi/n_alpha_marge
return marginalized_means(means, alpha, 0.0, theta), isotropic_to_marginalized(covariances, alpha, 0.0, theta, r=r), marginalized_amplitudes(amplitudes, alpha, 0.0, theta)
#=================================================
#
# Methods to compute standard priors
#
#=================================================
def malmquist_pareto(a, n_freqs, n_pol, variances, break_variance):
"""
computes a "malmquist" pareto distribution, which drives the prior to zero as x->0 with a single gaussian term with variance=break_variance
delegates to pareto_amplitudes(variances) to obtain the decomposition into a series of gaussians
appends a single gaussian at the beginning (typically break_variance < variances)
"""
if not isinstance(variances, np.ndarray):
variances = np.array(variances)
if len(np.shape(variances)) != 1:
raise ValueError, "bad shape for variances"
n_gaus = len(variances)+1
### compute amplitudes
amplitudes = pareto_amplitudes(a, variances, n_pol=n_pol)
### compute covariance in correct array format
covariances = np.zeros((n_freqs,n_pol,n_pol,n_gaus),float)
for i in xrange(n_pol): ### input diagonal elements
covariances[:,i,i,0] = 2*break_variance
for n in xrange(1,n_gaus):
v = 2*variances[n-1]
for i in xrange(n_pol):
covariances[:,i,i,n] = v
### instantiate means in correct array format
means = np.zeros((n_freqs, n_pol, n_gaus), float)
### figure out the correct amplitude to cancel the rest of the terms as x->0
p_0 = np.sum(amplitudes*variances**-0.5) ### the value of all the prior terms at x=0
break_amp = p_0 * break_variance**0.5 ### the amplitude required for the malmquist term to cancel the rest of the prior at x=0
return means, covariances, np.concatenate((np.array([break_amp]),amplitudes))
###
def pareto(a, n_freqs, n_pol, variances, exact=False):
"""
computes the required input for a Prior object using the pareto distribution
p(h_rss) = h_rss**-a
delegates decomposition into gaussians to pareto_amplitudes
returns amplitudes, means, covariance
"""
if not isinstance(variances, np.ndarray):
variances = np.array(variances)
if len(np.shape(variances)) != 1:
raise ValueError, "bad shape for variances"
n_gaus = len(variances)
### compute amplitudes
amplitudes = pareto_amplitudes(a, variances, n_freqs=n_freqs, n_pol=n_pol, exact=exact)
### compute covariance in correct array format
covariances = np.zeros((n_freqs,n_pol,n_pol,n_gaus),float)
for n in xrange(n_gaus):
v = 2*variances[n]
for i in xrange(n_pol):
covariances[:,i,i,n] = v
### instantiate means in correct array format
means = np.zeros((n_freqs, n_pol, n_gaus), float)
return means, covariances, amplitudes
###
def pareto_amplitudes(a, variances, n_freqs=1, n_pol=1, exact=False):
"""
computes the amplitudes corresponding to the supplied variances to optimally reconstruct a pareto distribution with exponent "a"
p(x) = x**-a ~ \sum_n C_n * (2*pi*variances[n])**-0.5 * exp( -x**2/(2*variances[n]) )
We require:
*variances is a 1-D array
np.shape(variances) = (N,)
returns C_n as a 1-D array
amplitudes are defined (up to an arbitrary constant) through a chi2-minimization
chi2 = \int dx [ (f - fhat) / f ]**2
where
f = x**-a
fhat = \sum C[n] * (2*pi*variances[n])**-0.5 * exp( -x**2/(2*variances[n]) )
a minimization with respect to C_n yields
int x**a Km = sum C_n int x**(2a) K_n K_m
v_m**(a/2) I(a) = sum C_n v_mn**((1+2a)/2) (v_m*v_n)**(-1) Y(a) where I(a) and Y(a) are non-dimensional integrals
v_mn = v_m*v_n / (v_m + v_n)
The C_n are determined through straightforward linear algebra
"""
if not isinstance(variances, np.ndarray):
variances = np.array(variances)
if len(np.shape(variances)) != 1:
raise ValueError, "bad shape for variances"
n_gaus = len(variances)
if not exact: ### make an approximation
### REFERENCE THEORY.TEX FOR EXPLANATION FOR WHY THIS IS REASONABLE.
### ASSUMES WIDELY SPACED VARIANCES
C_n = variances**(-0.5*(a-1))
else: ### return the exact result
M = np.empty((n_gaus,n_gaus),float)
for i in xrange(n_gaus):
vi = variances[i]
# M[i,i] = 2**(0.5 - a - 2*n_freqs*n_pol) * vi**(a - 0.5)
logvi = np.log(vi)
M[i,i] = (0.5 - a - 2*n_freqs*n_pol)*np.log(2) + (a-0.5)*logvi ### we deal with logs because it is more accurate
for j in xrange(i+1, n_gaus):
vj = variances[j]
vij = vi*vj/(vi+vj)
# M[i,j] = M[j,i] = (vij**2/(vi*vj))**(n_freqs*n_pol) * vij**(a-0.5) ### variances are small enough that this is required for off-diagonal terms to be finite
logvj = np.log(vj)
M[i,j] = M[j,i] = (n_freqs*n_pol*2 + a - 0.5)*np.log(vij) - n_freqs*n_pol*(logvi + logvj)
M = np.exp( M - np.max(M) ) ### remove common factor because it won't influence C_n and may improve accuracy
C_n = np.sum(linalg.inv(M)*variances**(0.5*a), axis=0)
C_n /= np.sum(C_n)
return C_n
'''
### Distribution for normalized univariate kernals:
### we may want to handle the small numbers more carefully, because we start to run into problems with float precision (setting things to zero).
### we can do something like utils.sum_logs where we subtract out the maximum value, and then do the manipulation, only to put in the maximum value at the end.
### for right now, this appears to work well enough.
### build matrix from RHS
M = np.empty((n_gaus, n_gaus), float)
for m in xrange(n_gaus):
v_m = variances[m]
M[m,m] = 2**-(0.5+a) * v_m**(a-0.5)
for n in xrange(m+1, n_gaus):
v_n = variances[n]
M[n,m] = M[m,n] = (v_m+v_n)**-(0.5+a) * (v_m*v_n)**a
### invert matrix from RHS
invM = linalg.inv(M)
### compute coefficients
vec = variances**(0.5*a)
### take the inverse and matrix product
C_n = np.sum( invM*vec, axis=1) ### take the inverse and matrix product
### normalize coefficients?
C_n /= np.sum(C_n)
return C_n
'''
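# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original module): a hedged example of how
# pareto() and hPrior are typically combined. The frequency grid, exponent a,
# and variance ladder below are illustrative assumptions only.
#
#   freqs = np.arange(100.0, 300.0, 1.0)
#   variances = np.logspace(-4, 0, 5)
#   means, covs, amps = pareto(a=2.0, n_freqs=len(freqs), n_pol=2, variances=variances, exact=False)
#   prior = hPrior(freqs=freqs, means=means, covariance=covs, amplitudes=amps, n_pol=2)
#   print prior(0.1)   # prior density evaluated at a single strain value
# ---------------------------------------------------------------------------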
|
reedessick/bayesburst
|
priors.py
|
Python
|
gpl-2.0
| 39,548
|
[
"Galaxy",
"Gaussian"
] |
f4485cbb8b4d9bd5df9def180daa09ab801a4388d3b4958bada6fa7aa171a4ed
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Unrestricted coupled perturbed Hartree-Fock solver
'''
import time
import numpy
from pyscf import lib
from pyscf.lib import logger
def solve(fvind, mo_energy, mo_occ, h1, s1=None,
max_cycle=20, tol=1e-9, hermi=False, verbose=logger.WARN):
'''
Args:
fvind : function
Given density matrix, compute (ij|kl)D_{lk}*2 - (ij|kl)D_{jk}
'''
if s1 is None:
return solve_nos1(fvind, mo_energy, mo_occ, h1,
max_cycle, tol, hermi, verbose)
else:
return solve_withs1(fvind, mo_energy, mo_occ, h1, s1,
max_cycle, tol, hermi, verbose)
kernel = solve
# h1 shape is (:,nvir,nocc)
def solve_nos1(fvind, mo_energy, mo_occ, h1,
max_cycle=20, tol=1e-9, hermi=False, verbose=logger.WARN):
'''For field independent basis. First order overlap matrix is zero'''
log = logger.new_logger(verbose=verbose)
t0 = (time.clock(), time.time())
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = ~occidxa
viridxb = ~occidxb
nocca = numpy.count_nonzero(occidxa)
noccb = numpy.count_nonzero(occidxb)
nvira = mo_occ[0].size - nocca
nvirb = mo_occ[1].size - noccb
e_ai = numpy.hstack(((mo_energy[0][viridxa,None]-mo_energy[0][occidxa]).ravel(),
(mo_energy[1][viridxb,None]-mo_energy[1][occidxb]).ravel()))
e_ai = 1 / e_ai
mo1base = numpy.hstack((h1[0].reshape(-1,nvira*nocca),
h1[1].reshape(-1,nvirb*noccb)))
mo1base *= -e_ai
def vind_vo(mo1):
v = fvind(mo1.reshape(mo1base.shape)).reshape(mo1base.shape)
v *= e_ai
return v.ravel()
mo1 = lib.krylov(vind_vo, mo1base.ravel(),
tol=tol, max_cycle=max_cycle, hermi=hermi, verbose=log)
log.timer('krylov solver in CPHF', *t0)
if isinstance(h1[0], numpy.ndarray) and h1[0].ndim == 2:
mo1 = (mo1[:nocca*nvira].reshape(nvira,nocca),
mo1[nocca*nvira:].reshape(nvirb,noccb))
else:
mo1 = mo1.reshape(mo1base.shape)
mo1_a = mo1[:,:nvira*nocca].reshape(-1,nvira,nocca)
mo1_b = mo1[:,nvira*nocca:].reshape(-1,nvirb,noccb)
mo1 = (mo1_a, mo1_b)
return mo1, None
# h1 shape is (:,nvir+nocc,nocc)
def solve_withs1(fvind, mo_energy, mo_occ, h1, s1,
max_cycle=20, tol=1e-9, hermi=False, verbose=logger.WARN):
'''For field dependent basis. First order overlap matrix is non-zero.
The first order orbitals are set to
C^1_{ij} = -1/2 S1
e1 = h1 - s1*e0 + (e0_j-e0_i)*c1 + vhf[c1]
'''
log = logger.new_logger(verbose=verbose)
t0 = (time.clock(), time.time())
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = ~occidxa
viridxb = ~occidxb
nocca = numpy.count_nonzero(occidxa)
noccb = numpy.count_nonzero(occidxb)
nmoa, nmob = mo_occ[0].size, mo_occ[1].size
nvira = nmoa - nocca
nvirb = nmob - noccb
eai_a = mo_energy[0][viridxa,None] - mo_energy[0][occidxa]
eai_b = mo_energy[1][viridxb,None] - mo_energy[1][occidxb]
s1_a = s1[0].reshape(-1,nmoa,nocca)
nset = s1_a.shape[0]
s1_b = s1[1].reshape(nset,nmob,noccb)
hs_a = mo1base_a = h1[0].reshape(nset,nmoa,nocca) - s1_a * mo_energy[0][occidxa]
hs_b = mo1base_b = h1[1].reshape(nset,nmob,noccb) - s1_b * mo_energy[1][occidxb]
mo_e1_a = hs_a[:,occidxa].copy()
mo_e1_b = hs_b[:,occidxb].copy()
mo1base_a[:,viridxa] /= -eai_a
mo1base_b[:,viridxb] /= -eai_b
mo1base_a[:,occidxa] = -s1_a[:,occidxa] * .5
mo1base_b[:,occidxb] = -s1_b[:,occidxb] * .5
eai_a = 1. / eai_a
eai_b = 1. / eai_b
mo1base = numpy.hstack((mo1base_a.reshape(nset,-1), mo1base_b.reshape(nset,-1)))
def vind_vo(mo1):
v = fvind(mo1).reshape(mo1base.shape)
v1a = v[:,:nmoa*nocca].reshape(nset,nmoa,nocca)
v1b = v[:,nmoa*nocca:].reshape(nset,nmob,noccb)
v1a[:,viridxa] *= eai_a
v1b[:,viridxb] *= eai_b
v1a[:,occidxa] = 0
v1b[:,occidxb] = 0
return v.ravel()
mo1 = lib.krylov(vind_vo, mo1base.ravel(),
tol=tol, max_cycle=max_cycle, hermi=hermi, verbose=log)
log.timer('krylov solver in CPHF', *t0)
v1mo = fvind(mo1).reshape(mo1base.shape)
v1a = v1mo[:,:nmoa*nocca].reshape(nset,nmoa,nocca)
v1b = v1mo[:,nmoa*nocca:].reshape(nset,nmob,noccb)
mo1 = mo1.reshape(mo1base.shape)
mo1_a = mo1[:,:nmoa*nocca].reshape(nset,nmoa,nocca)
mo1_b = mo1[:,nmoa*nocca:].reshape(nset,nmob,noccb)
mo1_a[:,viridxa] = mo1base_a[:,viridxa] - v1a[:,viridxa] * eai_a
mo1_b[:,viridxb] = mo1base_b[:,viridxb] - v1b[:,viridxb] * eai_b
mo_e1_a += mo1_a[:,occidxa] * (mo_energy[0][occidxa,None] - mo_energy[0][occidxa])
mo_e1_b += mo1_b[:,occidxb] * (mo_energy[1][occidxb,None] - mo_energy[1][occidxb])
mo_e1_a += v1mo[:,:nmoa*nocca].reshape(nset,nmoa,nocca)[:,occidxa]
mo_e1_b += v1mo[:,nmoa*nocca:].reshape(nset,nmob,noccb)[:,occidxb]
if isinstance(h1[0], numpy.ndarray) and h1[0].ndim == 2:
mo1_a, mo1_b = mo1_a[0], mo1_b[0]
mo_e1_a, mo_e1_b = mo_e1_a[0], mo_e1_b[0]
return (mo1_a, mo1_b), (mo_e1_a, mo_e1_b)
|
gkc1000/pyscf
|
pyscf/scf/ucphf.py
|
Python
|
apache-2.0
| 5,875
|
[
"PySCF"
] |
81c4fccbc9f07eea2840881150d97fc30f67bb549ae805e55a18d1c8a4da5a16
|
"""
@file
@brief Missing information about licenses
"""
missing_module_licenses = {
"xgboost": "Apache-2",
"actuariat_python": "MIT",
"code_beatrix": "MIT",
"ensae_teaching_cs": "MIT",
"pymyinstall": "MIT",
"pyensae": "MIT",
"pyquickhelper": "MIT",
"pymmails": "MIT",
"pyrsslocal": "MIT",
"pysqllike": "MIT",
"spyre": "MIT",
"dataspyre": "MIT",
"ete": "GNU v3",
"ete3": "GNU v3",
"datrie": "LGPL v2.1",
"glueviz": "BSD",
"h5py": "BSD",
"statsmodels": "BSD",
"toolz": "BSD",
"trackpy": "BSD - 3 clauses",
"jupyter_core": "BSD",
"jupyter_client": "BSD",
"metakernel": "BSD",
"scilab_kernel": "BSD",
"pyexecjs": "MIT",
"selenium": "Apache Software License",
"alabaster": "BSD",
"cloud_sptheme": "BSD",
"colorama": "BSD",
"guzzle_sphinx_theme": "BSD~MIT",
"itcase_sphinx_theme": "no license",
"pypiserver": "BSD",
"solar_theme": "BSD~MIT",
"sphinx_bootstrap_theme": "MIT",
"sphinx_py3doc_enhanced_theme": "BSD",
"sphinx_readable_theme": "MIT",
"sphinxjp.themes.basicstrap": "MIT",
"sphinxjp.themes.sphinxjp": "MIT",
"wild_sphinx_theme": "BSD",
"SQLAlchemy": "MIT",
"graphviz": "MIT",
"anyjson": "BSD",
"brewer2mpl ": "MIT",
"cached_property": "BSD",
"celery": "BSD",
"colorspacious": "MIT",
"cgal_bindings": "Boost Software License 1.0",
"Cython": "Apache Software License",
"datashape": "BSD",
"cubehelix": "~BSD - 2 clauses",
"dbfread": "MIT",
"dynd": "BSD",
"fabric": "BSD",
"feedparser": "~BSD - 2 clauses",
"gmpy2": "LGPLv3+",
"itsdangerous": "BSD",
"kombu": "BSD",
"lifelines": "MIT",
"line_profiler": "BSD",
"lxml": "BSD",
"Mako": "MIT",
"marisa_trie": "MIT",
"mock": "BSD",
"msgpack": "Apache Software License",
"multimethods": "MIT",
"multipledispatch": "BSD",
"nodeenv": "BSD",
"oauthlib": "BSD",
"patsy": "BSD",
"pbr": "Apache Software License",
"pims": "~BSD",
"pipdeptree": "MIT",
"PyAudio": "MIT",
"pycrypto": "Public Domain + patent for some algorithm",
"planar": "BSD",
"pygal_maps_world": "LGPLv3+",
"pymesos": "BSD",
"pystache": "MIT",
"pytz": "MIT",
"requests_oauthlib": "ISC",
"rope_py3k": "GPL=OpenBSD",
"smart_open": "Public Domain",
"textblob": "MIT",
"tzlocal": "CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
"viscm": "MIT",
"zs": "BSD",
"neural-python": "MIT",
"dask": "BSD",
"NLopt": "LGPL",
"ipython_genutils": "BSD",
"memory_profiler": "BSD",
"snakeviz": "BSD",
"ansi2html": "GPLv3+",
"brewer2mpl": "MIT",
"dev": "Public Domain",
"django-model-utils": "BSD",
"django-userena": "BSD",
"django-uuidfield": "~BSD",
"easy-thumbnails": "BSD",
"gunicorn": "MIT",
"lockfile": "MIT",
"gevent": "MIT",
"lz4": "BSD",
"opencv_python": "BSD",
"ptyprocess": "ISC",
"terminado": "BSD",
"easy_thumbnails": "BSD",
"llvmpy": "BSD",
"pysterior": "MIT",
"pymc3": "Apache-2",
"tqdm": "MIT",
"pandas-highcharts": "MIT",
"holoviews": "BSD",
"cyordereddict": "MIT",
"future": "MIT",
"python-gmaps": "BSD",
"libpython": "Python",
"pyowm": "MIT",
"PyMySQL": "MIT",
"osmapi": "GPLv3",
"progressbar2": "BSD",
"wordcloud": "MIT",
"django-contrib-comments": "BSD",
"filebrowser_safe": "~MIT",
"grappelli_safe": "~MIT",
"mezzanine": "BSD",
"affine": "BSD",
"blocks": "GPL",
"blz": "BSD",
"chest": "BSD",
"configobj": "BSD",
"distributed": "BSD",
"gatspy": "BSD",
"guidata": "CeCILL v2",
"guiqwt": "CECILL",
"heapdict": "BSD",
"httpretty": "MIT",
"jaraco.structures": "MIT",
"jdcal": "BSD",
"jmespath": "MIT",
"keyring": "MIT, Python",
"locket": "BSD",
"onedrive-sdk-python": "MIT",
"pandocfilters": "BSD",
"passlib": "BSD",
"picklable-itertools": "MIT",
"plac": "BSD",
"pycryptodomex": "BSD",
"PyOpenGL_accelerate": "BSD",
"pyOpenSSL": "Apache 2.0",
"pyRFXtrx": "LGPLv3+",
"pysmi": "BSD",
"pythonnet": "MIT",
"pythonqwt": "MIT",
"queuelib": "BSD",
"semantic_version": "BSD",
"slicerator": "BSD",
"sqlite_bro": "MIT",
"supersmoother": "BSD 3-clause",
"tblib": "BSD",
"unidecode": "GPLv2+",
"vincenty": "none",
"user-agent": "MIT",
"wget": "none",
"xxhash": "BSD",
"zope.exceptions": "Zope Public License",
"w3lib": "BSD",
"astropy": "BSD",
"backports_abc": "Python Software Foundation License",
"boto3": "Apache Software License ",
"botocore": "Apache Software License ",
"scoop": "GNU Library or Lesser General Public License (LGPL)",
"hikvision": "MIT",
"PyMata": "GPLv3+",
"python-mpd2": "LGPL",
"python-nest": "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License",
"temperusb": "GNU GENERAL PUBLIC LICENSE",
"sphinx-corlab-theme": "LGPLv3+",
"sphinx-docs-theme": "MIT",
"sphinxjp.themes.gopher": "MIT",
"sphinxjp.themes.htmlslide": "MIT",
"zerovm-sphinx-theme": "Apache License 2.0",
"xarray": "Apache Software License",
"typing": "PSF",
"sklearn_pandas": "~MIT",
"skll": "BSD",
"abcpmc": "GPLv3+",
"ad3": "GPLv3",
"amqp": "LGPL",
"ansiconv": "MIT",
"apscheduler": "MIT",
"autopy3": "MIT",
"azure-batch-apps": "MIT",
"azureml": "MIT",
"bigfloat": "LGPLv3",
"billiard": "BSD",
"biopython": "~MIT",
"bleach": "Apache",
"blist": "BSD",
"blosc": "MIT",
"bqplot": "Apache",
"btrees": "Zope Public License",
"bz2file": "Apache",
"Cartopy": "LGPLv3+",
"chalmers": "MIT",
"CherryPy": "BSD",
"cobble": "BSD",
"comtypes": "MIT",
"contextlib2": "Python",
"cssselect": "BSD",
"cubes": "MIT",
"cuda4py": "BSD",
"CVXcanon": "?",
"cvxpy": "GNU",
"cymem": "MIT",
"datashader": "BSD",
"db.py": "BSD",
"deap": "LGPL",
"Django": "BSD",
"django-audiotracks": "MIT",
"django-celery": "BSD",
"django-configurations": "BSD",
"django-environ": "MIT",
"django-guardian": "BSD",
"django-storages": "BSD",
"dlib": "boost",
"dnspython": "~MIT",
"docrepr": "Modified BSD",
"entrypoints": "MIT",
"envoy": "~MIT",
"et_xmlfile": "MIT",
"Flask-Login": "MIT",
"Flask-SQLAlchemy": "BSD",
"GDAL": "MIT",
"gensim": "LGPLv2+",
"geoplotlib": "MIT",
"geopy": "MIT",
"google-api-python-client": "Apache",
"googlemaps": "Apache",
"grab": "MIT",
"greenlet": "MIT",
"grequests": "BSD",
"grin": "BSD",
"HDDM": "BSD",
"heatmap": "MIT",
"Hebel": "GPLv2",
"hmmlearn": "MIT + Copyright",
"html2text": "GPL",
"html5lib": "MIT",
"httpie": "BSD",
"imageio": "BSD",
"imbox": "MIT",
"invoke": "BSD",
"ipyparallel": "BSD",
"jedi": "MIT",
"jieba": "MIT",
"julia": "MIT",
"jupytalk": "MIT",
"Keras": "MIT",
"Kivy": "MIT",
"kivy-garden": "MIT",
"luigi": "Apache 2.0",
"ldap3": "LGPLv3",
"lazy-object-proxy": "BSD",
"AnyQt": "GPLv3",
"Automat": "MIT",
"azure-datalake-store": "MIT",
"azure-keyvault": "MIT",
"azure-mgmt-containerregistry": "MIT",
"azure-mgmt-datalake-analytics": "MIT",
"azure-mgmt-datalake-nspkg": "MIT",
"azure-mgmt-datalake-store": "MIT",
"azure-mgmt-devtestlabs": "MIT",
"azure-mgmt-dns": "MIT",
"azure-mgmt-documentdb": "MIT",
"azure-mgmt-iothub": "MIT",
"azure-mgmt-keyvault": "MIT",
"azure-mgmt-monitor": "MIT",
"azure-mgmt-rdbms": "MIT",
"azure-mgmt-sql": "MIT",
"azure-mgmt-trafficmanager": "MIT",
"azure-servicefabric": "MIT",
"backports.functools_lru_cache": "MIT",
"bcrypt": "Apache License 2.0",
"blockdiag": "Apache License 2.0",
"Bottlechest": "Simplified BSD",
"CacheControl": "Apache Software License",
"category_encoders": "BSD",
"Cheroot": "BSD",
"colorcet": "Creative Commons Attribution 4.0 International Public License (CC-BY)",
"colorlover": "MIT",
"defusedxml": "Python Software Foundation License Version 2",
"docx2txt": "MIT",
"dominate": "GNU Lesser General Public License (LGPL)",
"drawtree": "Apache License 2.0",
"edward": "MIT",
"elasticsearch": "Apache License 2.0",
"ensae_projects": "MIT",
"Fabric3": "BSD",
"fastparquet": "Apache License 2.0",
"fasttext": "BSD 3-Clause License",
"fbprophet": "BSD",
"filelock": "Unlicensed",
"ftfy": "MIT",
"funcparserlib": "MIT",
"gitdb": "BSD",
"GitPython": "BSD",
"glue-core": "BSD",
"glue-vispy-viewers": "As Is",
"gvar": "GPLv3+",
"h2": "MIT",
"holopy": "GNU GENERAL PUBLIC LICENSE",
"hpack": "MIT",
"hyper": "MIT",
"hyperframe": "MIT",
"hyperlink": "MIT",
"hyperspy": "GNU General Public License v3 (GPLv3)",
"images2gif": "Unlicensed",
"ipaddress": "Python Software Foundation License",
"JPype1": "Apache Software License",
"jsonpickle": "BSD",
"jupyter_contrib_nbextensions ": "BSD",
"jupyter_sphinx": "BSD",
"jyquickhelper": "MIT",
"kabuki": "As Is",
"libLAS": "BSD",
"lmfit": "BSD",
"lsqfit": "GPLv3+",
"lru-dict": "MIT",
"mammoth": "BSD",
"mbstrdecoder": "MIT",
"mdn-sphinx-theme": "Mozilla Public License 2.0 (MPL 2.0)",
"missingno": "As Is",
"mkl-service": "Anaconda End User License Agreement",
"mlstatpy": "MIT",
"monotonic": "Apache",
"monty": "MIT",
"msgpack-numpy": "BSD",
"nbdime": "BSD",
"nose-parameterized": "BSD",
"octave_kernel": "BSD",
"odfpy": "GNU General Public License v.2 + Apache License v.2",
"Orange3": "GPLv3+",
"Orange3-Associate": "GPLv3+",
"Orange3-ImageAnalytics": "GPLv3+",
"Orange3-Network": "GPLv3+",
"Orange3-Text": "GPLv3+",
"palettable": "MIT",
"path.py": "MIT",
"pathvalidate": "MIT",
"pdfminer3k": "MIT",
"pdfrw": "MIT",
"PIMS": "As Is",
"Pint": "BSD",
"pybars3": "GNU Library or Lesser General Public License (LGPL)",
"pyclustering": "GNU General Public License v3 (GPLv3)",
"pyemd": "MIT",
"pyflux": "As Is",
"pygal_maps_ch": "GNU LGPL v3+",
"pygal_maps_fr": "GNU LGPL v3+",
"pygal_sphinx_directives": "GNU LGPL v3+",
"pyinstrument": "BSD",
"pymatgen": "MIT",
"pyopencl": "MIT",
"PyOpenGL": "BSD",
"pyPdf": "BSD",
"PyPDF2": "BSD",
"pypmc": "GPLv2",
"pypng": "MIT",
"pystan": "GNU General Public License v3 (GPLv3)",
"PySide": "GNU Library or Lesser General Public License (LGPL)",
"pytablereader": "MIT",
"python-mimeparse": "MIT",
"python3-linkedin": "MIT",
"python-Levenshtein": "GNU General Public License v2 or later (GPLv2+)",
"pythreejs": "BSD",
"PyX": "GNU General Public License (GPL)",
"qutip": "BSD",
"recommonmark": "MIT",
"regex": "Python Software Foundation License",
"Rtree": "GNU Library or Lesser General Public License (LGPL)",
"sacred": "MIT",
"scikit-fusion": "GPLv3",
"service_identity": "MIT",
"setproctitle": "BSD",
"setuptools-git": "BSD",
"sdepy": "BSD",
"simhash": "MIT",
"SimpleSQLite": "MIT",
"SIP": "GPL v2 or GPL v3 or BSD",
"smmap": "BSD",
"sounddevice": "MIT",
"spglib": "BSD",
"sphinx_theme_pd": "MIT",
"sphinxcontrib-blockdiag": "BSD",
"splinter": "As Is",
"TA-Lib": "BSD",
"teachpyx": "MIT",
"tempora": "MIT",
"tensorflow": "Apache Software License",
"termcolor": "MIT",
"testpath": "MIT",
"tkinterquickhelper": "MIT",
"toml": "MIT",
"toyplot": "BSD",
"traits": "BSD",
"triangle": "GNU LGPL",
"typepy": "MIT",
"update_checker": "Simplified BSD License",
"validate_email": "LGPL",
"webcolors": "BSD",
"wikipedia": "MIT",
"win_unicode_console": "MIT",
"xlwings": "BSD",
}
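# --- Illustrative usage sketch (editorial addition, not part of the upstream pymyinstall module) ---
# The dictionary above is a plain name -> license-string mapping, so a lookup
# helper only needs a dict.get with a fallback.  Purely illustrative:
#
#     def guess_license(module_name, default="unknown"):
#         "Return the recorded license for *module_name*, if any."
#         return missing_module_licenses.get(
#             module_name,
#             missing_module_licenses.get(module_name.lower(), default))
#
#     # guess_license("xgboost")   -> 'Apache-2'
#     # guess_license("nosuchpkg") -> 'unknown'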
|
sdpython/pymyinstall
|
src/pymyinstall/installhelper/missing_license.py
|
Python
|
mit
| 12,065
|
[
"Biopython",
"pymatgen"
] |
483c1b2be195f94b9f8742594d02a34cdef1e1004963ecb6a7ae5250381857e8
|
import numpy as np
from ase.structure import molecule
from ase.dft import Wannier
from gpaw import GPAW
from gpaw.test import equal
# Test of ase wannier using gpaw
calc = GPAW(gpts=(32, 32, 32), nbands=4)
atoms = molecule('H2', calculator=calc)
atoms.center(vacuum=3.)
e = atoms.get_potential_energy()
niter = calc.get_number_of_iterations()
pos = atoms.positions + np.array([[0, 0, .2339], [0, 0, -.2339]])
com = atoms.get_center_of_mass()
wan = Wannier(nwannier=2, calc=calc, initialwannier='bloch')
equal(wan.get_functional_value(), 2.964, 1e-3)
equal(np.linalg.norm(wan.get_centers() - [com, com]), 0, 1e-4)
wan = Wannier(nwannier=2, calc=calc, initialwannier='projectors')
equal(wan.get_functional_value(), 3.100, 1e-3)
equal(np.linalg.norm(wan.get_centers() - pos), 0, 1e-3)
wan = Wannier(nwannier=2, calc=calc, initialwannier=[[0, 0, .5], [1, 0, .5]])
equal(wan.get_functional_value(), 3.100, 1e-3)
equal(np.linalg.norm(wan.get_centers() - pos), 0, 1e-3)
wan.localize()
equal(wan.get_functional_value(), 3.100, 1e-3)
equal(np.linalg.norm(wan.get_centers() - pos), 0, 1e-3)
equal(np.linalg.norm(wan.get_radii() - 1.2393), 0, 1e-4)
eig = np.sort(np.linalg.eigvals(wan.get_hamiltonian().real))
equal(np.linalg.norm(eig - calc.get_eigenvalues()[:2]), 0, 1e-4)
energy_tolerance = 0.00005
niter_tolerance = 0
equal(e, -6.65064, energy_tolerance)
assert 16 <= niter <= 17, niter
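# Editorial note (assumption, not part of the upstream GPAW test): `equal`
# imported from gpaw.test is assumed to be a tolerance assertion, roughly
#
#     def equal(x, y, tolerance=0):
#         assert abs(x - y) <= tolerance, (x, y, tolerance)
#
# so each check above fails loudly if the Wannier functional value, centres,
# radii or eigenvalues drift from the reference numbers by more than the
# stated tolerance.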
|
ajylee/gpaw-rtxs
|
gpaw/test/asewannier.py
|
Python
|
gpl-3.0
| 1,388
|
[
"ASE",
"GPAW"
] |
fe3ff238acb9ab9a6660f6b325fed75c4099682525b3c145defdb0d138bf228a
|
"""Display the contents of the implementation cache."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _
import os, sys
import gtk
from zeroinstall.injector import namespaces, model
from zeroinstall.zerostore import BadDigest, manifest
from zeroinstall import support
from zeroinstall.support import basedir, tasks
from zeroinstall.gtkui import help_box, gtkutils
__all__ = ['CacheExplorer']
ROX_IFACE = 'http://rox.sourceforge.net/2005/interfaces/ROX-Filer'
# Tree view columns
class Column(object):
columns = []
def __init__(self, name, column_type, resizable=False, props={}, hide=False, markup=False):
self.idx = len(self.columns)
self.columns.append(self)
self.name = name
self.column_type = column_type
self.props = props
self.resizable = resizable
self.hide = hide
self.markup = markup
@classmethod
def column_types(cls):
return [col.column_type for col in cls.columns]
@classmethod
def add_all(cls, tree_view):
[col.add(tree_view) for col in cls.columns]
def get_cell(self):
cell = gtk.CellRendererText()
self.set_props(cell, self.props)
return cell
def set_props(self, obj, props):
for k,v in props.items():
obj.set_property(k, v)
def get_column(self):
if self.markup:
kwargs = {'markup': self.idx}
else:
kwargs = {'text': self.idx}
column = gtk.TreeViewColumn(self.name, self.get_cell(), **kwargs)
if 'xalign' in self.props:
self.set_props(column, {'alignment': self.props['xalign']})
return column
def add(self, tree_view):
if self.hide:
return
column = self.get_column()
if self.resizable: column.set_resizable(True)
tree_view.append_column(column)
NAME = Column(_('Name'), str, hide=True)
URI = Column(_('URI'), str, hide=True)
TOOLTIP = Column(_('Description'), str, hide=True)
ITEM_VIEW = Column(_('Item'), str, props={'ypad': 6, 'yalign': 0}, resizable=True, markup=True)
SELF_SIZE = Column(_('Self Size'), int, hide=True)
TOTAL_SIZE = Column(_('Total Size'), int, hide=True)
PRETTY_SIZE = Column(_('Size'), str, props={'xalign':1.0})
ITEM_OBJECT = Column(_('Object'), object, hide=True)
ACTION_REMOVE = object() # just make a unique value
class Section(object):
may_delete = False
def __init__(self, name, tooltip):
self.name = name
self.tooltip = tooltip
def append_to(self, model):
return model.append(None, extract_columns(
name=self.name,
tooltip=self.tooltip,
object=self,
))
SECTION_INTERFACES = Section(
_("Feeds"),
_("Feeds in the cache"))
SECTION_UNOWNED_IMPLEMENTATIONS = Section(
_("Unowned implementations and temporary files"),
_("These probably aren't needed any longer. You can delete them."))
SECTION_INVALID_INTERFACES = Section(
_("Invalid feeds (unreadable)"),
_("These feeds exist in the cache but cannot be read. You should probably delete them."))
import cgi
def extract_columns(**d):
vals = list(map(lambda x:None, Column.columns))
def setcol(column, val):
vals[column.idx] = val
name = d.get('name', None)
desc = d.get('desc', None)
uri = d.get('uri', None)
setcol(NAME, name)
setcol(URI, uri)
if name and uri:
setcol(ITEM_VIEW, '<span font-size="larger" weight="bold">%s</span>\n'
'<span color="#666666">%s</span>' % tuple(map(cgi.escape, (name, uri))))
else:
setcol(ITEM_VIEW, cgi.escape(name or desc))
size = d.get('size', 0)
setcol(SELF_SIZE, size)
setcol(TOTAL_SIZE, 0) # must be set to prevent type error
setcol(TOOLTIP, d.get('tooltip', None))
setcol(ITEM_OBJECT, d.get('object', None))
return vals
menu = None
def popup_menu(bev, obj, model, path, cache_explorer):
global menu # Fixes Python 3 GC issues
menu = gtk.Menu()
for i in obj.menu_items:
if i is None:
item = gtk.SeparatorMenuItem()
else:
name, cb = i
item = gtk.MenuItem()
item.set_label(name)
def _cb(item, cb=cb):
action_required = cb(obj, cache_explorer)
if action_required is ACTION_REMOVE:
model.remove(model.get_iter(path))
item.connect('activate', _cb)
item.show()
menu.append(item)
if gtk.pygtk_version >= (2, 90):
menu.popup(None, None, None, None, bev.button, bev.time)
else:
menu.popup(None, None, None, bev.button, bev.time)
def warn(message, parent=None):
"Present a blocking warning message with OK/Cancel buttons, and return True if OK was pressed"
dialog = gtk.MessageDialog(parent=parent, buttons=gtk.BUTTONS_OK_CANCEL, type=gtk.MESSAGE_WARNING)
dialog.set_property('text', message)
response = []
def _response(dialog, resp):
if resp == gtk.RESPONSE_OK:
response.append(True)
dialog.connect('response', _response)
dialog.run()
dialog.destroy()
return bool(response)
def size_if_exists(path):
"Get the size for a file, or 0 if it doesn't exist."
if path and os.path.isfile(path):
return os.path.getsize(path)
return 0
def get_size(path):
"Get the size for a directory tree. Get the size from the .manifest if possible."
man = os.path.join(path, '.manifest')
if os.path.exists(man):
size = os.path.getsize(man)
with open(man, 'rt') as stream:
for line in stream:
if line[:1] in "XF":
size += int(line.split(' ', 4)[3])
else:
size = 0
for root, dirs, files in os.walk(path):
for name in files:
size += os.path.getsize(os.path.join(root, name))
return size
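# Editorial sketch (assumption, not part of the upstream module): a cached
# implementation's .manifest is assumed to be a text file whose "F" (file) and
# "X" (executable) entries carry the item size as the fourth space-separated
# field, e.g. a line shaped like
#
#     F <digest> <mtime> <size> <name>
#
# which is why get_size() adds int(line.split(' ', 4)[3]) only for lines
# starting with "X" or "F".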
def summary(feed):
if feed.summary:
return feed.get_name() + ' - ' + feed.summary
return feed.get_name()
def get_selected_paths(tree_view):
model, paths = tree_view.get_selection().get_selected_rows()
return paths
def all_children(model, iter):
"make a python generator out of the children of `iter`"
iter = model.iter_children(iter)
while iter:
yield iter
iter = model.iter_next(iter)
# Responses
DELETE = 0
SAFE_MODE = False # really delete things
#SAFE_MODE = True # print deletes, instead of performing them
class CachedFeed(object):
def __init__(self, uri, size):
self.uri = uri
self.size = size
def delete(self):
if not os.path.isabs(self.uri):
cached_iface = basedir.load_first_cache(namespaces.config_site,
'interfaces', model.escape(self.uri))
if cached_iface:
if SAFE_MODE:
print("Delete", cached_iface)
else:
os.unlink(cached_iface)
user_overrides = basedir.load_first_config(namespaces.config_site,
namespaces.config_prog,
'interfaces', model._pretty_escape(self.uri))
if user_overrides:
if SAFE_MODE:
print("Delete", user_overrides)
else:
os.unlink(user_overrides)
def __cmp__(self, other):
return self.uri.__cmp__(other.uri)
class ValidFeed(CachedFeed):
def __init__(self, feed, size):
CachedFeed.__init__(self, feed.url, size)
self.feed = feed
self.in_cache = []
def delete_children(self):
deletable = self.deletable_children()
undeletable = list(filter(lambda child: not child.may_delete, self.in_cache))
# the only undeletable items we expect to encounter are LocalImplementations
unexpected_undeletable = list(filter(lambda child: not isinstance(child, LocalImplementation), undeletable))
assert not unexpected_undeletable, "unexpected undeletable items!: %r" % (unexpected_undeletable,)
[child.delete() for child in deletable]
def delete(self):
self.delete_children()
super(ValidFeed, self).delete()
def append_to(self, model, iter):
iter2 = model.append(iter, extract_columns(
name=self.feed.get_name(),
uri=self.uri,
tooltip=self.feed.summary,
object=self))
for cached_impl in self.in_cache:
cached_impl.append_to(model, iter2)
def launch(self, explorer):
os.spawnlp(os.P_NOWAIT, '0launch', '0launch', '--gui', self.uri)
def copy_uri(self, explorer):
clipboard = gtk.clipboard_get()
clipboard.set_text(self.uri)
primary = gtk.clipboard_get('PRIMARY')
primary.set_text(self.uri)
def deletable_children(self):
return list(filter(lambda child: child.may_delete, self.in_cache))
def prompt_delete(self, cache_explorer):
description = "\"%s\"" % (self.feed.get_name(),)
num_children = len(self.deletable_children())
if self.in_cache:
description += _(" (and %s %s)") % (num_children, _("implementation") if num_children == 1 else _("implementations"))
if warn(_("Really delete %s?") % (description,), parent=cache_explorer.window):
self.delete()
return ACTION_REMOVE
menu_items = [(_('Launch with GUI'), launch),
(_('Copy URI'), copy_uri),
(_('Delete'), prompt_delete)]
class RemoteFeed(ValidFeed):
may_delete = True
class LocalFeed(ValidFeed):
may_delete = False
class InvalidFeed(CachedFeed):
may_delete = True
def __init__(self, uri, ex, size):
CachedFeed.__init__(self, uri, size)
self.ex = ex
def append_to(self, model, iter):
model.append(iter, extract_columns(
name=self.uri.rsplit('/', 1)[-1],
uri=self.uri,
size=self.size,
tooltip=self.ex,
object=self))
class LocalImplementation:
may_delete = False
def __init__(self, impl):
self.impl = impl
def append_to(self, model, iter):
model.append(iter, extract_columns(
name=self.impl.local_path,
tooltip=_('This is a local version, not held in the cache.'),
object=self))
class CachedImplementation:
may_delete = True
def __init__(self, cache_dir, digest):
self.impl_path = os.path.join(cache_dir, digest)
self.size = get_size(self.impl_path)
self.digest = digest
def delete(self):
if SAFE_MODE:
print("Delete", self.impl_path)
else:
support.ro_rmtree(self.impl_path)
def open_rox(self, explorer):
os.spawnlp(os.P_WAIT, '0launch', '0launch', ROX_IFACE, '-d', self.impl_path)
def verify(self, explorer):
try:
manifest.verify(self.impl_path)
except BadDigest as ex:
box = gtk.MessageDialog(None, 0,
gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, str(ex))
if ex.detail:
swin = gtk.ScrolledWindow()
buffer = gtk.TextBuffer()
mono = buffer.create_tag('mono', family = 'Monospace')
buffer.insert_with_tags(buffer.get_start_iter(), ex.detail, mono)
text = gtk.TextView(buffer)
text.set_editable(False)
text.set_cursor_visible(False)
swin.add(text)
swin.set_shadow_type(gtk.SHADOW_IN)
swin.set_border_width(4)
box.vbox.pack_start(swin)
swin.show_all()
box.set_resizable(True)
else:
box = gtk.MessageDialog(None, 0,
gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
_('Contents match digest; nothing has been changed.'))
box.run()
box.destroy()
def prompt_delete(self, explorer):
if warn(_("Really delete implementation?"), parent=explorer.window):
self.delete()
return ACTION_REMOVE
if sys.version_info[0] > 2:
def __lt__(self, other):
return self.digest < other.digest
def __eq__(self, other):
return self.digest == other.digest
menu_items = [(_('Open in ROX-Filer'), open_rox),
(_('Verify integrity'), verify),
(_('Delete'), prompt_delete)]
class UnusedImplementation(CachedImplementation):
def append_to(self, model, iter):
model.append(iter, extract_columns(
name=self.digest,
size=self.size,
tooltip=self.impl_path,
object=self))
class KnownImplementation(CachedImplementation):
def __init__(self, cached_iface, cache_dir, impl, impl_size, digest):
CachedImplementation.__init__(self, cache_dir, digest)
self.cached_iface = cached_iface
self.impl = impl
self.size = impl_size
def delete(self):
if SAFE_MODE:
print("Delete", self.impl)
else:
CachedImplementation.delete(self)
self.cached_iface.in_cache.remove(self)
def append_to(self, model, iter):
impl = self.impl
label = _('Version %(implementation_version)s (%(arch)s)') % {
'implementation_version': impl.get_version(),
'arch': impl.arch or 'any platform'}
model.append(iter, extract_columns(
name=label,
size=self.size,
tooltip=self.impl_path,
object=self))
def __cmp__(self, other):
if hasattr(other, 'impl'):
return self.impl.__cmp__(other.impl)
return -1
if sys.version_info[0] > 2:
def __lt__(self, other):
return self.impl.__lt__(other.impl)
def __eq__(self, other):
return self.impl.__eq__(other.impl)
class CacheExplorer:
"""A graphical interface for viewing the cache and deleting old items."""
def __init__(self, iface_cache):
widgets = gtkutils.Template(os.path.join(os.path.dirname(__file__), 'cache.ui'), 'cache')
self.window = window = widgets.get_widget('cache')
window.set_default_size(gtk.gdk.screen_width() / 2, gtk.gdk.screen_height() / 2)
self.iface_cache = iface_cache
# Model
self.raw_model = gtk.TreeStore(*Column.column_types())
self.view_model = self.raw_model.filter_new()
self.model.set_sort_column_id(URI.idx, gtk.SORT_ASCENDING)
self.tree_view = widgets.get_widget('treeview')
Column.add_all(self.tree_view)
# Sort / Filter options:
def init_combo(combobox, items, on_select):
liststore = gtk.ListStore(str)
combobox.set_model(liststore)
cell = gtk.CellRendererText()
combobox.pack_start(cell, True)
combobox.add_attribute(cell, 'text', 0)
for item in items:
combobox.append_text(item[0])
combobox.set_active(0)
def _on_select(*a):
selected_item = combobox.get_active()
on_select(selected_item)
combobox.connect('changed', lambda *a: on_select(items[combobox.get_active()]))
def set_sort_order(sort_order):
#print "SORT: %r" % (sort_order,)
name, column, order = sort_order
self.model.set_sort_column_id(column.idx, order)
self.sort_combo = widgets.get_widget('sort_combo')
init_combo(self.sort_combo, SORT_OPTIONS, set_sort_order)
def set_filter(f):
#print "FILTER: %r" % (f,)
description, filter_func = f
self.view_model = self.model.filter_new()
self.view_model.set_visible_func(filter_func)
self.tree_view.set_model(self.view_model)
self.set_initial_expansion()
self.filter_combo = widgets.get_widget('filter_combo')
init_combo(self.filter_combo, FILTER_OPTIONS, set_filter)
def button_press(tree_view, bev):
if bev.button != 3:
return False
pos = tree_view.get_path_at_pos(int(bev.x), int(bev.y))
if not pos:
return False
path, col, x, y = pos
obj = self.model[path][ITEM_OBJECT.idx]
if obj and hasattr(obj, 'menu_items'):
popup_menu(bev, obj, model=self.model, path=path, cache_explorer=self)
self.tree_view.connect('button-press-event', button_press)
# Responses
window.set_default_response(gtk.RESPONSE_CLOSE)
selection = self.tree_view.get_selection()
def selection_changed(selection):
any_selected = False
for x in get_selected_paths(self.tree_view):
obj = self.model[x][ITEM_OBJECT.idx]
if obj is None or not obj.may_delete:
window.set_response_sensitive(DELETE, False)
return
any_selected = True
window.set_response_sensitive(DELETE, any_selected)
selection.set_mode(gtk.SELECTION_MULTIPLE)
selection.connect('changed', selection_changed)
selection_changed(selection)
def response(dialog, resp):
if resp == gtk.RESPONSE_CLOSE:
window.destroy()
elif resp == gtk.RESPONSE_HELP:
cache_help.display()
elif resp == DELETE:
self._delete()
window.connect('response', response)
@property
def model(self):
return self.view_model.get_model()
def _delete(self):
errors = []
model = self.model
paths = get_selected_paths(self.tree_view)
paths.reverse()
for path in paths:
item = model[path][ITEM_OBJECT.idx]
assert item.delete
try:
item.delete()
except OSError as ex:
errors.append(str(ex))
else:
model.remove(model.get_iter(path))
self._update_sizes()
if errors:
gtkutils.show_message_box(self.window, _("Failed to delete:\n%s") % '\n'.join(errors))
def show(self):
"""Display the window and scan the caches to populate it."""
self.window.show()
self.window.get_window().set_cursor(gtkutils.get_busy_pointer())
gtk.gdk.flush()
# (async so that the busy pointer works on GTK 3)
@tasks.async
def populate():
populate = self._populate_model()
yield populate
try:
tasks.check(populate)
except:
import logging
logging.warn("fail", exc_info = True)
raise
# (we delay until here because inserting with the view set is very slow)
self.tree_view.set_model(self.view_model)
self.set_initial_expansion()
return populate()
def set_initial_expansion(self):
model = self.model
try:
i = model.get_iter_root()
while i:
# expand only "Feeds"
if model[i][ITEM_OBJECT.idx] is SECTION_INTERFACES:
self.tree_view.expand_row(model.get_path(i), False)
i = model.iter_next(i)
finally:
self.window.get_window().set_cursor(None)
@tasks.async
def _populate_model(self):
# Find cached implementations
unowned = {} # Impl ID -> Store
duplicates = [] # TODO
for s in self.iface_cache.stores.stores:
if os.path.isdir(s.dir):
for id in os.listdir(s.dir):
if id in unowned:
duplicates.append(id)
unowned[id] = s
ok_feeds = []
error_feeds = []
# Look through cached feeds for implementation owners
all_interfaces = self.iface_cache.list_all_interfaces()
all_feeds = {}
for uri in all_interfaces:
try:
iface = self.iface_cache.get_interface(uri)
except Exception as ex:
error_feeds.append((uri, str(ex), 0))
else:
all_feeds.update(self.iface_cache.get_feeds(iface))
for url, feed in all_feeds.items():
if not feed: continue
yield
feed_size = 0
try:
if url != feed.url:
# (e.g. for .new feeds)
raise Exception('Incorrect URL for feed (%s vs %s)' % (url, feed.url))
if os.path.isabs(url):
cached_feed = url
feed_type = LocalFeed
else:
feed_type = RemoteFeed
cached_feed = basedir.load_first_cache(namespaces.config_site,
'interfaces', model.escape(url))
user_overrides = basedir.load_first_config(namespaces.config_site,
namespaces.config_prog,
'interfaces', model._pretty_escape(url))
feed_size = size_if_exists(cached_feed) + size_if_exists(user_overrides)
except Exception as ex:
error_feeds.append((url, str(ex), feed_size))
else:
cached_feed = feed_type(feed, feed_size)
for impl in feed.implementations.values():
if impl.local_path:
cached_feed.in_cache.append(LocalImplementation(impl))
for digest in impl.digests:
if digest in unowned:
cached_dir = unowned[digest].dir
impl_path = os.path.join(cached_dir, digest)
impl_size = get_size(impl_path)
cached_feed.in_cache.append(KnownImplementation(cached_feed, cached_dir, impl, impl_size, digest))
del unowned[digest]
cached_feed.in_cache.sort()
ok_feeds.append(cached_feed)
if error_feeds:
iter = SECTION_INVALID_INTERFACES.append_to(self.raw_model)
for uri, ex, size in error_feeds:
item = InvalidFeed(uri, ex, size)
item.append_to(self.raw_model, iter)
unowned_sizes = []
local_dir = os.path.join(basedir.xdg_cache_home, '0install.net', 'implementations')
for id in unowned:
if unowned[id].dir == local_dir:
impl = UnusedImplementation(local_dir, id)
unowned_sizes.append((impl.size, impl))
if unowned_sizes:
iter = SECTION_UNOWNED_IMPLEMENTATIONS.append_to(self.raw_model)
for size, item in unowned_sizes:
item.append_to(self.raw_model, iter)
if ok_feeds:
iter = SECTION_INTERFACES.append_to(self.raw_model)
for item in ok_feeds:
yield
item.append_to(self.raw_model, iter)
self._update_sizes()
def _update_sizes(self):
"""Set TOTAL_SIZE and PRETTY_SIZE to the total size, including all children."""
m = self.raw_model
def update(itr):
total = m[itr][SELF_SIZE.idx]
total += sum(map(update, all_children(m, itr)))
m[itr][PRETTY_SIZE.idx] = support.pretty_size(total) if total else '-'
m[itr][TOTAL_SIZE.idx] = total
return total
itr = m.get_iter_root()
while itr:
update(itr)
itr = m.iter_next(itr)
SORT_OPTIONS = [
('URI', URI, gtk.SORT_ASCENDING),
('Name', NAME, gtk.SORT_ASCENDING),
('Size', TOTAL_SIZE, gtk.SORT_DESCENDING),
]
def init_filters():
def filter_only(filterable_types, filter_func):
def _filter(model, iter):
obj = model.get_value(iter, ITEM_OBJECT.idx)
if any((isinstance(obj, t) for t in filterable_types)):
result = filter_func(model, iter)
return result
return True
return _filter
def not_(func):
return lambda *a: not func(*a)
def is_local_feed(model, iter):
return isinstance(model[iter][ITEM_OBJECT.idx], LocalFeed)
def has_implementations(model, iter):
return model.iter_has_child(iter)
return [
('All', lambda *a: True),
('Feeds with implementations', filter_only([ValidFeed], has_implementations)),
('Feeds without implementations', filter_only([ValidFeed], not_(has_implementations))),
('Local Feeds', filter_only([ValidFeed], is_local_feed)),
('Remote Feeds', filter_only([ValidFeed], not_(is_local_feed))),
]
FILTER_OPTIONS = init_filters()
cache_help = help_box.HelpBox(_("Cache Explorer Help"),
(_('Overview'), '\n' +
_("""When you run a program using Zero Install, it downloads the program's 'feed' file, \
which gives information about which versions of the program are available. This feed \
file is stored in the cache to save downloading it next time you run the program.
When you have chosen which version (implementation) of the program you want to \
run, Zero Install downloads that version and stores it in the cache too. Zero Install lets \
you have many different versions of each program on your computer at once. This is useful, \
since it lets you use an old version if needed, and different programs may need to use \
different versions of libraries in some cases.
The cache viewer shows you all the feeds and implementations in your cache. \
This is useful to find versions you don't need anymore, so that you can delete them and \
free up some disk space.""")),
(_('Invalid feeds'), '\n' +
_("""The cache viewer gets a list of all feeds in your cache. However, some may not \
be valid; they are shown in the 'Invalid feeds' section. It should be fine to \
delete these. An invalid feed may be caused by a local feed that no longer \
exists or by a failed attempt to download a feed (the name ends in '.new').""")),
(_('Unowned implementations and temporary files'), '\n' +
_("""The cache viewer searches through all the feeds to find out which implementations \
they use. If no feed uses an implementation, it is shown in the 'Unowned implementations' \
section.
Unowned implementations can result from old versions of a program no longer being listed \
in the feed file. Temporary files are created when unpacking an implementation after \
downloading it. If the archive is corrupted, the unpacked files may be left there. Unless \
you are currently unpacking new programs, it should be fine to delete everything in this \
section.""")),
(_('Feeds'), '\n' +
_("""All remaining feeds are listed in this section. You may wish to delete old versions of \
certain programs. Deleting a program which you may later want to run will require it to be downloaded \
again. Deleting a version of a program which is currently running may cause it to crash, so be careful!""")))
|
timdiels/0install
|
zeroinstall/gtkui/cache.py
|
Python
|
lgpl-2.1
| 23,076
|
[
"VisIt"
] |
8bf40822b94e97113ebeaeb9cb88d82d6cc9c2ddf74f31d51b84fc63ea5da2c4
|
"""Module level filter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
from DIRAC.FrameworkSystem.private.standardLogging.LogLevels import LogLevels
DOT = '.'
LEVEL = '__level__'
class ModuleFilter(object):
"""Filter module to set loglevel per module.
::
Resources
{
LogBackends
{
<backend>
{
Filter = MyModuleFilter
}
}
LogFilters
{
MyModuleFilter
{
Plugin = ModuleFilter
dirac = ERROR
dirac.Subprocess = DEBUG
dirac.ILCDIRAC.Interfaces.API.NewInterface = INFO
}
}
}
This results in all debug messages from the Subprocess module being printed, but only errors from
the rest of dirac, plus INFO (and above) from one module in an extension. For this to work the
global log level needs to be DEBUG (e.g., -ddd for commands).
"""
def __init__(self, optionDict):
"""Contruct the object, set the base LogLevel to DEBUG, and parse the options."""
self._configDict = {'dirac': {LEVEL: LogLevels.DEBUG}}
optionDict.pop('Plugin', None)
for module, level in optionDict.items():
self.__fillConfig(self._configDict, module.split(DOT), LogLevels.getLevelValue(level))
def __fillConfig(self, baseDict, modules, level):
"""Fill the config Dict with the module information.
Recursively fill the dictionary for each submodule with the given level.
If intermediate modules are not set, use DEBUG
:param dict baseDict: dictionary for current submodules
:param list modules: list of submodule paths to be set
:param int level: level to be set for the given module
"""
if len(modules) == 1: # at the end for this setting
if modules[0] in baseDict:
baseDict[modules[0]][LEVEL] = level
else:
baseDict[modules[0]] = {LEVEL: level}
return None
module0 = modules[0]
modules = modules[1:]
if module0 not in baseDict:
# DEBUG is the default loglevel for the root logger
baseDict[module0] = {LEVEL: LogLevels.DEBUG}
return self.__fillConfig(baseDict[module0], modules, level)
def __filter(self, baseDict, hierarchy, levelno):
"""Check if sublevels are defined, or return highest set level.
Recursively go through the configured levels and return the comparison with the deepest match.
:param dict baseDict: dictionary with information starting at current level
:param list hierarchy: list of module hierarchy
:param int levelno: integer log level of given record
:returns: boolean for filter value
"""
if not hierarchy:
return baseDict.get(LEVEL, -1) <= levelno
if hierarchy[0] in baseDict:
return self.__filter(baseDict[hierarchy[0]], hierarchy[1:], levelno)
return baseDict.get(LEVEL, -1) <= levelno
def filter(self, record):
"""Filter records based on the path of the logger."""
return self.__filter(self._configDict, record.name.split(DOT), record.levelno)
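# --- Illustrative usage sketch (editorial addition, not part of the upstream DIRAC module) ---
# ModuleFilter only needs an option dictionary mapping dotted module paths to
# level names, and record objects exposing `name` and `levelno`, so it can be
# exercised without a full DIRAC logging setup.  Purely illustrative:
#
#     import logging
#     f = ModuleFilter({'dirac': 'ERROR', 'dirac.Subprocess': 'DEBUG'})
#     rec_ok = logging.LogRecord('dirac.Subprocess', logging.DEBUG,
#                                __file__, 0, 'spawning', None, None)
#     rec_no = logging.LogRecord('dirac.Interfaces', logging.INFO,
#                                __file__, 0, 'chatter', None, None)
#     # f.filter(rec_ok) -> True   (Subprocess is allowed down to DEBUG)
#     # f.filter(rec_no) -> False  (the rest of dirac only passes ERROR and above)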
|
yujikato/DIRAC
|
src/DIRAC/Resources/LogFilters/ModuleFilter.py
|
Python
|
gpl-3.0
| 3,054
|
[
"DIRAC"
] |
4cfa2c3bc7c5413199941017a60d23bd3c40f1a3deee4b531ffd12d16ce25d83
|
import ast
import datetime
import re
import secrets
import time
from datetime import timedelta
from typing import (
AbstractSet,
Any,
Callable,
Dict,
List,
Optional,
Pattern,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
import django.contrib.auth
from bitfield import BitField
from bitfield.types import BitHandler
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator, RegexValidator, URLValidator, validate_email
from django.db import models, transaction
from django.db.models import CASCADE, Manager, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_delete, post_save
from django.utils.functional import Promise
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from confirmation import settings as confirmation_settings
from zerver.lib import cache
from zerver.lib.cache import (
active_non_guest_user_ids_cache_key,
active_user_ids_cache_key,
bot_dict_fields,
bot_dicts_in_realm_cache_key,
bot_profile_cache_key,
bulk_cached_fetch,
cache_delete,
cache_set,
cache_with_key,
flush_message,
flush_muting_users_cache,
flush_realm,
flush_stream,
flush_submessage,
flush_used_upload_space_cache,
flush_user_profile,
get_realm_used_upload_space_cache_key,
get_stream_cache_key,
realm_alert_words_automaton_cache_key,
realm_alert_words_cache_key,
realm_user_dict_fields,
realm_user_dicts_cache_key,
user_profile_by_api_key_cache_key,
user_profile_by_id_cache_key,
user_profile_cache_key,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.types import (
DisplayRecipientT,
ExtendedFieldElement,
ExtendedValidator,
FieldElement,
LinkifierDict,
ProfileData,
ProfileDataElementBase,
RealmUserValidator,
UserFieldElement,
Validator,
)
from zerver.lib.utils import make_safe_digest
from zerver.lib.validator import (
check_date,
check_int,
check_list,
check_long_string,
check_short_string,
check_url,
validate_select_field,
)
MAX_TOPIC_NAME_LENGTH = 60
MAX_LANGUAGE_ID_LENGTH: int = 50
STREAM_NAMES = TypeVar("STREAM_NAMES", Sequence[str], AbstractSet[str])
def query_for_ids(query: QuerySet, user_ids: List[int], field: str) -> QuerySet:
"""
This function optimizes searches of the form
`user_profile_id in (1, 2, 3, 4)` by quickly
building the where clauses. Profiling shows significant
speedups over the normal Django-based approach.
Use this very carefully! Also, the caller should
guard against empty lists of user_ids.
"""
assert user_ids
clause = f"{field} IN %s"
query = query.extra(
where=[clause],
params=(tuple(user_ids),),
)
return query
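# Editorial note (sketch, not part of the upstream Zulip source): for
# user_ids=[1, 2, 3] and field="user_profile_id" the call above effectively
# appends a raw clause of the form
#
#     WHERE ... AND user_profile_id IN (1, 2, 3)
#
# via QuerySet.extra(), with the tuple of ids passed separately as a bound
# parameter rather than interpolated into the SQL string.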
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache cache.
#
# This local cache has a lifetime of just a single request; it is
# cleared inside `flush_per_request_caches` in our middleware. It
# could be replaced with smarter bulk-fetching logic that deduplicates
# queries for the same recipient; this is just a convenient way to
# write that code.
per_request_display_recipient_cache: Dict[int, DisplayRecipientT] = {}
def get_display_recipient_by_id(
recipient_id: int, recipient_type: int, recipient_type_id: Optional[int]
) -> DisplayRecipientT:
"""
returns: an object describing the recipient (using a cache).
If the type is a stream, the type_id must be an int; a string is returned.
Otherwise, type_id may be None; an array of recipient dicts is returned.
"""
# Have to import here, to avoid circular dependency.
from zerver.lib.display_recipient import get_display_recipient_remote_cache
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
def get_display_recipient(recipient: "Recipient") -> DisplayRecipientT:
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id,
)
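# Editorial note (sketch, not part of the upstream Zulip source): because the
# per-request dictionary above is keyed only by recipient_id, two calls such as
#
#     get_display_recipient_by_id(42, Recipient.STREAM, stream_id)
#     get_display_recipient_by_id(42, Recipient.STREAM, stream_id)
#
# hit the remote cache at most once within a single request; the middleware
# clears per_request_display_recipient_cache between requests.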
def get_realm_emoji_cache_key(realm: "Realm") -> str:
return f"realm_emoji:{realm.id}"
def get_active_realm_emoji_cache_key(realm: "Realm") -> str:
return f"active_realm_emoji:{realm.id}"
# This simple call-once caching saves ~500us in auth_enabled_helper,
# which is a significant optimization for common_context. Note that
# these values cannot change in a running production system, but do
# regularly change within unit tests; we address the latter by calling
# clear_supported_auth_backends_cache in our standard tearDown code.
supported_backends: Optional[Set[type]] = None
def supported_auth_backends() -> Set[type]:
global supported_backends
# Caching temporarily disabled for debugging
supported_backends = django.contrib.auth.get_backends()
assert supported_backends is not None
return supported_backends
def clear_supported_auth_backends_cache() -> None:
global supported_backends
supported_backends = None
class Realm(models.Model):
MAX_REALM_NAME_LENGTH = 40
MAX_REALM_DESCRIPTION_LENGTH = 1000
MAX_REALM_SUBDOMAIN_LENGTH = 40
MAX_REALM_REDIRECT_URL_LENGTH = 128
INVITES_STANDARD_REALM_DAILY_MAX = 3000
MESSAGE_VISIBILITY_LIMITED = 10000
AUTHENTICATION_FLAGS = [
"Google",
"Email",
"GitHub",
"LDAP",
"Dev",
"RemoteUser",
"AzureAD",
"SAML",
"GitLab",
"Apple",
"OpenID Connect",
]
SUBDOMAIN_FOR_ROOT_DOMAIN = ""
WILDCARD_MENTION_THRESHOLD = 15
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
# User-visible display name and description used on e.g. the organization homepage
name: Optional[str] = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True)
description: str = models.TextField(default="")
# A short, identifier-like name for the organization. Used in subdomains;
# e.g. on a server at example.com, an org with string_id `foo` is reached
# at `foo.example.com`.
string_id: str = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True)
date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
deactivated: bool = models.BooleanField(default=False)
# Redirect URL if the Realm has moved to another server
deactivated_redirect = models.URLField(max_length=MAX_REALM_REDIRECT_URL_LENGTH, null=True)
# See RealmDomain for the domains that apply for a given organization.
emails_restricted_to_domains: bool = models.BooleanField(default=False)
invite_required: bool = models.BooleanField(default=True)
_max_invites: Optional[int] = models.IntegerField(null=True, db_column="max_invites")
disallow_disposable_email_addresses: bool = models.BooleanField(default=True)
authentication_methods: BitHandler = BitField(
flags=AUTHENTICATION_FLAGS,
default=2 ** 31 - 1,
)
# Whether the organization has enabled inline image and URL previews.
inline_image_preview: bool = models.BooleanField(default=True)
inline_url_embed_preview: bool = models.BooleanField(default=False)
# Whether digest emails are enabled for the organization.
digest_emails_enabled: bool = models.BooleanField(default=False)
# Day of the week on which the digest is sent (default: Tuesday).
digest_weekday: int = models.SmallIntegerField(default=1)
send_welcome_emails: bool = models.BooleanField(default=True)
message_content_allowed_in_email_notifications: bool = models.BooleanField(default=True)
mandatory_topics: bool = models.BooleanField(default=False)
name_changes_disabled: bool = models.BooleanField(default=False)
email_changes_disabled: bool = models.BooleanField(default=False)
avatar_changes_disabled: bool = models.BooleanField(default=False)
POLICY_MEMBERS_ONLY = 1
POLICY_ADMINS_ONLY = 2
POLICY_FULL_MEMBERS_ONLY = 3
POLICY_MODERATORS_ONLY = 4
POLICY_EVERYONE = 5
POLICY_NOBODY = 6
COMMON_POLICY_TYPES = [
POLICY_MEMBERS_ONLY,
POLICY_ADMINS_ONLY,
POLICY_FULL_MEMBERS_ONLY,
POLICY_MODERATORS_ONLY,
]
COMMON_MESSAGE_POLICY_TYPES = [
POLICY_MEMBERS_ONLY,
POLICY_ADMINS_ONLY,
POLICY_FULL_MEMBERS_ONLY,
POLICY_MODERATORS_ONLY,
POLICY_EVERYONE,
]
INVITE_TO_REALM_POLICY_TYPES = [
POLICY_MEMBERS_ONLY,
POLICY_ADMINS_ONLY,
POLICY_FULL_MEMBERS_ONLY,
POLICY_MODERATORS_ONLY,
POLICY_NOBODY,
]
DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS = 259200
# Who in the organization is allowed to add custom emojis.
add_custom_emoji_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
# Who in the organization is allowed to create streams.
create_stream_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
# Who in the organization is allowed to edit topics of any message.
edit_topic_policy: int = models.PositiveSmallIntegerField(default=POLICY_EVERYONE)
# Who in the organization is allowed to invite other users to organization.
invite_to_realm_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
# Who in the organization is allowed to invite other users to streams.
invite_to_stream_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
# Who in the organization is allowed to move messages between streams.
move_messages_between_streams_policy: int = models.PositiveSmallIntegerField(
default=POLICY_ADMINS_ONLY
)
user_group_edit_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
PRIVATE_MESSAGE_POLICY_UNLIMITED = 1
PRIVATE_MESSAGE_POLICY_DISABLED = 2
private_message_policy: int = models.PositiveSmallIntegerField(
default=PRIVATE_MESSAGE_POLICY_UNLIMITED
)
PRIVATE_MESSAGE_POLICY_TYPES = [
PRIVATE_MESSAGE_POLICY_UNLIMITED,
PRIVATE_MESSAGE_POLICY_DISABLED,
]
# Global policy for who is allowed to use wildcard mentions in
# streams with a large number of subscribers. Anyone can use
# wildcard mentions in small streams regardless of this setting.
WILDCARD_MENTION_POLICY_EVERYONE = 1
WILDCARD_MENTION_POLICY_MEMBERS = 2
WILDCARD_MENTION_POLICY_FULL_MEMBERS = 3
WILDCARD_MENTION_POLICY_STREAM_ADMINS = 4
WILDCARD_MENTION_POLICY_ADMINS = 5
WILDCARD_MENTION_POLICY_NOBODY = 6
WILDCARD_MENTION_POLICY_MODERATORS = 7
wildcard_mention_policy: int = models.PositiveSmallIntegerField(
default=WILDCARD_MENTION_POLICY_STREAM_ADMINS,
)
WILDCARD_MENTION_POLICY_TYPES = [
WILDCARD_MENTION_POLICY_EVERYONE,
WILDCARD_MENTION_POLICY_MEMBERS,
WILDCARD_MENTION_POLICY_FULL_MEMBERS,
WILDCARD_MENTION_POLICY_STREAM_ADMINS,
WILDCARD_MENTION_POLICY_ADMINS,
WILDCARD_MENTION_POLICY_NOBODY,
WILDCARD_MENTION_POLICY_MODERATORS,
]
# Who in the organization has access to users' actual email
# addresses. Controls whether the UserProfile.email field is the
# same as UserProfile.delivery_email, or is instead garbage.
EMAIL_ADDRESS_VISIBILITY_EVERYONE = 1
EMAIL_ADDRESS_VISIBILITY_MEMBERS = 2
EMAIL_ADDRESS_VISIBILITY_ADMINS = 3
EMAIL_ADDRESS_VISIBILITY_NOBODY = 4
EMAIL_ADDRESS_VISIBILITY_MODERATORS = 5
email_address_visibility: int = models.PositiveSmallIntegerField(
default=EMAIL_ADDRESS_VISIBILITY_EVERYONE,
)
EMAIL_ADDRESS_VISIBILITY_TYPES = [
EMAIL_ADDRESS_VISIBILITY_EVERYONE,
# The MEMBERS level is not yet implemented on the backend.
## EMAIL_ADDRESS_VISIBILITY_MEMBERS,
EMAIL_ADDRESS_VISIBILITY_ADMINS,
EMAIL_ADDRESS_VISIBILITY_NOBODY,
EMAIL_ADDRESS_VISIBILITY_MODERATORS,
]
# Threshold in days for new users to create streams, and potentially take
# some other actions.
waiting_period_threshold: int = models.PositiveIntegerField(default=0)
allow_message_deleting: bool = models.BooleanField(default=False)
DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS = (
600 # if changed, also change in admin.js, setting_org.js
)
message_content_delete_limit_seconds: int = models.IntegerField(
default=DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS,
)
allow_message_editing: bool = models.BooleanField(default=True)
DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = (
600 # if changed, also change in admin.js, setting_org.js
)
message_content_edit_limit_seconds: int = models.IntegerField(
default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS,
)
# Whether users have access to message edit history
allow_edit_history: bool = models.BooleanField(default=True)
# Defaults for new users
default_twenty_four_hour_time: bool = models.BooleanField(default=False)
default_language: str = models.CharField(default="en", max_length=MAX_LANGUAGE_ID_LENGTH)
DEFAULT_NOTIFICATION_STREAM_NAME = "general"
INITIAL_PRIVATE_STREAM_NAME = "core team"
STREAM_EVENTS_NOTIFICATION_TOPIC = gettext_lazy("stream events")
notifications_stream: Optional["Stream"] = models.ForeignKey(
"Stream",
related_name="+",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
signup_notifications_stream: Optional["Stream"] = models.ForeignKey(
"Stream",
related_name="+",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
"forever": -1,
}
# For old messages being automatically deleted
message_retention_days: int = models.IntegerField(null=False, default=-1)
# When non-null, all but the latest this many messages in the organization
# are inaccessible to users (but not deleted).
message_visibility_limit: Optional[int] = models.IntegerField(null=True)
# Messages older than this message ID in the organization are inaccessible.
first_visible_message_id: int = models.IntegerField(default=0)
# Valid org types
ORG_TYPES: Dict[str, Dict[str, Any]] = {
"unspecified": {
"name": "Unspecified",
"id": 0,
"hidden": True,
"hidden_for_sponsorship": True,
"display_order": 0,
},
"business": {
"name": "Business",
"id": 10,
"hidden": False,
"display_order": 1,
},
"opensource": {
"name": "Open-source project",
"id": 20,
"hidden": False,
"display_order": 2,
},
"education_nonprofit": {
"name": "Education (non-profit)",
"id": 30,
"hidden": False,
"display_order": 3,
},
"education": {
"name": "Education (for-profit)",
"id": 35,
"hidden": False,
"display_order": 4,
},
"research": {
"name": "Research",
"id": 40,
"hidden": False,
"display_order": 5,
},
"event": {
"name": "Event or conference",
"id": 50,
"hidden": False,
"display_order": 6,
},
"nonprofit": {
"name": "Non-profit (registered)",
"id": 60,
"hidden": False,
"display_order": 7,
},
"government": {
"name": "Government",
"id": 70,
"hidden": False,
"display_order": 8,
},
"political_group": {
"name": "Political group",
"id": 80,
"hidden": False,
"display_order": 9,
},
"community": {
"name": "Community",
"id": 90,
"hidden": False,
"display_order": 10,
},
"personal": {
"name": "Personal",
"id": 100,
"hidden": False,
"display_order": 100,
},
"other": {
"name": "Other",
"id": 1000,
"hidden": False,
"display_order": 1000,
},
}
org_type: int = models.PositiveSmallIntegerField(
default=ORG_TYPES["unspecified"]["id"],
choices=[(t["id"], t["name"]) for t in ORG_TYPES.values()],
)
UPGRADE_TEXT_STANDARD = gettext_lazy("Available on Zulip Standard. Upgrade to access.")
# plan_type controls various features around resource/feature
# limitations for a Zulip organization on multi-tenant installations
# like Zulip Cloud.
SELF_HOSTED = 1
LIMITED = 2
STANDARD = 3
STANDARD_FREE = 4
plan_type: int = models.PositiveSmallIntegerField(default=SELF_HOSTED)
# This value is also being used in static/js/settings_bots.bot_creation_policy_values.
# On updating it here, update it there as well.
BOT_CREATION_EVERYONE = 1
BOT_CREATION_LIMIT_GENERIC_BOTS = 2
BOT_CREATION_ADMINS_ONLY = 3
bot_creation_policy: int = models.PositiveSmallIntegerField(default=BOT_CREATION_EVERYONE)
BOT_CREATION_POLICY_TYPES = [
BOT_CREATION_EVERYONE,
BOT_CREATION_LIMIT_GENERIC_BOTS,
BOT_CREATION_ADMINS_ONLY,
]
# See upload_quota_bytes; don't interpret upload_quota_gb directly.
UPLOAD_QUOTA_LIMITED = 5
UPLOAD_QUOTA_STANDARD = 50
upload_quota_gb: Optional[int] = models.IntegerField(null=True)
VIDEO_CHAT_PROVIDERS = {
"disabled": {
"name": "None",
"id": 0,
},
"jitsi_meet": {
"name": "Jitsi Meet",
"id": 1,
},
# ID 2 was used for the now-deleted Google Hangouts.
# ID 3 reserved for optional Zoom, see below.
# ID 4 reserved for optional BigBlueButton, see below.
}
if settings.VIDEO_ZOOM_CLIENT_ID is not None and settings.VIDEO_ZOOM_CLIENT_SECRET is not None:
VIDEO_CHAT_PROVIDERS["zoom"] = {
"name": "Zoom",
"id": 3,
}
if settings.BIG_BLUE_BUTTON_SECRET is not None and settings.BIG_BLUE_BUTTON_URL is not None:
VIDEO_CHAT_PROVIDERS["big_blue_button"] = {"name": "BigBlueButton", "id": 4}
video_chat_provider: int = models.PositiveSmallIntegerField(
default=VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
GIPHY_RATING_OPTIONS = {
"disabled": {
"name": "GIPHY integration disabled",
"id": 0,
},
# Source: https://github.com/Giphy/giphy-js/blob/master/packages/fetch-api/README.md#shared-options
"y": {
"name": "Allow GIFs rated Y (Very young audience)",
"id": 1,
},
"g": {
"name": "Allow GIFs rated G (General audience)",
"id": 2,
},
"pg": {
"name": "Allow GIFs rated PG (Parental guidance)",
"id": 3,
},
"pg-13": {
"name": "Allow GIFs rated PG13 (Parental guidance - under 13)",
"id": 4,
},
"r": {
"name": "Allow GIFs rated R (Restricted)",
"id": 5,
},
}
# maximum rating of the GIFs that will be retrieved from GIPHY
giphy_rating: int = models.PositiveSmallIntegerField(default=GIPHY_RATING_OPTIONS["g"]["id"])
default_code_block_language: Optional[str] = models.TextField(null=True, default=None)
# Define the types of the various automatically managed properties
property_types: Dict[str, Union[type, Tuple[type, ...]]] = dict(
add_custom_emoji_policy=int,
allow_edit_history=bool,
allow_message_deleting=bool,
bot_creation_policy=int,
create_stream_policy=int,
invite_to_stream_policy=int,
move_messages_between_streams_policy=int,
default_language=str,
default_twenty_four_hour_time=bool,
description=str,
digest_emails_enabled=bool,
disallow_disposable_email_addresses=bool,
email_address_visibility=int,
email_changes_disabled=bool,
giphy_rating=int,
invite_required=bool,
invite_to_realm_policy=int,
inline_image_preview=bool,
inline_url_embed_preview=bool,
mandatory_topics=bool,
message_retention_days=(int, type(None)),
name=str,
name_changes_disabled=bool,
avatar_changes_disabled=bool,
emails_restricted_to_domains=bool,
send_welcome_emails=bool,
message_content_allowed_in_email_notifications=bool,
video_chat_provider=int,
waiting_period_threshold=int,
digest_weekday=int,
private_message_policy=int,
user_group_edit_policy=int,
default_code_block_language=(str, type(None)),
message_content_delete_limit_seconds=int,
wildcard_mention_policy=int,
)
DIGEST_WEEKDAY_VALUES = [0, 1, 2, 3, 4, 5, 6]
# Icon is the square mobile icon.
ICON_FROM_GRAVATAR = "G"
ICON_UPLOADED = "U"
ICON_SOURCES = (
(ICON_FROM_GRAVATAR, "Hosted by Gravatar"),
(ICON_UPLOADED, "Uploaded by administrator"),
)
icon_source: str = models.CharField(
default=ICON_FROM_GRAVATAR,
choices=ICON_SOURCES,
max_length=1,
)
icon_version: int = models.PositiveSmallIntegerField(default=1)
# Logo is the horizontal logo we show in top-left of web app navbar UI.
LOGO_DEFAULT = "D"
LOGO_UPLOADED = "U"
LOGO_SOURCES = (
(LOGO_DEFAULT, "Default to Zulip"),
(LOGO_UPLOADED, "Uploaded by administrator"),
)
logo_source: str = models.CharField(
default=LOGO_DEFAULT,
choices=LOGO_SOURCES,
max_length=1,
)
logo_version: int = models.PositiveSmallIntegerField(default=1)
night_logo_source: str = models.CharField(
default=LOGO_DEFAULT,
choices=LOGO_SOURCES,
max_length=1,
)
night_logo_version: int = models.PositiveSmallIntegerField(default=1)
def authentication_methods_dict(self) -> Dict[str, bool]:
"""Returns the a mapping from authentication flags to their status,
showing only those authentication flags that are supported on
the current server (i.e. if EmailAuthBackend is not configured
on the server, this will not return an entry for "Email")."""
# This mapping needs to be imported here due to the cyclic
# dependency.
from zproject.backends import AUTH_BACKEND_NAME_MAP
ret: Dict[str, bool] = {}
supported_backends = [backend.__class__ for backend in supported_auth_backends()]
# `authentication_methods` is a bitfield.types.BitHandler, not
# a true dict; since it is still python2- and python3-compatible,
# `iteritems` is the method it provides for iterating over its contents.
for k, v in self.authentication_methods.iteritems():
backend = AUTH_BACKEND_NAME_MAP[k]
if backend in supported_backends:
ret[k] = v
return ret
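# Illustrative sketch of the return shape above (hypothetical server configuration):
# with EmailAuthBackend and GitHubAuthBackend configured on the server, and GitHub
# disabled for this realm, the dict would look like:
#     >>> realm.authentication_methods_dict()
#     {'Email': True, 'GitHub': False}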
def __str__(self) -> str:
return f"<Realm: {self.string_id} {self.id}>"
@cache_with_key(get_realm_emoji_cache_key, timeout=3600 * 24 * 7)
def get_emoji(self) -> Dict[str, Dict[str, Any]]:
return get_realm_emoji_uncached(self)
@cache_with_key(get_active_realm_emoji_cache_key, timeout=3600 * 24 * 7)
def get_active_emoji(self) -> Dict[str, Dict[str, Any]]:
return get_active_realm_emoji_uncached(self)
def get_admin_users_and_bots(
self, include_realm_owners: bool = True
) -> Sequence["UserProfile"]:
"""Use this in contexts where we want administrative users as well as
bots with administrator privileges, like send_event calls for
notifications to all administrator users.
"""
if include_realm_owners:
roles = [UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_REALM_OWNER]
else:
roles = [UserProfile.ROLE_REALM_ADMINISTRATOR]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(
realm=self,
is_active=True,
role__in=roles,
)
def get_human_admin_users(self, include_realm_owners: bool = True) -> QuerySet:
"""Use this in contexts where we want only human users with
administrative privileges, like sending an email to all of a
realm's administrators (bots don't have real email addresses).
"""
if include_realm_owners:
roles = [UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_REALM_OWNER]
else:
roles = [UserProfile.ROLE_REALM_ADMINISTRATOR]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(
realm=self,
is_bot=False,
is_active=True,
role__in=roles,
)
def get_human_billing_admin_and_realm_owner_users(self) -> QuerySet:
return UserProfile.objects.filter(
Q(role=UserProfile.ROLE_REALM_OWNER) | Q(is_billing_admin=True),
realm=self,
is_bot=False,
is_active=True,
)
def get_active_users(self) -> Sequence["UserProfile"]:
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
def get_first_human_user(self) -> Optional["UserProfile"]:
"""A useful value for communications with newly created realms.
Has a few fundamental limitations:
* Its value will be effectively random for realms imported from Slack or
other third-party tools.
* The user may be deactivated, etc., so it's not something that's useful
for features, permissions, etc.
"""
return UserProfile.objects.filter(realm=self, is_bot=False).order_by("id").first()
def get_human_owner_users(self) -> QuerySet:
return UserProfile.objects.filter(
realm=self, is_bot=False, role=UserProfile.ROLE_REALM_OWNER, is_active=True
)
def get_bot_domain(self) -> str:
return get_fake_email_domain(self)
def get_notifications_stream(self) -> Optional["Stream"]:
if self.notifications_stream is not None and not self.notifications_stream.deactivated:
return self.notifications_stream
return None
def get_signup_notifications_stream(self) -> Optional["Stream"]:
if (
self.signup_notifications_stream is not None
and not self.signup_notifications_stream.deactivated
):
return self.signup_notifications_stream
return None
@property
def max_invites(self) -> int:
if self._max_invites is None:
return settings.INVITES_DEFAULT_REALM_DAILY_MAX
return self._max_invites
@max_invites.setter
def max_invites(self, value: Optional[int]) -> None:
self._max_invites = value
def upload_quota_bytes(self) -> Optional[int]:
if self.upload_quota_gb is None:
return None
# We describe the quota to users in "GB" or "gigabytes", but actually apply
# it as gibibytes (GiB) to be a bit more generous in case of confusion.
return self.upload_quota_gb << 30
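# Worked example of the GB -> GiB conversion above: a quota stored as 5 is applied
# as 5 GiB of raw bytes.
#     >>> 5 << 30
#     5368709120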
@cache_with_key(get_realm_used_upload_space_cache_key, timeout=3600 * 24 * 7)
def currently_used_upload_space_bytes(self) -> int:
used_space = Attachment.objects.filter(realm=self).aggregate(Sum("size"))["size__sum"]
if used_space is None:
return 0
return used_space
def ensure_not_on_limited_plan(self) -> None:
if self.plan_type == Realm.LIMITED:
raise JsonableError(self.UPGRADE_TEXT_STANDARD)
@property
def subdomain(self) -> str:
return self.string_id
@property
def display_subdomain(self) -> str:
"""Likely to be temporary function to avoid signup messages being sent
to an empty topic"""
if self.string_id == "":
return "."
return self.string_id
@property
def uri(self) -> str:
return settings.EXTERNAL_URI_SCHEME + self.host
@property
def host(self) -> str:
# Use mark_sanitized to prevent false positives from Pysa thinking that
# the host is user-controlled.
return mark_sanitized(self.host_for_subdomain(self.subdomain))
@staticmethod
def host_for_subdomain(subdomain: str) -> str:
if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
return settings.EXTERNAL_HOST
default_host = f"{subdomain}.{settings.EXTERNAL_HOST}"
return settings.REALM_HOSTS.get(subdomain, default_host)
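# Illustrative sketch (hypothetical settings): with EXTERNAL_HOST = "zulip.example.com"
# and REALM_HOSTS = {"acme": "chat.acme.com"}, an overridden subdomain resolves to its
# configured host and any other subdomain falls back to the default pattern.
#     >>> Realm.host_for_subdomain("acme")
#     'chat.acme.com'
#     >>> Realm.host_for_subdomain("dev")
#     'dev.zulip.example.com'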
@property
def is_zephyr_mirror_realm(self) -> bool:
return self.string_id == "zephyr"
@property
def webathena_enabled(self) -> bool:
return self.is_zephyr_mirror_realm
@property
def presence_disabled(self) -> bool:
return self.is_zephyr_mirror_realm
def realm_post_delete_handler(*, instance: Realm, **kwargs: object) -> None:
# This would be better as a functools.partial, but for some reason
# Django doesn't call it even when it's registered as a post_delete handler.
flush_realm(instance=instance, from_deletion=True)
post_save.connect(flush_realm, sender=Realm)
post_delete.connect(realm_post_delete_handler, sender=Realm)
def get_realm(string_id: str) -> Realm:
return Realm.objects.get(string_id=string_id)
def get_realm_by_id(realm_id: int) -> Realm:
return Realm.objects.get(id=realm_id)
def name_changes_disabled(realm: Optional[Realm]) -> bool:
if realm is None:
return settings.NAME_CHANGES_DISABLED
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
def avatar_changes_disabled(realm: Realm) -> bool:
return settings.AVATAR_CHANGES_DISABLED or realm.avatar_changes_disabled
def get_org_type_display_name(org_type: int) -> str:
for realm_type, realm_type_details in Realm.ORG_TYPES.items():
if realm_type_details["id"] == org_type:
return realm_type_details["name"]
return ""
class RealmDomain(models.Model):
"""For an organization with emails_restricted_to_domains enabled, the list of
allowed domains"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
# should always be stored lowercase
domain: str = models.CharField(max_length=80, db_index=True)
allow_subdomains: bool = models.BooleanField(default=False)
class Meta:
unique_together = ("realm", "domain")
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email: str) -> str:
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email: str) -> str:
return email.split("@")[-1].lower()
class DomainNotAllowedForRealmError(Exception):
pass
class DisposableEmailError(Exception):
pass
class EmailContainsPlusError(Exception):
pass
def get_realm_domains(realm: Realm) -> List[Dict[str, str]]:
return list(realm.realmdomain_set.values("domain", "allow_subdomains"))
class RealmEmoji(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
author: Optional["UserProfile"] = models.ForeignKey(
"UserProfile",
blank=True,
null=True,
on_delete=CASCADE,
)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
name: str = models.TextField(
validators=[
MinLengthValidator(1),
# The second part of the regex (negative lookbehind) disallows names
# ending with one of the punctuation characters.
RegexValidator(
regex=r"^[0-9a-z.\-_]+(?<![.\-_])$",
message=gettext_lazy("Invalid characters in emoji name"),
),
]
)
# The basename of the custom emoji's filename; see PATH_ID_TEMPLATE for the full path.
file_name: Optional[str] = models.TextField(db_index=True, null=True, blank=True)
deactivated: bool = models.BooleanField(default=False)
PATH_ID_TEMPLATE = "{realm_id}/emoji/images/{emoji_file_name}"
def __str__(self) -> str:
return f"<RealmEmoji({self.realm.string_id}): {self.id} {self.name} {self.deactivated} {self.file_name}>"
def get_realm_emoji_dicts(
realm: Realm, only_active_emojis: bool = False
) -> Dict[str, Dict[str, Any]]:
query = RealmEmoji.objects.filter(realm=realm).select_related("author")
if only_active_emojis:
query = query.filter(deactivated=False)
d = {}
from zerver.lib.emoji import get_emoji_url
for realm_emoji in query.all():
author_id = None
if realm_emoji.author:
author_id = realm_emoji.author_id
emoji_url = get_emoji_url(realm_emoji.file_name, realm_emoji.realm_id)
d[str(realm_emoji.id)] = dict(
id=str(realm_emoji.id),
name=realm_emoji.name,
source_url=emoji_url,
deactivated=realm_emoji.deactivated,
author_id=author_id,
)
return d
def get_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
return get_realm_emoji_dicts(realm)
def get_active_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
realm_emojis = get_realm_emoji_dicts(realm, only_active_emojis=True)
d = {}
for emoji_id, emoji_dict in realm_emojis.items():
d[emoji_dict["name"]] = emoji_dict
return d
def flush_realm_emoji(*, instance: RealmEmoji, **kwargs: object) -> None:
realm = instance.realm
cache_set(
get_realm_emoji_cache_key(realm), get_realm_emoji_uncached(realm), timeout=3600 * 24 * 7
)
cache_set(
get_active_realm_emoji_cache_key(realm),
get_active_realm_emoji_uncached(realm),
timeout=3600 * 24 * 7,
)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value: str) -> Pattern[str]:
regex = re.compile(r"^(?:(?:[\w\-#_= /:]*|[+]|[!])(\(\?P<\w+>.+\)))+$")
error_msg = _("Invalid linkifier pattern. Valid characters are {}.").format(
"[ a-zA-Z_#=/:+!-]",
)
if not regex.match(str(value)):
raise ValidationError(error_msg)
try:
pattern = re.compile(value)
except re.error:
# Regex is invalid
raise ValidationError(error_msg)
return pattern
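# Illustrative sketch of the validator above (patterns are hypothetical): a well-formed
# linkifier pattern is returned as a compiled regex, while a malformed one raises
# ValidationError at either the structural check or re.compile.
#     >>> filter_pattern_validator(r"#(?P<id>[0-9]+)")   # returns the compiled pattern
#     >>> filter_pattern_validator(r"#(?P<id>[0-9]+")    # raises ValidationError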
def filter_format_validator(value: str) -> None:
regex = re.compile(r"^([\.\/:a-zA-Z0-9#_?=&;~-]+%\(([a-zA-Z0-9_-]+)\)s)+[/a-zA-Z0-9#_?=&;~-]*$")
if not regex.match(value):
raise ValidationError(_("Invalid URL format string."))
class RealmFilter(models.Model):
"""Realm-specific regular expressions to automatically linkify certain
strings inside the Markdown processor. See "Custom filters" in the settings UI.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
pattern: str = models.TextField()
url_format_string: str = models.TextField(validators=[URLValidator(), filter_format_validator])
class Meta:
unique_together = ("realm", "pattern")
def clean(self) -> None:
"""Validate whether the set of parameters in the URL Format string
match the set of parameters in the regular expression.
Django's `full_clean` calls `clean_fields` followed by `clean` method
and stores all ValidationErrors from all stages to return as JSON.
"""
# Extract variables present in the pattern
pattern = filter_pattern_validator(self.pattern)
group_set = set(pattern.groupindex.keys())
# Extract variables used in the URL format string. Note that
# this regex will incorrectly reject patterns that attempt to
# escape % using %%.
found_group_set: Set[str] = set()
group_match_regex = r"(?<!%)%\((?P<group_name>[^()]+)\)s"
for m in re.finditer(group_match_regex, self.url_format_string):
group_name = m.group("group_name")
found_group_set.add(group_name)
# Report patterns missing in linkifier pattern.
missing_in_pattern_set = found_group_set - group_set
if len(missing_in_pattern_set) > 0:
name = list(sorted(missing_in_pattern_set))[0]
raise ValidationError(
_("Group %(name)r in URL format string is not present in linkifier pattern."),
params={"name": name},
)
missing_in_url_set = group_set - found_group_set
# Report patterns missing in URL format string.
if len(missing_in_url_set) > 0:
# We just report the first missing pattern here. Users can
# incrementally resolve errors if there are multiple
# missing patterns.
name = list(sorted(missing_in_url_set))[0]
raise ValidationError(
_("Group %(name)r in linkifier pattern is not present in URL format string."),
params={"name": name},
)
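# Illustrative sketch of the clean() check above (hypothetical pattern/format pair):
# pattern r"#(?P<id>[0-9]+)" with url_format_string "https://example.com/ticket/%(id)s"
# validates, because the "id" group appears on both sides; a format string that
# references %(ticket_id)s instead would raise ValidationError, since ticket_id is not
# a named group in the pattern.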
def __str__(self) -> str:
return f"<RealmFilter({self.realm.string_id}): {self.pattern} {self.url_format_string}>"
def get_linkifiers_cache_key(realm_id: int) -> str:
return f"{cache.KEY_PREFIX}:all_linkifiers_for_realm:{realm_id}"
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_linkifiers_cache: Dict[int, List[LinkifierDict]] = {}
def realm_in_local_linkifiers_cache(realm_id: int) -> bool:
return realm_id in per_request_linkifiers_cache
def linkifiers_for_realm(realm_id: int) -> List[LinkifierDict]:
if not realm_in_local_linkifiers_cache(realm_id):
per_request_linkifiers_cache[realm_id] = linkifiers_for_realm_remote_cache(realm_id)
return per_request_linkifiers_cache[realm_id]
def realm_filters_for_realm(realm_id: int) -> List[Tuple[str, str, int]]:
"""
Processes data from `linkifiers_for_realm` to return to older clients,
which use the `realm_filters` events.
"""
linkifiers = linkifiers_for_realm(realm_id)
realm_filters: List[Tuple[str, str, int]] = []
for linkifier in linkifiers:
realm_filters.append((linkifier["pattern"], linkifier["url_format"], linkifier["id"]))
return realm_filters
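# Illustrative sketch of the conversion above (hypothetical linkifier): a LinkifierDict
# such as {"pattern": "#(?P<id>[0-9]+)", "url_format": "https://example.com/%(id)s", "id": 7}
# is flattened to the legacy tuple ("#(?P<id>[0-9]+)", "https://example.com/%(id)s", 7).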
@cache_with_key(get_linkifiers_cache_key, timeout=3600 * 24 * 7)
def linkifiers_for_realm_remote_cache(realm_id: int) -> List[LinkifierDict]:
linkifiers = []
for linkifier in RealmFilter.objects.filter(realm_id=realm_id):
linkifiers.append(
LinkifierDict(
pattern=linkifier.pattern,
url_format=linkifier.url_format_string,
id=linkifier.id,
)
)
return linkifiers
def flush_linkifiers(*, instance: RealmFilter, **kwargs: object) -> None:
realm_id = instance.realm_id
cache_delete(get_linkifiers_cache_key(realm_id))
try:
per_request_linkifiers_cache.pop(realm_id)
except KeyError:
pass
post_save.connect(flush_linkifiers, sender=RealmFilter)
post_delete.connect(flush_linkifiers, sender=RealmFilter)
def flush_per_request_caches() -> None:
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_linkifiers_cache
per_request_linkifiers_cache = {}
class RealmPlayground(models.Model):
"""Server side storage model to store playground information needed by our
'view code in playground' feature in code blocks.
"""
MAX_PYGMENTS_LANGUAGE_LENGTH = 40
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
url_prefix: str = models.TextField(validators=[URLValidator()])
# User-visible display name used when configuring playgrounds in the settings page and
# when displaying them in the playground links popover.
name: str = models.TextField(db_index=True)
# This stores the pygments lexer subclass names and not the aliases themselves.
pygments_language: str = models.CharField(
db_index=True,
max_length=MAX_PYGMENTS_LANGUAGE_LENGTH,
# We validate to see if this conforms to the character set allowed for a
# language in the code block.
validators=[
RegexValidator(
regex=r"^[ a-zA-Z0-9_+-./#]*$", message=_("Invalid characters in pygments language")
)
],
)
class Meta:
unique_together = (("realm", "pygments_language", "name"),)
def __str__(self) -> str:
return f"<RealmPlayground({self.realm.string_id}): {self.pygments_language} {self.name}>"
def get_realm_playgrounds(realm: Realm) -> List[Dict[str, Union[int, str]]]:
playgrounds: List[Dict[str, Union[int, str]]] = []
for playground in RealmPlayground.objects.filter(realm=realm).all():
playgrounds.append(
dict(
id=playground.id,
name=playground.name,
pygments_language=playground.pygments_language,
url_prefix=playground.url_prefix,
)
)
return playgrounds
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
type_id: int = models.IntegerField(db_index=True)
type: int = models.PositiveSmallIntegerField(db_index=True)
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta:
unique_together = ("type", "type_id")
# N.B. If we used Django's choice=... we would get this for free (kinda)
_type_names = {PERSONAL: "personal", STREAM: "stream", HUDDLE: "huddle"}
def type_name(self) -> str:
# Raises KeyError if invalid
return self._type_names[self.type]
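# Illustrative sketch: a Recipient row with type=Recipient.STREAM and type_id set to a
# stream's id maps that stream to its globally unique recipient id, and type_name() on
# such a row returns "stream".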
def __str__(self) -> str:
display_recipient = get_display_recipient(self)
return f"<Recipient: {display_recipient} ({self.type_id}, {self.type})>"
class UserBaseSettings(models.Model):
"""This abstract class is the container for all preferences/personal
settings for users that control the behavior of the application.
It was extracted from UserProfile to support the RealmUserDefault
model (i.e. allow individual realms to configure the default
values of these preferences for new users in their organization).
Changing the default value for a field declared here likely
requires a migration to update all RealmUserDefault rows that had
the old default value to have the new default value. Otherwise,
the default change will only affect new users joining Realms
created after the change.
"""
# UI settings
enter_sends: Optional[bool] = models.BooleanField(null=True, default=False)
# display settings
left_side_userlist: bool = models.BooleanField(default=False)
default_language: str = models.CharField(default="en", max_length=MAX_LANGUAGE_ID_LENGTH)
# This setting controls which view is rendered first when Zulip loads.
# Values for it are URL suffix after `#`.
default_view: str = models.TextField(default="recent_topics")
dense_mode: bool = models.BooleanField(default=True)
fluid_layout_width: bool = models.BooleanField(default=False)
high_contrast_mode: bool = models.BooleanField(default=False)
translate_emoticons: bool = models.BooleanField(default=False)
twenty_four_hour_time: bool = models.BooleanField(default=False)
starred_message_counts: bool = models.BooleanField(default=True)
COLOR_SCHEME_AUTOMATIC = 1
COLOR_SCHEME_NIGHT = 2
COLOR_SCHEME_LIGHT = 3
COLOR_SCHEME_CHOICES = [COLOR_SCHEME_AUTOMATIC, COLOR_SCHEME_NIGHT, COLOR_SCHEME_LIGHT]
color_scheme: int = models.PositiveSmallIntegerField(default=COLOR_SCHEME_AUTOMATIC)
# UI setting controlling Zulip's behavior of demoting in the sort
# order and graying out streams with no recent traffic. The
# default behavior, automatic, enables this behavior once a user
# is subscribed to 30+ streams in the web app.
DEMOTE_STREAMS_AUTOMATIC = 1
DEMOTE_STREAMS_ALWAYS = 2
DEMOTE_STREAMS_NEVER = 3
DEMOTE_STREAMS_CHOICES = [
DEMOTE_STREAMS_AUTOMATIC,
DEMOTE_STREAMS_ALWAYS,
DEMOTE_STREAMS_NEVER,
]
demote_inactive_streams: int = models.PositiveSmallIntegerField(
default=DEMOTE_STREAMS_AUTOMATIC
)
# Emojisets
GOOGLE_EMOJISET = "google"
GOOGLE_BLOB_EMOJISET = "google-blob"
TEXT_EMOJISET = "text"
TWITTER_EMOJISET = "twitter"
EMOJISET_CHOICES = (
(GOOGLE_EMOJISET, "Google modern"),
(GOOGLE_BLOB_EMOJISET, "Google classic"),
(TWITTER_EMOJISET, "Twitter"),
(TEXT_EMOJISET, "Plain text"),
)
emojiset: str = models.CharField(
default=GOOGLE_BLOB_EMOJISET, choices=EMOJISET_CHOICES, max_length=20
)
### Notifications settings. ###
email_notifications_batching_period_seconds: int = models.IntegerField(default=120)
# Stream notifications.
enable_stream_desktop_notifications: bool = models.BooleanField(default=False)
enable_stream_email_notifications: bool = models.BooleanField(default=False)
enable_stream_push_notifications: bool = models.BooleanField(default=False)
enable_stream_audible_notifications: bool = models.BooleanField(default=False)
notification_sound: str = models.CharField(max_length=20, default="zulip")
wildcard_mentions_notify: bool = models.BooleanField(default=True)
# PM + @-mention notifications.
enable_desktop_notifications: bool = models.BooleanField(default=True)
pm_content_in_desktop_notifications: bool = models.BooleanField(default=True)
enable_sounds: bool = models.BooleanField(default=True)
enable_offline_email_notifications: bool = models.BooleanField(default=True)
message_content_in_email_notifications: bool = models.BooleanField(default=True)
enable_offline_push_notifications: bool = models.BooleanField(default=True)
enable_online_push_notifications: bool = models.BooleanField(default=True)
DESKTOP_ICON_COUNT_DISPLAY_MESSAGES = 1
DESKTOP_ICON_COUNT_DISPLAY_NOTIFIABLE = 2
DESKTOP_ICON_COUNT_DISPLAY_NONE = 3
desktop_icon_count_display: int = models.PositiveSmallIntegerField(
default=DESKTOP_ICON_COUNT_DISPLAY_MESSAGES
)
enable_digest_emails: bool = models.BooleanField(default=True)
enable_login_emails: bool = models.BooleanField(default=True)
enable_marketing_emails: bool = models.BooleanField(default=True)
realm_name_in_notifications: bool = models.BooleanField(default=False)
presence_enabled: bool = models.BooleanField(default=True)
# Whether or not the user wants to sync their drafts.
enable_drafts_synchronization = models.BooleanField(default=True)
# Define the types of the various automatically managed properties
property_types = dict(
color_scheme=int,
default_language=str,
default_view=str,
demote_inactive_streams=int,
dense_mode=bool,
emojiset=str,
enable_drafts_synchronization=bool,
enter_sends=bool,
fluid_layout_width=bool,
high_contrast_mode=bool,
left_side_userlist=bool,
starred_message_counts=bool,
translate_emoticons=bool,
twenty_four_hour_time=bool,
)
notification_setting_types = dict(
enable_desktop_notifications=bool,
enable_digest_emails=bool,
enable_login_emails=bool,
enable_marketing_emails=bool,
email_notifications_batching_period_seconds=int,
enable_offline_email_notifications=bool,
enable_offline_push_notifications=bool,
enable_online_push_notifications=bool,
enable_sounds=bool,
enable_stream_desktop_notifications=bool,
enable_stream_email_notifications=bool,
enable_stream_push_notifications=bool,
enable_stream_audible_notifications=bool,
wildcard_mentions_notify=bool,
message_content_in_email_notifications=bool,
notification_sound=str,
pm_content_in_desktop_notifications=bool,
desktop_icon_count_display=int,
realm_name_in_notifications=bool,
presence_enabled=bool,
)
class Meta:
abstract = True
class RealmUserDefault(UserBaseSettings):
"""This table stores realm-level default values for user preferences
like notification settings, used when creating a new user account.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class UserProfile(AbstractBaseUser, PermissionsMixin, UserBaseSettings):
USERNAME_FIELD = "email"
MAX_NAME_LENGTH = 100
MIN_NAME_LENGTH = 2
API_KEY_LENGTH = 32
NAME_INVALID_CHARS = ["*", "`", "\\", ">", '"', "@"]
DEFAULT_BOT = 1
"""
Incoming webhook bots are limited to only sending messages via webhooks.
Thus, it is less of a security risk to expose their API keys to third-party services,
since they can't be used to read messages.
"""
INCOMING_WEBHOOK_BOT = 2
# This value is also used in static/js/settings_bots.js.
# When updating it here, update it there as well.
OUTGOING_WEBHOOK_BOT = 3
"""
Embedded bots run within the Zulip server itself; events are added to the
embedded_bots queue and then handled by a QueueProcessingWorker.
"""
EMBEDDED_BOT = 4
BOT_TYPES = {
DEFAULT_BOT: "Generic bot",
INCOMING_WEBHOOK_BOT: "Incoming webhook",
OUTGOING_WEBHOOK_BOT: "Outgoing webhook",
EMBEDDED_BOT: "Embedded bot",
}
SERVICE_BOT_TYPES = [
OUTGOING_WEBHOOK_BOT,
EMBEDDED_BOT,
]
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
# For historical reasons, Zulip has two email fields. The
# `delivery_email` field is the user's email address, where all
# email notifications will be sent, and is used for all
# authentication use cases.
#
# The `email` field is the same as delivery_email in organizations
# with EMAIL_ADDRESS_VISIBILITY_EVERYONE. For other
# organizations, it will be a unique value of the form
# user1234@example.com. This field exists for backwards
# compatibility in Zulip APIs where users are referred to by their
# email address, not their ID; it should be used in all API use cases.
#
# Both fields are unique within a realm (in a case-insensitive fashion).
delivery_email: str = models.EmailField(blank=False, db_index=True)
email: str = models.EmailField(blank=False, db_index=True)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
# Foreign key to the Recipient object for PERSONAL type messages to this user.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
# The user's name. We prefer the model of a full_name
# over first+last because cultures vary on how many
# names one has, whether the family name is first or last, etc.
# It also allows organizations to encode a bit of non-name data in
# the "name" attribute if desired, like gender pronouns,
# graduation year, etc.
full_name: str = models.CharField(max_length=MAX_NAME_LENGTH)
date_joined: datetime.datetime = models.DateTimeField(default=timezone_now)
tos_version: Optional[str] = models.CharField(null=True, max_length=10)
api_key: str = models.CharField(max_length=API_KEY_LENGTH)
# Whether the user has access to server-level administrator pages, like /activity
is_staff: bool = models.BooleanField(default=False)
# For a normal user, this is True unless the user or an admin has
# deactivated their account. The name comes from Django; this field
# isn't related to presence or to whether the user has recently used Zulip.
#
# See also `long_term_idle`.
is_active: bool = models.BooleanField(default=True, db_index=True)
is_billing_admin: bool = models.BooleanField(default=False, db_index=True)
is_bot: bool = models.BooleanField(default=False, db_index=True)
bot_type: Optional[int] = models.PositiveSmallIntegerField(null=True, db_index=True)
bot_owner: Optional["UserProfile"] = models.ForeignKey(
"self", null=True, on_delete=models.SET_NULL
)
# Each role has a superset of the permissions of the next higher
# numbered role. When adding new roles, leave enough space for
# future roles to be inserted between currently adjacent
# roles. These constants appear in RealmAuditLog.extra_data, so
# changes to them will require a migration of RealmAuditLog.
ROLE_REALM_OWNER = 100
ROLE_REALM_ADMINISTRATOR = 200
ROLE_MODERATOR = 300
ROLE_MEMBER = 400
ROLE_GUEST = 600
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
ROLE_TYPES = [
ROLE_REALM_OWNER,
ROLE_REALM_ADMINISTRATOR,
ROLE_MODERATOR,
ROLE_MEMBER,
ROLE_GUEST,
]
# Whether the user has been "soft-deactivated" due to weeks of inactivity.
# For these users we avoid doing UserMessage table work, as an optimization
# for large Zulip organizations with lots of single-visit users.
long_term_idle: bool = models.BooleanField(default=False, db_index=True)
# When we last added basic UserMessage rows for a long_term_idle user.
last_active_message_id: Optional[int] = models.IntegerField(null=True)
# Mirror dummies are fake (!is_active) users used to provide
# message senders in our cross-protocol Zephyr<->Zulip content
# mirroring integration, so that we can display mirrored content
# like native Zulip messages (with a name + avatar, etc.).
is_mirror_dummy: bool = models.BooleanField(default=False)
# Users with this flag set are allowed to forge messages as sent by another
# user and to send to private streams; also used for Zephyr/Jabber mirroring.
can_forge_sender: bool = models.BooleanField(default=False, db_index=True)
# Users with this flag set can create other users via API.
can_create_users: bool = models.BooleanField(default=False, db_index=True)
# Used for rate-limiting certain automated messages generated by bots
last_reminder: Optional[datetime.datetime] = models.DateTimeField(default=None, null=True)
# Minutes to wait before warning a bot owner that their bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
# API rate limits, formatted as a comma-separated list of range:max pairs
rate_limits: str = models.CharField(default="", max_length=100)
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Default streams for some deprecated/legacy classes of bot users.
default_sending_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream",
null=True,
related_name="+",
on_delete=models.SET_NULL,
)
default_events_register_stream: Optional["Stream"] = models.ForeignKey(
"zerver.Stream",
null=True,
related_name="+",
on_delete=models.SET_NULL,
)
default_all_public_streams: bool = models.BooleanField(default=False)
# A timezone name from the `tzdata` database, as found in pytz.all_timezones.
#
# The longest existing name is 32 characters long, so max_length=40 seems
# like a safe choice.
#
# In Django, the convention is to use an empty string instead of NULL/None
# for text-based fields. For more information, see
# https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
timezone: str = models.CharField(max_length=40, default="")
AVATAR_FROM_GRAVATAR = "G"
AVATAR_FROM_USER = "U"
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, "Hosted by Gravatar"),
(AVATAR_FROM_USER, "Uploaded by user"),
)
avatar_source: str = models.CharField(
default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1
)
avatar_version: int = models.PositiveSmallIntegerField(default=1)
avatar_hash: Optional[str] = models.CharField(null=True, max_length=64)
TUTORIAL_WAITING = "W"
TUTORIAL_STARTED = "S"
TUTORIAL_FINISHED = "F"
TUTORIAL_STATES = (
(TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"),
)
tutorial_status: str = models.CharField(
default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1
)
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is whether the step has been
# completed.
onboarding_steps: str = models.TextField(default="[]")
zoom_token: Optional[object] = models.JSONField(default=None, null=True)
objects: UserManager = UserManager()
ROLE_ID_TO_NAME_MAP = {
ROLE_REALM_OWNER: gettext_lazy("Organization owner"),
ROLE_REALM_ADMINISTRATOR: gettext_lazy("Organization administrator"),
ROLE_MODERATOR: gettext_lazy("Moderator"),
ROLE_MEMBER: gettext_lazy("Member"),
ROLE_GUEST: gettext_lazy("Guest"),
}
def get_role_name(self) -> str:
return self.ROLE_ID_TO_NAME_MAP[self.role]
@property
def profile_data(self) -> ProfileData:
values = CustomProfileFieldValue.objects.filter(user_profile=self)
user_data = {
v.field_id: {"value": v.value, "rendered_value": v.rendered_value} for v in values
}
data: ProfileData = []
for field in custom_profile_fields_for_realm(self.realm_id):
field_values = user_data.get(field.id, None)
if field_values:
value, rendered_value = field_values.get("value"), field_values.get(
"rendered_value"
)
else:
value, rendered_value = None, None
field_type = field.field_type
if value is not None:
converter = field.FIELD_CONVERTERS[field_type]
value = converter(value)
field_data = field.as_dict()
data.append(
{
"id": field_data["id"],
"name": field_data["name"],
"type": field_data["type"],
"hint": field_data["hint"],
"field_data": field_data["field_data"],
"order": field_data["order"],
"value": value,
"rendered_value": rendered_value,
}
)
return data
def can_admin_user(self, target_user: "UserProfile") -> bool:
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def __str__(self) -> str:
return f"<UserProfile: {self.email} {self.realm}>"
@property
def is_provisional_member(self) -> bool:
if self.is_moderator:
return False
diff = (timezone_now() - self.date_joined).days
if diff < self.realm.waiting_period_threshold:
return True
return False
@property
def is_realm_admin(self) -> bool:
return (
self.role == UserProfile.ROLE_REALM_ADMINISTRATOR
or self.role == UserProfile.ROLE_REALM_OWNER
)
@is_realm_admin.setter
def is_realm_admin(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_REALM_ADMINISTRATOR
elif self.role == UserProfile.ROLE_REALM_ADMINISTRATOR:
# We need to be careful to not accidentally change
# ROLE_GUEST to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def has_billing_access(self) -> bool:
return self.is_realm_owner or self.is_billing_admin
@property
def is_realm_owner(self) -> bool:
return self.role == UserProfile.ROLE_REALM_OWNER
@property
def is_guest(self) -> bool:
return self.role == UserProfile.ROLE_GUEST
@is_guest.setter
def is_guest(self, value: bool) -> None:
if value:
self.role = UserProfile.ROLE_GUEST
elif self.role == UserProfile.ROLE_GUEST:
# We need to be careful to not accidentally change
# ROLE_REALM_ADMINISTRATOR to ROLE_MEMBER here.
self.role = UserProfile.ROLE_MEMBER
@property
def is_moderator(self) -> bool:
return self.role == UserProfile.ROLE_MODERATOR
@property
def is_incoming_webhook(self) -> bool:
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@property
def allowed_bot_types(self) -> List[int]:
allowed_bot_types = []
if (
self.is_realm_admin
or not self.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
):
allowed_bot_types.append(UserProfile.DEFAULT_BOT)
allowed_bot_types += [
UserProfile.INCOMING_WEBHOOK_BOT,
UserProfile.OUTGOING_WEBHOOK_BOT,
]
if settings.EMBEDDED_BOTS_ENABLED:
allowed_bot_types.append(UserProfile.EMBEDDED_BOT)
return allowed_bot_types
@staticmethod
def emojiset_choices() -> List[Dict[str, str]]:
return [
dict(key=emojiset[0], text=emojiset[1]) for emojiset in UserProfile.EMOJISET_CHOICES
]
@staticmethod
def emails_from_ids(user_ids: Sequence[int]) -> Dict[int, str]:
rows = UserProfile.objects.filter(id__in=user_ids).values("id", "email")
return {row["id"]: row["email"] for row in rows}
def email_address_is_realm_public(self) -> bool:
if self.realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
return True
if self.is_bot:
return True
return False
def has_permission(self, policy_name: str) -> bool:
if policy_name not in [
"add_custom_emoji_policy",
"create_stream_policy",
"edit_topic_policy",
"invite_to_stream_policy",
"invite_to_realm_policy",
"move_messages_between_streams_policy",
"user_group_edit_policy",
]:
raise AssertionError("Invalid policy")
policy_value = getattr(self.realm, policy_name)
if policy_value == Realm.POLICY_NOBODY:
return False
if self.is_realm_admin:
return True
if policy_value == Realm.POLICY_ADMINS_ONLY:
return False
if self.is_moderator:
return True
if policy_value == Realm.POLICY_MODERATORS_ONLY:
return False
if self.is_guest:
return False
if policy_value == Realm.POLICY_MEMBERS_ONLY:
return True
assert policy_value == Realm.POLICY_FULL_MEMBERS_ONLY
return not self.is_provisional_member
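# Illustrative walk-through of the policy cascade above (hypothetical realm settings):
# for a moderator in a realm whose create_stream_policy is POLICY_MODERATORS_ONLY, the
# POLICY_NOBODY and POLICY_ADMINS_ONLY checks fall through and the is_moderator branch
# returns True; any non-moderator member or guest in that realm returns False at the
# POLICY_MODERATORS_ONLY check.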
def can_create_streams(self) -> bool:
return self.has_permission("create_stream_policy")
def can_subscribe_other_users(self) -> bool:
return self.has_permission("invite_to_stream_policy")
def can_invite_others_to_realm(self) -> bool:
return self.has_permission("invite_to_realm_policy")
def can_move_messages_between_streams(self) -> bool:
return self.has_permission("move_messages_between_streams_policy")
def can_edit_user_groups(self) -> bool:
return self.has_permission("user_group_edit_policy")
def can_edit_topic_of_any_message(self) -> bool:
if self.realm.edit_topic_policy == Realm.POLICY_EVERYONE:
return True
return self.has_permission("edit_topic_policy")
def can_add_custom_emoji(self) -> bool:
return self.has_permission("add_custom_emoji_policy")
def can_access_public_streams(self) -> bool:
return not (self.is_guest or self.realm.is_zephyr_mirror_realm)
def major_tos_version(self) -> int:
if self.tos_version is not None:
return int(self.tos_version.split(".")[0])
else:
return -1
def format_requestor_for_logs(self) -> str:
return "{}@{}".format(self.id, self.realm.string_id or "root")
def set_password(self, password: Optional[str]) -> None:
if password is None:
self.set_unusable_password()
return
from zproject.backends import check_password_strength
if not check_password_strength(password):
raise PasswordTooWeakError
super().set_password(password)
class PasswordTooWeakError(Exception):
pass
class UserGroup(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
name: str = models.CharField(max_length=100)
members: Manager = models.ManyToManyField(UserProfile, through="UserGroupMembership")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
description: str = models.TextField(default="")
class Meta:
unique_together = (("realm", "name"),)
class UserGroupMembership(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_group: UserGroup = models.ForeignKey(UserGroup, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
class Meta:
unique_together = (("user_group", "user_profile"),)
def remote_user_to_email(remote_user: str) -> str:
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
# Data on a partially created user, before the completion of
# registration. This is used in at least three major code paths:
# * Realm creation, in which case realm is None.
#
# * Invitations, in which case referred_by will always be set.
#
# * Social authentication signup, where it's used to store data
# from the authentication step and pass it to the registration
# form.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
email: str = models.EmailField()
# If the pre-registration process provides a suggested full name for this user,
# store it here to use it to prepopulate the full name field in the registration form:
full_name: Optional[str] = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH, null=True)
full_name_validated: bool = models.BooleanField(default=False)
referred_by: Optional[UserProfile] = models.ForeignKey(
UserProfile, null=True, on_delete=CASCADE
)
streams: Manager = models.ManyToManyField("Stream")
invited_at: datetime.datetime = models.DateTimeField(auto_now=True)
realm_creation: bool = models.BooleanField(default=False)
# Indicates whether the user needs a password. Users who were
# created via SSO style auth (e.g. GitHub/Google) generally do not.
password_required: bool = models.BooleanField(default=True)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status: int = models.IntegerField(default=0)
# The realm should only ever be None for PreregistrationUser
# objects created as part of realm creation.
realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
# These values should be consistent with the values
# in settings_config.user_role_values.
INVITE_AS = dict(
REALM_OWNER=100,
REALM_ADMIN=200,
MODERATOR=300,
MEMBER=400,
GUEST_USER=600,
)
invited_as: int = models.PositiveSmallIntegerField(default=INVITE_AS["MEMBER"])
def filter_to_valid_prereg_users(query: QuerySet) -> QuerySet:
days_to_activate = settings.INVITATION_LINK_VALIDITY_DAYS
active_value = confirmation_settings.STATUS_ACTIVE
revoked_value = confirmation_settings.STATUS_REVOKED
lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
return query.exclude(status__in=[active_value, revoked_value]).filter(
invited_at__gte=lowest_datetime
)
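# Illustrative sketch of the filtering above (hypothetical setting): with
# INVITATION_LINK_VALIDITY_DAYS = 10, an invitation sent 12 days ago falls outside the
# invited_at__gte cutoff and is dropped, while one sent 3 days ago that is neither
# confirmed nor revoked is kept.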
class MultiuseInvite(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
referred_by: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
streams: Manager = models.ManyToManyField("Stream")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
invited_as: int = models.PositiveSmallIntegerField(
default=PreregistrationUser.INVITE_AS["MEMBER"]
)
class EmailChangeStatus(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
new_email: str = models.EmailField()
old_email: str = models.EmailField()
updated_at: datetime.datetime = models.DateTimeField(auto_now=True)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status: int = models.IntegerField(default=0)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class AbstractPushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, "apns"),
(GCM, "gcm"),
)
kind: int = models.PositiveSmallIntegerField(choices=KINDS)
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token: str = models.CharField(max_length=4096, db_index=True)
# TODO: last_updated should be renamed date_created, since it is
# no longer maintained as a last_updated value.
last_updated: datetime.datetime = models.DateTimeField(auto_now=True)
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id: Optional[str] = models.TextField(null=True)
class Meta:
abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
# The user whose device this is
user: UserProfile = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE)
class Meta:
unique_together = ("user", "kind", "token")
def generate_email_token_for_stream() -> str:
return secrets.token_hex(16)
class Stream(models.Model):
MAX_NAME_LENGTH = 60
MAX_DESCRIPTION_LENGTH = 1024
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
deactivated: bool = models.BooleanField(default=False)
description: str = models.CharField(max_length=MAX_DESCRIPTION_LENGTH, default="")
rendered_description: str = models.TextField(default="")
# Foreign key to the Recipient object for STREAM type messages to this stream.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
invite_only: Optional[bool] = models.BooleanField(null=True, default=False)
history_public_to_subscribers: bool = models.BooleanField(default=False)
# Whether this stream's content should be published by the web-public archive features
is_web_public: bool = models.BooleanField(default=False)
STREAM_POST_POLICY_EVERYONE = 1
STREAM_POST_POLICY_ADMINS = 2
STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS = 3
STREAM_POST_POLICY_MODERATORS = 4
# TODO: Implement policy to restrict posting to a user group or admins.
# Who in the organization has permission to send messages to this stream.
stream_post_policy: int = models.PositiveSmallIntegerField(default=STREAM_POST_POLICY_EVERYONE)
STREAM_POST_POLICY_TYPES = [
STREAM_POST_POLICY_EVERYONE,
STREAM_POST_POLICY_ADMINS,
STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS,
STREAM_POST_POLICY_MODERATORS,
]
# The unique thing about Zephyr public streams is that we never list their
# users. We may try to generalize this concept later, but for now
# we just use a concrete field. (Zephyr public streams aren't exactly like
# invite-only streams--while both are private in terms of listing users,
# for Zephyr we don't even list users to stream members, yet membership
# is more public in the sense that you don't need a Zulip invite to join.)
# This field is populated directly from UserProfile.is_zephyr_mirror_realm,
# and the reason for denormalizing this field is performance.
is_in_zephyr_realm: bool = models.BooleanField(default=False)
# Used by the e-mail forwarder. The e-mail RFC specifies a maximum
# e-mail length of 254, and our max stream name length is 60, so we
# have plenty of room for the token.
email_token: str = models.CharField(
max_length=32,
default=generate_email_token_for_stream,
unique=True,
)
# For old messages being automatically deleted.
# Value NULL means "use retention policy of the realm".
# Value -1 means "disable retention policy for this stream unconditionally".
# Non-negative values have the natural meaning of "archive messages older than <value> days".
MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
"forever": -1,
"realm_default": None,
}
message_retention_days: Optional[int] = models.IntegerField(null=True, default=None)
# The very first message ID in the stream. Used to help clients
# determine whether they might need to display "more topics" for a
# stream based on what messages they have cached.
first_message_id: Optional[int] = models.IntegerField(null=True, db_index=True)
def __str__(self) -> str:
return f"<Stream: {self.name}>"
def is_public(self) -> bool:
# All streams are private in Zephyr mirroring realms.
return not self.invite_only and not self.is_in_zephyr_realm
def is_history_realm_public(self) -> bool:
return self.is_public()
def is_history_public_to_subscribers(self) -> bool:
return self.history_public_to_subscribers
# Stream fields included whenever a Stream object is provided to
# Zulip clients via the API. A few details worth noting:
# * "id" is represented as "stream_id" in most API interfaces.
# * "email_token" is not realm-public and thus is not included here.
# * is_in_zephyr_realm is a backend-only optimization.
# * "deactivated" streams are filtered from the API entirely.
# * "realm" and "recipient" are not exposed to clients via the API.
API_FIELDS = [
"name",
"id",
"description",
"rendered_description",
"invite_only",
"is_web_public",
"stream_post_policy",
"history_public_to_subscribers",
"first_message_id",
"message_retention_days",
"date_created",
]
@staticmethod
def get_client_data(query: QuerySet) -> List[Dict[str, Any]]:
query = query.only(*Stream.API_FIELDS)
return [row.to_dict() for row in query]
def to_dict(self) -> Dict[str, Any]:
result = {}
for field_name in self.API_FIELDS:
if field_name == "id":
result["stream_id"] = self.id
continue
elif field_name == "date_created":
result["date_created"] = datetime_to_timestamp(self.date_created)
continue
result[field_name] = getattr(self, field_name)
result["is_announcement_only"] = self.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS
return result
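# Illustrative sketch of the to_dict() renaming above (hypothetical values): a Stream
# with id=42 and date_created at the Unix epoch serializes with "stream_id": 42 and
# "date_created": 0, alongside the remaining API_FIELDS plus the derived
# "is_announcement_only" flag.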
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
class UserTopic(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
topic_name: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
# The default value for date_muted is a few weeks before tracking
# of when topics were muted was first introduced. It's designed
# to be obviously incorrect so that users can tell it's backfilled data.
date_muted: datetime.datetime = models.DateTimeField(
default=datetime.datetime(2020, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)
)
class Meta:
unique_together = ("user_profile", "stream", "topic_name")
# This model was originally called "MutedTopic". We
# generalized it to "UserTopic", but have not yet done the
# database migration to rename the table and indexes.
db_table = "zerver_mutedtopic"
def __str__(self) -> str:
return f"<UserTopic: ({self.user_profile.email}, {self.stream.name}, {self.topic_name}, {self.date_muted})>"
class MutedUser(models.Model):
user_profile = models.ForeignKey(UserProfile, related_name="+", on_delete=CASCADE)
muted_user = models.ForeignKey(UserProfile, related_name="+", on_delete=CASCADE)
date_muted: datetime.datetime = models.DateTimeField(default=timezone_now)
class Meta:
unique_together = ("user_profile", "muted_user")
def __str__(self) -> str:
return f"<MutedUser: {self.user_profile.email} -> {self.muted_user.email}>"
post_save.connect(flush_muting_users_cache, sender=MutedUser)
post_delete.connect(flush_muting_users_cache, sender=MutedUser)
class Client(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
name: str = models.CharField(max_length=30, db_index=True, unique=True)
def __str__(self) -> str:
return f"<Client: {self.name}>"
get_client_cache: Dict[str, Client] = {}
def clear_client_cache() -> None: # nocoverage
global get_client_cache
get_client_cache = {}
def get_client(name: str) -> Client:
# Accessing KEY_PREFIX through the module is necessary
# because we need the updated value of the variable.
cache_name = cache.KEY_PREFIX + name
if cache_name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[cache_name] = result
return get_client_cache[cache_name]
def get_client_cache_key(name: str) -> str:
return f"get_client:{make_safe_digest(name)}"
@cache_with_key(get_client_cache_key, timeout=3600 * 24 * 7)
def get_client_remote_cache(name: str) -> Client:
(client, _) = Client.objects.get_or_create(name=name)
return client
@cache_with_key(get_stream_cache_key, timeout=3600 * 24 * 7)
def get_realm_stream(stream_name: str, realm_id: int) -> Stream:
return Stream.objects.select_related().get(name__iexact=stream_name.strip(), realm_id=realm_id)
def get_active_streams(realm: Optional[Realm]) -> QuerySet:
# TODO: Change return type to QuerySet[Stream]
# NOTE: Return value is used as a QuerySet, so cannot currently be Sequence[Stream]
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name: str, realm: Realm) -> Stream:
"""
Callers that don't have a Realm object already available should use
get_realm_stream directly, to avoid unnecessarily fetching the
Realm object.
"""
return get_realm_stream(stream_name, realm.id)
def get_stream_by_id_in_realm(stream_id: int, realm: Realm) -> Stream:
return Stream.objects.select_related().get(id=stream_id, realm=realm)
def bulk_get_streams(realm: Realm, stream_names: STREAM_NAMES) -> Dict[str, Any]:
def fetch_streams_by_name(stream_names: List[str]) -> Sequence[Stream]:
#
# This should be just
#
# Stream.objects.select_related().filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
where_clause = (
"upper(zerver_stream.name::text) IN (SELECT upper(name) FROM unnest(%s) AS name)"
)
return (
get_active_streams(realm)
.select_related()
.extra(where=[where_clause], params=(list(stream_names),))
)
def stream_name_to_cache_key(stream_name: str) -> str:
return get_stream_cache_key(stream_name, realm.id)
def stream_to_lower_name(stream: Stream) -> str:
return stream.name.lower()
return bulk_cached_fetch(
stream_name_to_cache_key,
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=stream_to_lower_name,
)
def get_huddle_recipient(user_profile_ids: Set[int]) -> Recipient:
# The caller should ensure that user_profile_ids includes
# the sender. Note that get_huddle hits the cache, and then
# we hit another cache to get the recipient. We may want to
# unify our caching strategy here.
huddle = get_huddle(list(user_profile_ids))
return huddle.recipient
def get_huddle_user_ids(recipient: Recipient) -> List[int]:
assert recipient.type == Recipient.HUDDLE
return (
Subscription.objects.filter(
recipient=recipient,
)
.order_by("user_profile_id")
.values_list("user_profile_id", flat=True)
)
def bulk_get_huddle_user_ids(recipients: List[Recipient]) -> Dict[int, List[int]]:
"""
Takes a list of huddle-type recipients, returns a dict
mapping recipient id to list of user ids in the huddle.
"""
assert all(recipient.type == Recipient.HUDDLE for recipient in recipients)
if not recipients:
return {}
subscriptions = Subscription.objects.filter(
recipient__in=recipients,
).order_by("user_profile_id")
result_dict: Dict[int, List[int]] = {}
for recipient in recipients:
result_dict[recipient.id] = [
subscription.user_profile_id
for subscription in subscriptions
if subscription.recipient_id == recipient.id
]
return result_dict
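# Illustrative sketch of the return shape above (hypothetical ids): for huddle
# recipients with ids 11 and 12, the result maps recipient id to the sorted user ids
# subscribed to that huddle, e.g. {11: [1, 2, 3], 12: [2, 4]}.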
class AbstractMessage(models.Model):
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# The message's topic.
#
# Early versions of Zulip called this concept a "subject", as in an email
# "subject line", before changing to "topic" in 2013 (commit dac5a46fa).
# UI and user documentation now consistently say "topic". New APIs and
# new code should generally also say "topic".
#
# See also the `topic_name` method on `Message`.
subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
content: str = models.TextField()
rendered_content: Optional[str] = models.TextField(null=True)
rendered_content_version: Optional[int] = models.IntegerField(null=True)
date_sent: datetime.datetime = models.DateTimeField("date sent", db_index=True)
sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
last_edit_time: Optional[datetime.datetime] = models.DateTimeField(null=True)
# A JSON-encoded list of objects describing any past edits to this
# message, oldest first.
edit_history: Optional[str] = models.TextField(null=True)
has_attachment: bool = models.BooleanField(default=False, db_index=True)
has_image: bool = models.BooleanField(default=False, db_index=True)
has_link: bool = models.BooleanField(default=False, db_index=True)
class Meta:
abstract = True
def __str__(self) -> str:
display_recipient = get_display_recipient(self.recipient)
return f"<{self.__class__.__name__}: {display_recipient} / {self.subject} / {self.sender}>"
class ArchiveTransaction(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
timestamp: datetime.datetime = models.DateTimeField(default=timezone_now, db_index=True)
# Marks if the data archived in this transaction has been restored:
restored: bool = models.BooleanField(default=False, db_index=True)
type: int = models.PositiveSmallIntegerField(db_index=True)
# Valid types:
RETENTION_POLICY_BASED = 1 # Archiving was executed due to automated retention policies
MANUAL = 2 # Archiving was run manually, via move_messages_to_archive function
# ForeignKey to the realm with which objects archived in this transaction are associated.
# If type is set to MANUAL, this should be null.
realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
def __str__(self) -> str:
return "ArchiveTransaction id: {id}, type: {type}, realm: {realm}, timestamp: {timestamp}".format(
id=self.id,
type="MANUAL" if self.type == self.MANUAL else "RETENTION_POLICY_BASED",
realm=self.realm.string_id if self.realm else None,
timestamp=self.timestamp,
)
class ArchivedMessage(AbstractMessage):
"""Used as a temporary holding place for deleted messages before they
are permanently deleted. This is an important part of a robust
'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
archive_transaction: ArchiveTransaction = models.ForeignKey(
ArchiveTransaction, on_delete=CASCADE
)
class Message(AbstractMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
def topic_name(self) -> str:
"""
Please start using this helper to facilitate an
eventual switch over to a separate topic table.
"""
return self.subject
def set_topic_name(self, topic_name: str) -> None:
self.subject = topic_name
def is_stream_message(self) -> bool:
"""
Find out whether a message is a stream message by
looking up its recipient.type. TODO: Make this
an easier operation by denormalizing the message
type onto Message, either explicitly (message.type)
or implicitly (message.stream_id is not None).
"""
return self.recipient.type == Recipient.STREAM
def get_realm(self) -> Realm:
return self.sender.realm
def save_rendered_content(self) -> None:
self.save(update_fields=["rendered_content", "rendered_content_version"])
@staticmethod
def need_to_render_content(
rendered_content: Optional[str],
rendered_content_version: Optional[int],
markdown_version: int,
) -> bool:
return (
rendered_content is None
or rendered_content_version is None
or rendered_content_version < markdown_version
)
def sent_by_human(self) -> bool:
"""Used to determine whether a message was sent by a full Zulip UI
style client (and thus whether the message should be treated
as sent by a human and automatically marked as read for the
sender). The purpose of this distinction is to ensure that
        messages sent to the user by e.g. a Google Calendar integration
using the user's own API key don't get marked as read
automatically.
"""
sending_client = self.sending_client.name.lower()
return (
sending_client
in (
"zulipandroid",
"zulipios",
"zulipdesktop",
"zulipmobile",
"zulipelectron",
"zulipterminal",
"snipe",
"website",
"ios",
"android",
)
) or ("desktop app" in sending_client)
@staticmethod
def is_status_message(content: str, rendered_content: str) -> bool:
"""
"status messages" start with /me and have special rendering:
/me loves chocolate -> Full Name loves chocolate
"""
if content.startswith("/me "):
return True
return False
def get_context_for_message(message: Message) -> Sequence[Message]:
# TODO: Change return type to QuerySet[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
date_sent__gt=message.date_sent - timedelta(minutes=15),
).order_by("-id")[:10]
post_save.connect(flush_message, sender=Message)
class AbstractSubMessage(models.Model):
# We can send little text messages that are associated with a regular
# Zulip message. These can be used for experimental widgets like embedded
# games, surveys, mini threads, etc. These are designed to be pretty
# generic in purpose.
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
msg_type: str = models.TextField()
content: str = models.TextField()
class Meta:
abstract = True
class SubMessage(AbstractSubMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
fields = ["id", "message_id", "sender_id", "msg_type", "content"]
query = SubMessage.objects.filter(message_id__in=needed_ids).values(*fields)
query = query.order_by("message_id", "id")
return list(query)
class ArchivedSubMessage(AbstractSubMessage):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
post_save.connect(flush_submessage, sender=SubMessage)
class Draft(models.Model):
"""Server-side storage model for storing drafts so that drafts can be synced across
multiple clients/devices.
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
recipient: Optional[Recipient] = models.ForeignKey(
Recipient, null=True, on_delete=models.SET_NULL
)
topic: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
content: str = models.TextField() # Length should not exceed MAX_MESSAGE_LENGTH
last_edit_time: datetime.datetime = models.DateTimeField(db_index=True)
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.user_profile.email} / {self.id} / {self.last_edit_time}>"
def to_dict(self) -> Dict[str, Any]:
if self.recipient is None:
_type = ""
to = []
elif self.recipient.type == Recipient.STREAM:
_type = "stream"
to = [self.recipient.type_id]
else:
_type = "private"
if self.recipient.type == Recipient.PERSONAL:
to = [self.recipient.type_id]
else:
to = []
for r in get_display_recipient(self.recipient):
assert not isinstance(r, str) # It will only be a string for streams
if not r["id"] == self.user_profile_id:
to.append(r["id"])
return {
"id": self.id,
"type": _type,
"to": to,
"topic": self.topic,
"content": self.content,
"timestamp": int(self.last_edit_time.timestamp()),
}
class AbstractEmoji(models.Model):
"""For emoji reactions to messages (and potentially future reaction types).
Emoji are surprisingly complicated to implement correctly. For details
on how this subsystem works, see:
https://zulip.readthedocs.io/en/latest/subsystems/emoji.html
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# The user-facing name for an emoji reaction. With emoji aliases,
# there may be multiple accepted names for a given emoji; this
# field encodes which one the user selected.
emoji_name: str = models.TextField()
UNICODE_EMOJI = "unicode_emoji"
REALM_EMOJI = "realm_emoji"
ZULIP_EXTRA_EMOJI = "zulip_extra_emoji"
REACTION_TYPES = (
(UNICODE_EMOJI, gettext_lazy("Unicode emoji")),
(REALM_EMOJI, gettext_lazy("Custom emoji")),
(ZULIP_EXTRA_EMOJI, gettext_lazy("Zulip extra emoji")),
)
reaction_type: str = models.CharField(
default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30
)
# A string that uniquely identifies a particular emoji. The format varies
# by type:
#
# * For Unicode emoji, a dash-separated hex encoding of the sequence of
# Unicode codepoints that define this emoji in the Unicode
# specification. For examples, see "non_qualified" or "unified" in the
# following data, with "non_qualified" taking precedence when both present:
# https://raw.githubusercontent.com/iamcal/emoji-data/master/emoji_pretty.json
#
# * For realm emoji (aka user uploaded custom emoji), the ID
# (in ASCII decimal) of the RealmEmoji object.
#
# * For "Zulip extra emoji" (like :zulip:), the filename of the emoji.
emoji_code: str = models.TextField()
class Meta:
abstract = True
class AbstractReaction(AbstractEmoji):
class Meta:
abstract = True
unique_together = (
("user_profile", "message", "emoji_name"),
("user_profile", "message", "reaction_type", "emoji_code"),
)
class Reaction(AbstractReaction):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
fields = [
"message_id",
"emoji_name",
"emoji_code",
"reaction_type",
"user_profile__email",
"user_profile_id",
"user_profile__full_name",
]
return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
def __str__(self) -> str:
return f"{self.user_profile.email} / {self.message.id} / {self.emoji_name}"
class ArchivedReaction(AbstractReaction):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
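# Illustrative sketch (not part of the original module): constructing an unsaved
# Reaction for a Unicode emoji, where emoji_name is the user-facing alias and
# emoji_code is the dash-separated hex codepoint sequence described above. The
# specific alias/code pair here is just an example, not an assertion about the
# emoji data Zulip ships.
def _example_unicode_reaction(user_profile: UserProfile, message: Message) -> Reaction:
    return Reaction(
        user_profile=user_profile,
        message=message,
        emoji_name="+1",
        emoji_code="1f44d",
        reaction_type=Reaction.UNICODE_EMOJI,
    )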
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object (that is not long-term idle), we add
# a row to the UserMessage table indicating that that user received
# that message. This table allows us to quickly query any user's last
# 1000 messages to generate the home view and search exactly the
# user's message history.
#
# The long-term idle optimization is extremely important for large,
# open organizations, and is described in detail here:
# https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
#
# In particular, new messages to public streams will only generate
# UserMessage rows for Members who are long_term_idle if they would
# have nonzero flags for the message (E.g. a mention, alert word, or
# mobile push notification).
#
# The flags field stores metadata like whether the user has read the
# message, starred or collapsed the message, was mentioned in the
# message, etc. We use PostgreSQL partial indexes on flags to make
# queries for "User X's messages with flag Y" extremely fast without
# consuming much storage space.
#
# UserMessage is the largest table in many Zulip installations, even
# though each row is only 4 integers.
class AbstractUserMessage(models.Model):
id: int = models.BigAutoField(primary_key=True)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# The order here is important! It's the order of fields in the bitfield.
ALL_FLAGS = [
"read",
"starred",
"collapsed",
"mentioned",
"wildcard_mentioned",
# These next 4 flags are from features that have since been removed.
"summarize_in_home",
"summarize_in_stream",
"force_expand",
"force_collapse",
# Whether the message contains any of the user's alert words.
"has_alert_word",
# The historical flag is used to mark messages which the user
# did not receive when they were sent, but later added to
# their history via e.g. starring the message. This is
        # important for accounting for the "Subscribed to stream" dividers.
"historical",
# Whether the message is a private message; this flag is a
# denormalization of message.recipient.type to support an
# efficient index on UserMessage for a user's private messages.
"is_private",
# Whether we've sent a push notification to the user's mobile
# devices for this message that has not been revoked.
"active_mobile_push_notification",
]
# Certain flags are used only for internal accounting within the
# Zulip backend, and don't make sense to expose to the API.
NON_API_FLAGS = {"is_private", "active_mobile_push_notification"}
# Certain additional flags are just set once when the UserMessage
# row is created.
NON_EDITABLE_FLAGS = {
# These flags are bookkeeping and don't make sense to edit.
"has_alert_word",
"mentioned",
"wildcard_mentioned",
"historical",
# Unused flags can't be edited.
"force_expand",
"force_collapse",
"summarize_in_home",
"summarize_in_stream",
}
flags: BitHandler = BitField(flags=ALL_FLAGS, default=0)
class Meta:
abstract = True
unique_together = ("user_profile", "message")
@staticmethod
def where_unread() -> str:
        # Use this for Django ORM queries to access unread messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
return "flags & 1 = 0"
@staticmethod
def where_starred() -> str:
# Use this for Django ORM queries to access starred messages.
# This custom SQL plays nice with our partial indexes. Grep
# the code for example usage.
#
# The key detail is that e.g.
# UserMessage.objects.filter(user_profile=user_profile, flags=UserMessage.flags.starred)
# will generate a query involving `flags & 2 = 2`, which doesn't match our index.
return "flags & 2 <> 0"
@staticmethod
def where_active_push_notification() -> str:
# See where_starred for documentation.
return "flags & 4096 <> 0"
def flags_list(self) -> List[str]:
flags = int(self.flags)
return self.flags_list_for_flags(flags)
@staticmethod
def flags_list_for_flags(val: int) -> List[str]:
"""
This function is highly optimized, because it actually slows down
sending messages in a naive implementation.
"""
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if (val & mask) and flag not in AbstractUserMessage.NON_API_FLAGS:
flags.append(flag)
mask <<= 1
return flags
def __str__(self) -> str:
display_recipient = get_display_recipient(self.message.recipient)
return f"<{self.__class__.__name__}: {display_recipient} / {self.user_profile.email} ({self.flags_list()})>"
class UserMessage(AbstractUserMessage):
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
def get_usermessage_by_message_id(
user_profile: UserProfile, message_id: int
) -> Optional[UserMessage]:
try:
return UserMessage.objects.select_related().get(
user_profile=user_profile, message_id=message_id
)
except UserMessage.DoesNotExist:
return None
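# Illustrative sketch (not part of the original module) of how the raw SQL
# helpers above are meant to be used: passing them via .extra(where=[...])
# produces conditions like "flags & 1 = 0" that match the partial indexes,
# unlike the ORM spelling flags=UserMessage.flags.starred, which generates
# "flags & 2 = 2". With the flag order in ALL_FLAGS, "read" is bit 0 (mask 1),
# "starred" is bit 1 (mask 2), and "active_mobile_push_notification" is bit 12
# (mask 4096), which is why where_active_push_notification() checks 4096.
def _example_unread_message_ids(user_profile: UserProfile) -> List[int]:
    query = (
        UserMessage.objects.filter(user_profile=user_profile)
        .extra(where=[UserMessage.where_unread()])
        .values_list("message_id", flat=True)
    )
    return list(query)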
class ArchivedUserMessage(AbstractUserMessage):
"""Used as a temporary holding place for deleted UserMessages objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
class AbstractAttachment(models.Model):
file_name: str = models.TextField(db_index=True)
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id: str = models.TextField(db_index=True, unique=True)
owner: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Optional[Realm] = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)
create_time: datetime.datetime = models.DateTimeField(
default=timezone_now,
db_index=True,
)
# Size of the uploaded file, in bytes
size: int = models.IntegerField()
    # The two fields below let us avoid looking up the corresponding
# messages/streams to check permissions before serving these files.
# Whether this attachment has been posted to a public stream, and
# thus should be available to all non-guest users in the
# organization (even if they weren't a recipient of a message
# linking to it).
is_realm_public: bool = models.BooleanField(default=False)
# Whether this attachment has been posted to a web-public stream,
# and thus should be available to everyone on the internet, even
# if the person isn't logged in.
is_web_public: bool = models.BooleanField(default=False)
class Meta:
abstract = True
def __str__(self) -> str:
return f"<{self.__class__.__name__}: {self.file_name}>"
class ArchivedAttachment(AbstractAttachment):
"""Used as a temporary holding place for deleted Attachment objects
before they are permanently deleted. This is an important part of
a robust 'message retention' feature.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
messages: Manager = models.ManyToManyField(ArchivedMessage)
class Attachment(AbstractAttachment):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
messages: Manager = models.ManyToManyField(Message)
def is_claimed(self) -> bool:
return self.messages.count() > 0
def to_dict(self) -> Dict[str, Any]:
return {
"id": self.id,
"name": self.file_name,
"path_id": self.path_id,
"size": self.size,
# convert to JavaScript-style UNIX timestamp so we can take
# advantage of client timezones.
"create_time": int(time.mktime(self.create_time.timetuple()) * 1000),
"messages": [
{
"id": m.id,
"date_sent": int(time.mktime(m.date_sent.timetuple()) * 1000),
}
for m in self.messages.all()
],
}
post_save.connect(flush_used_upload_space_cache, sender=Attachment)
post_delete.connect(flush_used_upload_space_cache, sender=Attachment)
def validate_attachment_request(user_profile: UserProfile, path_id: str) -> Optional[bool]:
try:
attachment = Attachment.objects.get(path_id=path_id)
except Attachment.DoesNotExist:
return None
if user_profile == attachment.owner:
# If you own the file, you can access it.
return True
if (
attachment.is_realm_public
and attachment.realm == user_profile.realm
and user_profile.can_access_public_streams()
):
# Any user in the realm can access realm-public files
return True
messages = attachment.messages.all()
if UserMessage.objects.filter(user_profile=user_profile, message__in=messages).exists():
# If it was sent in a private message or private stream
# message, then anyone who received that message can access it.
return True
# The user didn't receive any of the messages that included this
# attachment. But they might still have access to it, if it was
# sent to a stream they are on where history is public to
# subscribers.
# These are subscriptions to a stream one of the messages was sent to
relevant_stream_ids = Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__in=[m.recipient_id for m in messages],
).values_list("recipient__type_id", flat=True)
if len(relevant_stream_ids) == 0:
return False
return Stream.objects.filter(
id__in=relevant_stream_ids, history_public_to_subscribers=True
).exists()
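# Illustrative sketch (not part of the original module): callers typically treat
# the Optional[bool] result above as a tri-state -- None means no such
# attachment exists, True/False means access is granted/denied.
def _example_attachment_access(user_profile: UserProfile, path_id: str) -> str:
    result = validate_attachment_request(user_profile, path_id)
    if result is None:
        return "not found"
    return "allowed" if result else "forbidden"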
def get_old_unclaimed_attachments(weeks_ago: int) -> Sequence[Attachment]:
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone_now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
# Whether the user has since unsubscribed. We mark Subscription
# objects as inactive, rather than deleting them, when a user
# unsubscribes, so we can preserve user customizations like
# notification settings, stream color, etc., if the user later
# resubscribes.
active: bool = models.BooleanField(default=True)
# This is a denormalization designed to improve the performance of
    # bulk queries of Subscription objects. Whether the subscribed user
# is active tends to be a key condition in those queries.
# We intentionally don't specify a default value to promote thinking
# about this explicitly, as in some special cases, such as data import,
# we may be creating Subscription objects for a user that's deactivated.
is_user_active: bool = models.BooleanField()
ROLE_STREAM_ADMINISTRATOR = 20
ROLE_MEMBER = 50
ROLE_TYPES = [
ROLE_STREAM_ADMINISTRATOR,
ROLE_MEMBER,
]
role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
    # Whether this user has muted this stream.
is_muted: Optional[bool] = models.BooleanField(null=True, default=False)
DEFAULT_STREAM_COLOR = "#c2c2c2"
color: str = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
pin_to_top: bool = models.BooleanField(default=False)
# These fields are stream-level overrides for the user's default
# configuration for notification, configured in UserProfile. The
# default, None, means we just inherit the user-level default.
desktop_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
audible_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
push_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
email_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
wildcard_mentions_notify: Optional[bool] = models.BooleanField(null=True, default=None)
class Meta:
unique_together = ("user_profile", "recipient")
indexes = [
models.Index(
fields=("recipient", "user_profile"),
name="zerver_subscription_recipient_id_user_profile_id_idx",
condition=Q(active=True, is_user_active=True),
),
]
def __str__(self) -> str:
return f"<Subscription: {self.user_profile} -> {self.recipient}>"
@property
def is_stream_admin(self) -> bool:
return self.role == Subscription.ROLE_STREAM_ADMINISTRATOR
# Subscription fields included whenever a Subscription object is provided to
# Zulip clients via the API. A few details worth noting:
# * These fields will generally be merged with Stream.API_FIELDS
# data about the stream.
# * "user_profile" is usually implied as full API access to Subscription
# is primarily done for the current user; API access to other users'
# subscriptions is generally limited to boolean yes/no.
# * "id" and "recipient_id" are not included as they are not used
# in the Zulip API; it's an internal implementation detail.
# Subscription objects are always looked up in the API via
# (user_profile, stream) pairs.
# * "active" is often excluded in API use cases where it is implied.
# * "is_muted" often needs to be copied to not "in_home_view" for
# backwards-compatibility.
API_FIELDS = [
"color",
"is_muted",
"pin_to_top",
"audible_notifications",
"desktop_notifications",
"email_notifications",
"push_notifications",
"wildcard_mentions_notify",
"role",
]
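# Illustrative sketch (not part of the original module): a client-facing
# subscription dict is typically assembled from these fields and then merged
# with the corresponding Stream.API_FIELDS data, per the comment above.
def _example_subscription_api_dict(sub: Subscription) -> Dict[str, Any]:
    return {field: getattr(sub, field) for field in Subscription.API_FIELDS}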
@cache_with_key(user_profile_by_id_cache_key, timeout=3600 * 24 * 7)
def get_user_profile_by_id(uid: int) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid)
def get_user_profile_by_email(email: str) -> UserProfile:
"""This function is intended to be used for
manual manage.py shell work; robust code must use get_user or
get_user_by_delivery_email instead, because Zulip supports
multiple users with a given (delivery) email address existing on a
single server (in different realms).
"""
return UserProfile.objects.select_related().get(delivery_email__iexact=email.strip())
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600 * 24 * 7)
def maybe_get_user_profile_by_api_key(api_key: str) -> Optional[UserProfile]:
try:
return UserProfile.objects.select_related().get(api_key=api_key)
except UserProfile.DoesNotExist:
# We will cache failed lookups with None. The
# use case here is that broken API clients may
# continually ask for the same wrong API key, and
# we want to handle that as quickly as possible.
return None
def get_user_profile_by_api_key(api_key: str) -> UserProfile:
user_profile = maybe_get_user_profile_by_api_key(api_key)
if user_profile is None:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_by_delivery_email(email: str, realm: Realm) -> UserProfile:
"""Fetches a user given their delivery email. For use in
authentication/registration contexts. Do not use for user-facing
views (e.g. Zulip API endpoints) as doing so would violate the
EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
those code paths.
"""
return UserProfile.objects.select_related().get(
delivery_email__iexact=email.strip(), realm=realm
)
def get_users_by_delivery_email(emails: Set[str], realm: Realm) -> QuerySet:
"""This is similar to get_user_by_delivery_email, and
it has the same security caveats. It gets multiple
users and returns a QuerySet, since most callers
will only need two or three fields.
If you are using this to get large UserProfile objects, you are
probably making a mistake, but if you must,
then use `select_related`.
"""
"""
Django doesn't support delivery_email__iexact__in, so
we simply OR all the filters that we'd do for the
one-email case.
"""
email_filter = Q()
for email in emails:
email_filter |= Q(delivery_email__iexact=email.strip())
return UserProfile.objects.filter(realm=realm).filter(email_filter)
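# Illustrative sketch (not part of the original module): because the helper
# above returns a QuerySet, callers can restrict the fetch to the handful of
# columns they actually need, e.g. building an email -> id mapping.
def _example_user_ids_by_delivery_email(emails: Set[str], realm: Realm) -> Dict[str, int]:
    rows = get_users_by_delivery_email(emails, realm).values("id", "delivery_email")
    return {row["delivery_email"].lower(): row["id"] for row in rows}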
@cache_with_key(user_profile_cache_key, timeout=3600 * 24 * 7)
def get_user(email: str, realm: Realm) -> UserProfile:
"""Fetches the user by its visible-to-other users username (in the
`email` field). For use in API contexts; do not use in
authentication/registration contexts as doing so will break
authentication in organizations using
EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
get_user_by_delivery_email.
"""
return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
def get_active_user(email: str, realm: Realm) -> UserProfile:
"""Variant of get_user_by_email that excludes deactivated users.
See get_user docstring for important usage notes."""
user_profile = get_user(email, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
return UserProfile.objects.select_related().get(id=uid, realm=realm)
def get_active_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
user_profile = get_user_profile_by_id_in_realm(uid, realm)
if not user_profile.is_active:
raise UserProfile.DoesNotExist()
return user_profile
def get_user_including_cross_realm(email: str, realm: Realm) -> UserProfile:
if is_cross_realm_bot_email(email):
return get_system_bot(email, realm.id)
assert realm is not None
return get_user(email, realm)
@cache_with_key(bot_profile_cache_key, timeout=3600 * 24 * 7)
def get_system_bot(email: str, realm_id: int) -> UserProfile:
"""
This function doesn't use the realm_id argument yet, but requires
passing it as preparation for adding system bots to each realm instead
of having them all in a separate system bot realm.
If you're calling this function, use the id of the realm in which the system
bot will be after that migration. If the bot is supposed to send a message,
the same realm as the one *to* which the message will be sent should be used - because
cross-realm messages will be eliminated as part of the migration.
"""
return UserProfile.objects.select_related().get(email__iexact=email.strip())
def get_user_by_id_in_realm_including_cross_realm(
uid: int,
realm: Optional[Realm],
) -> UserProfile:
user_profile = get_user_profile_by_id(uid)
if user_profile.realm == realm:
return user_profile
# Note: This doesn't validate whether the `realm` passed in is
# None/invalid for the CROSS_REALM_BOT_EMAILS case.
if user_profile.delivery_email in settings.CROSS_REALM_BOT_EMAILS:
return user_profile
raise UserProfile.DoesNotExist()
@cache_with_key(realm_user_dicts_cache_key, timeout=3600 * 24 * 7)
def get_realm_user_dicts(realm_id: int) -> List[Dict[str, Any]]:
return UserProfile.objects.filter(
realm_id=realm_id,
).values(*realm_user_dict_fields)
@cache_with_key(active_user_ids_cache_key, timeout=3600 * 24 * 7)
def active_user_ids(realm_id: int) -> List[int]:
query = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).values_list("id", flat=True)
return list(query)
@cache_with_key(active_non_guest_user_ids_cache_key, timeout=3600 * 24 * 7)
def active_non_guest_user_ids(realm_id: int) -> List[int]:
query = (
UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
)
.exclude(
role=UserProfile.ROLE_GUEST,
)
.values_list("id", flat=True)
)
return list(query)
def get_source_profile(email: str, realm_id: int) -> Optional[UserProfile]:
try:
return get_user_by_delivery_email(email, get_realm_by_id(realm_id))
except (Realm.DoesNotExist, UserProfile.DoesNotExist):
return None
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600 * 24 * 7)
def get_bot_dicts_in_realm(realm: Realm) -> List[Dict[str, Any]]:
return UserProfile.objects.filter(realm=realm, is_bot=True).values(*bot_dict_fields)
def is_cross_realm_bot_email(email: str) -> bool:
return email.lower() in settings.CROSS_REALM_BOT_EMAILS
# The Huddle class represents a group of individuals who have had a
# group private message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash: str = models.CharField(max_length=40, db_index=True, unique=True)
# Foreign key to the Recipient object for this Huddle.
recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
def get_huddle_hash(id_list: List[int]) -> str:
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
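# Illustrative sketch (not part of the original module): the hash key is built
# from the sorted, de-duplicated id list, so it is insensitive to ordering and
# repeats in the caller's input.
def _example_huddle_hash_is_canonical() -> bool:
    return get_huddle_hash([3, 1, 2, 2]) == get_huddle_hash([1, 2, 3])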
def huddle_hash_cache_key(huddle_hash: str) -> str:
return f"huddle_by_hash:{huddle_hash}"
def get_huddle(id_list: List[int]) -> Huddle:
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(
lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600 * 24 * 7
)
def get_huddle_backend(huddle_hash: str, id_list: List[int]) -> Huddle:
with transaction.atomic():
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
recipient = Recipient.objects.create(type_id=huddle.id, type=Recipient.HUDDLE)
huddle.recipient = recipient
huddle.save(update_fields=["recipient"])
subs_to_create = [
Subscription(
recipient=recipient,
user_profile_id=user_profile_id,
is_user_active=is_active,
)
for user_profile_id, is_active in UserProfile.objects.filter(id__in=id_list)
.distinct("id")
.values_list("id", "is_active")
]
Subscription.objects.bulk_create(subs_to_create)
return huddle
class UserActivity(models.Model):
"""Data table recording the last time each user hit Zulip endpoints
via which Clients; unlike UserPresence, these data are not exposed
to users via the Zulip API.
Useful for debugging as well as to answer analytics questions like
"How many users have accessed the Zulip mobile app in the last
month?" or "Which users/organizations have recently used API
endpoint X that is about to be desupported" for communications
and database migration purposes.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
query: str = models.CharField(max_length=50, db_index=True)
count: int = models.IntegerField()
last_visit: datetime.datetime = models.DateTimeField("last visit")
class Meta:
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
start: datetime.datetime = models.DateTimeField("start time", db_index=True)
end: datetime.datetime = models.DateTimeField("end time", db_index=True)
class UserPresence(models.Model):
"""A record from the last time we heard from a given user on a given client.
NOTE: Users can disable updates to this table (see UserProfile.presence_enabled),
so this cannot be used to determine if a user was recently active on Zulip.
The UserActivity table is recommended for that purpose.
This is a tricky subsystem, because it is highly optimized. See the docs:
https://zulip.readthedocs.io/en/latest/subsystems/presence.html
"""
class Meta:
unique_together = ("user_profile", "client")
index_together = [
("realm", "timestamp"),
]
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
# The time we heard this update from the client.
timestamp: datetime.datetime = models.DateTimeField("presence changed")
# The user was actively using this Zulip client as of `timestamp` (i.e.,
# they had interacted with the client recently). When the timestamp is
# itself recent, this is the green "active" status in the web app.
ACTIVE = 1
# There had been no user activity (keyboard/mouse/etc.) on this client
# recently. So the client was online at the specified time, but it
# could be the user's desktop which they were away from. Displayed as
# orange/idle if the timestamp is current.
IDLE = 2
# Information from the client about the user's recent interaction with
# that client, as of `timestamp`. Possible values above.
#
# There is no "inactive" status, because that is encoded by the
# timestamp being old.
status: int = models.PositiveSmallIntegerField(default=ACTIVE)
@staticmethod
def status_to_string(status: int) -> str:
if status == UserPresence.ACTIVE:
return "active"
elif status == UserPresence.IDLE:
return "idle"
else: # nocoverage # TODO: Add a presence test to cover this.
raise ValueError(f"Unknown status: {status}")
@staticmethod
def to_presence_dict(
client_name: str,
status: int,
dt: datetime.datetime,
push_enabled: bool = False,
has_push_devices: bool = False,
) -> Dict[str, Any]:
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
def to_dict(self) -> Dict[str, Any]:
return UserPresence.to_presence_dict(
self.client.name,
self.status,
self.timestamp,
)
@staticmethod
def status_from_string(status: str) -> Optional[int]:
if status == "active":
# See https://github.com/python/mypy/issues/2611
status_val: Optional[int] = UserPresence.ACTIVE
elif status == "idle":
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
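# Illustrative sketch (not part of the original module): for the two defined
# presence statuses, status_to_string and status_from_string round-trip.
def _example_presence_status_round_trip() -> None:
    assert UserPresence.status_from_string("active") == UserPresence.ACTIVE
    assert UserPresence.status_to_string(UserPresence.IDLE) == "idle"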
class UserStatus(AbstractEmoji):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_profile: UserProfile = models.OneToOneField(UserProfile, on_delete=CASCADE)
timestamp: datetime.datetime = models.DateTimeField()
client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    # Override the emoji_name and emoji_code fields of AbstractEmoji to
    # accept a default value.
emoji_name: str = models.TextField(default="")
emoji_code: str = models.TextField(default="")
NORMAL = 0
AWAY = 1
status: int = models.PositiveSmallIntegerField(default=NORMAL)
status_text: str = models.CharField(max_length=255, default="")
class DefaultStream(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
class Meta:
unique_together = ("realm", "stream")
class DefaultStreamGroup(models.Model):
MAX_NAME_LENGTH = 60
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
streams: Manager = models.ManyToManyField("Stream")
description: str = models.CharField(max_length=1024, default="")
class Meta:
unique_together = ("realm", "name")
def to_dict(self) -> Dict[str, Any]:
return dict(
name=self.name,
id=self.id,
description=self.description,
streams=[stream.to_dict() for stream in self.streams.all().order_by("name")],
)
def get_default_stream_groups(realm: Realm) -> List[DefaultStreamGroup]:
return DefaultStreamGroup.objects.filter(realm=realm)
class AbstractScheduledJob(models.Model):
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
# JSON representation of arguments to consumer
data: str = models.TextField()
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class Meta:
abstract = True
class ScheduledEmail(AbstractScheduledJob):
# Exactly one of users or address should be set. These are
# duplicate values, used to efficiently filter the set of
# ScheduledEmails for use in clear_scheduled_emails; the
# recipients used for actually sending messages are stored in the
# data field of AbstractScheduledJob.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
users: Manager = models.ManyToManyField(UserProfile)
# Just the address part of a full "name <address>" email address
address: Optional[str] = models.EmailField(null=True, db_index=True)
# Valid types are below
WELCOME = 1
DIGEST = 2
INVITATION_REMINDER = 3
type: int = models.PositiveSmallIntegerField()
def __str__(self) -> str:
return f"<ScheduledEmail: {self.type} {self.address or list(self.users.all())} {self.scheduled_timestamp}>"
class MissedMessageEmailAddress(models.Model):
EXPIRY_SECONDS = 60 * 60 * 24 * 5
ALLOWED_USES = 1
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
email_token: str = models.CharField(max_length=34, unique=True, db_index=True)
    # Timestamp of when the missed message address was generated.
# The address is valid until timestamp + EXPIRY_SECONDS.
timestamp: datetime.datetime = models.DateTimeField(db_index=True, default=timezone_now)
times_used: int = models.PositiveIntegerField(default=0, db_index=True)
def __str__(self) -> str:
return settings.EMAIL_GATEWAY_PATTERN % (self.email_token,)
def is_usable(self) -> bool:
not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
has_uses_left = self.times_used < self.ALLOWED_USES
return has_uses_left and not_expired
def increment_times_used(self) -> None:
self.times_used += 1
self.save(update_fields=["times_used"])
class NotificationTriggers:
# "private_message" is for 1:1 PMs as well as huddles
PRIVATE_MESSAGE = "private_message"
MENTION = "mentioned"
WILDCARD_MENTION = "wildcard_mentioned"
STREAM_PUSH = "stream_push_notify"
STREAM_EMAIL = "stream_email_notify"
class ScheduledMessageNotificationEmail(models.Model):
"""Stores planned outgoing message notification emails. They may be
processed earlier should Zulip choose to batch multiple messages
in a single email, but typically will be processed just after
scheduled_timestamp.
"""
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
message: Message = models.ForeignKey(Message, on_delete=CASCADE)
EMAIL_NOTIFICATION_TRIGGER_CHOICES = [
(NotificationTriggers.PRIVATE_MESSAGE, "Private message"),
(NotificationTriggers.MENTION, "Mention"),
(NotificationTriggers.WILDCARD_MENTION, "Wildcard mention"),
(NotificationTriggers.STREAM_EMAIL, "Stream notifications enabled"),
]
trigger: str = models.TextField(choices=EMAIL_NOTIFICATION_TRIGGER_CHOICES)
mentioned_user_group: Optional[UserGroup] = models.ForeignKey(
UserGroup, null=True, on_delete=CASCADE
)
# Timestamp for when the notification should be processed and sent.
# Calculated from the time the event was received and the batching period.
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
class ScheduledMessage(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
content: str = models.TextField()
sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
delivered: bool = models.BooleanField(default=False)
SEND_LATER = 1
REMIND = 2
DELIVERY_TYPES = (
(SEND_LATER, "send_later"),
(REMIND, "remind"),
)
delivery_type: int = models.PositiveSmallIntegerField(
choices=DELIVERY_TYPES,
default=SEND_LATER,
)
def topic_name(self) -> str:
return self.subject
def set_topic_name(self, topic_name: str) -> None:
self.subject = topic_name
def __str__(self) -> str:
display_recipient = get_display_recipient(self.recipient)
return f"<ScheduledMessage: {display_recipient} {self.subject} {self.sender} {self.scheduled_timestamp}>"
EMAIL_TYPES = {
"followup_day1": ScheduledEmail.WELCOME,
"followup_day2": ScheduledEmail.WELCOME,
"digest": ScheduledEmail.DIGEST,
"invitation_reminder": ScheduledEmail.INVITATION_REMINDER,
}
class AbstractRealmAuditLog(models.Model):
"""Defines fields common to RealmAuditLog and RemoteRealmAuditLog."""
event_time: datetime.datetime = models.DateTimeField(db_index=True)
# If True, event_time is an overestimate of the true time. Can be used
# by migrations when introducing a new event_type.
backfilled: bool = models.BooleanField(default=False)
# Keys within extra_data, when extra_data is a json dict. Keys are strings because
# json keys must always be strings.
OLD_VALUE = "1"
NEW_VALUE = "2"
ROLE_COUNT = "10"
ROLE_COUNT_HUMANS = "11"
ROLE_COUNT_BOTS = "12"
extra_data: Optional[str] = models.TextField(null=True)
# Event types
USER_CREATED = 101
USER_ACTIVATED = 102
USER_DEACTIVATED = 103
USER_REACTIVATED = 104
USER_ROLE_CHANGED = 105
USER_SOFT_ACTIVATED = 120
USER_SOFT_DEACTIVATED = 121
USER_PASSWORD_CHANGED = 122
USER_AVATAR_SOURCE_CHANGED = 123
USER_FULL_NAME_CHANGED = 124
USER_EMAIL_CHANGED = 125
USER_TOS_VERSION_CHANGED = 126
USER_API_KEY_CHANGED = 127
USER_BOT_OWNER_CHANGED = 128
USER_DEFAULT_SENDING_STREAM_CHANGED = 129
USER_DEFAULT_REGISTER_STREAM_CHANGED = 130
USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED = 131
USER_NOTIFICATION_SETTINGS_CHANGED = 132
USER_DIGEST_EMAIL_CREATED = 133
REALM_DEACTIVATED = 201
REALM_REACTIVATED = 202
REALM_SCRUBBED = 203
REALM_PLAN_TYPE_CHANGED = 204
REALM_LOGO_CHANGED = 205
REALM_EXPORTED = 206
REALM_PROPERTY_CHANGED = 207
REALM_ICON_SOURCE_CHANGED = 208
REALM_DISCOUNT_CHANGED = 209
REALM_SPONSORSHIP_APPROVED = 210
REALM_BILLING_METHOD_CHANGED = 211
REALM_REACTIVATION_EMAIL_SENT = 212
REALM_SPONSORSHIP_PENDING_STATUS_CHANGED = 213
REALM_SUBDOMAIN_CHANGED = 214
REALM_CREATED = 215
SUBSCRIPTION_CREATED = 301
SUBSCRIPTION_ACTIVATED = 302
SUBSCRIPTION_DEACTIVATED = 303
SUBSCRIPTION_PROPERTY_CHANGED = 304
USER_MUTED = 350
USER_UNMUTED = 351
STRIPE_CUSTOMER_CREATED = 401
STRIPE_CARD_CHANGED = 402
STRIPE_PLAN_CHANGED = 403
STRIPE_PLAN_QUANTITY_RESET = 404
CUSTOMER_CREATED = 501
CUSTOMER_PLAN_CREATED = 502
CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN = 503
STREAM_CREATED = 601
STREAM_DEACTIVATED = 602
STREAM_NAME_CHANGED = 603
event_type: int = models.PositiveSmallIntegerField()
# event_types synced from on-prem installations to Zulip Cloud when
# billing for mobile push notifications is enabled. Every billing
# event_type should have ROLE_COUNT populated in extra_data.
SYNCED_BILLING_EVENTS = [
USER_CREATED,
USER_ACTIVATED,
USER_DEACTIVATED,
USER_REACTIVATED,
USER_ROLE_CHANGED,
REALM_DEACTIVATED,
REALM_REACTIVATED,
]
class Meta:
abstract = True
class RealmAuditLog(AbstractRealmAuditLog):
"""
RealmAuditLog tracks important changes to users, streams, and
    realms in Zulip. It is intended to support both
    debugging/introspection (e.g. determining when a user left a
    given stream) and to help with some database migrations where
    we might be able to do a better data backfill with it. Here are a
few key details about how this works:
* acting_user is the user who initiated the state change
* modified_user (if present) is the user being modified
* modified_stream (if present) is the stream being modified
For example:
* When a user subscribes another user to a stream, modified_user,
acting_user, and modified_stream will all be present and different.
* When an administrator changes an organization's realm icon,
acting_user is that administrator and both modified_user and
modified_stream will be None.
"""
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
acting_user: Optional[UserProfile] = models.ForeignKey(
UserProfile,
null=True,
related_name="+",
on_delete=CASCADE,
)
modified_user: Optional[UserProfile] = models.ForeignKey(
UserProfile,
null=True,
related_name="+",
on_delete=CASCADE,
)
modified_stream: Optional[Stream] = models.ForeignKey(
Stream,
null=True,
on_delete=CASCADE,
)
event_last_message_id: Optional[int] = models.IntegerField(null=True)
def __str__(self) -> str:
if self.modified_user is not None:
return f"<RealmAuditLog: {self.modified_user} {self.event_type} {self.event_time} {self.id}>"
if self.modified_stream is not None:
return f"<RealmAuditLog: {self.modified_stream} {self.event_type} {self.event_time} {self.id}>"
return f"<RealmAuditLog: {self.realm} {self.event_type} {self.event_time} {self.id}>"
class UserHotspot(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
hotspot: str = models.CharField(max_length=30)
timestamp: datetime.datetime = models.DateTimeField(default=timezone_now)
class Meta:
unique_together = ("user", "hotspot")
def check_valid_user_ids(realm_id: int, val: object, allow_deactivated: bool = False) -> List[int]:
user_ids = check_list(check_int)("User IDs", val)
realm = Realm.objects.get(id=realm_id)
for user_id in user_ids:
# TODO: Structurally, we should be doing a bulk fetch query to
# get the users here, not doing these in a loop. But because
# this is a rarely used feature and likely to never have more
# than a handful of users, it's probably mostly OK.
try:
user_profile = get_user_profile_by_id_in_realm(user_id, realm)
except UserProfile.DoesNotExist:
raise ValidationError(_("Invalid user ID: {}").format(user_id))
if not allow_deactivated:
if not user_profile.is_active:
raise ValidationError(_("User with ID {} is deactivated").format(user_id))
if user_profile.is_bot:
raise ValidationError(_("User with ID {} is a bot").format(user_id))
return user_ids
class CustomProfileField(models.Model):
"""Defines a form field for the per-realm custom profile fields feature.
See CustomProfileFieldValue for an individual user's values for one of
these fields.
"""
HINT_MAX_LENGTH = 80
NAME_MAX_LENGTH = 40
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
name: str = models.CharField(max_length=NAME_MAX_LENGTH)
hint: Optional[str] = models.CharField(max_length=HINT_MAX_LENGTH, default="", null=True)
order: int = models.IntegerField(default=0)
SHORT_TEXT = 1
LONG_TEXT = 2
SELECT = 3
DATE = 4
URL = 5
USER = 6
EXTERNAL_ACCOUNT = 7
    # These are the field types whose validators require more than the
    # var_name and value arguments, e.g. SELECT requires field_data and
    # USER requires a realm as an argument.
SELECT_FIELD_TYPE_DATA: List[ExtendedFieldElement] = [
(SELECT, gettext_lazy("List of options"), validate_select_field, str, "SELECT"),
]
USER_FIELD_TYPE_DATA: List[UserFieldElement] = [
(USER, gettext_lazy("Person picker"), check_valid_user_ids, ast.literal_eval, "USER"),
]
SELECT_FIELD_VALIDATORS: Dict[int, ExtendedValidator] = {
item[0]: item[2] for item in SELECT_FIELD_TYPE_DATA
}
USER_FIELD_VALIDATORS: Dict[int, RealmUserValidator] = {
item[0]: item[2] for item in USER_FIELD_TYPE_DATA
}
FIELD_TYPE_DATA: List[FieldElement] = [
# Type, display name, validator, converter, keyword
(SHORT_TEXT, gettext_lazy("Short text"), check_short_string, str, "SHORT_TEXT"),
(LONG_TEXT, gettext_lazy("Long text"), check_long_string, str, "LONG_TEXT"),
(DATE, gettext_lazy("Date picker"), check_date, str, "DATE"),
(URL, gettext_lazy("Link"), check_url, str, "URL"),
(
EXTERNAL_ACCOUNT,
gettext_lazy("External account"),
check_short_string,
str,
"EXTERNAL_ACCOUNT",
),
]
ALL_FIELD_TYPES = [*FIELD_TYPE_DATA, *SELECT_FIELD_TYPE_DATA, *USER_FIELD_TYPE_DATA]
FIELD_VALIDATORS: Dict[int, Validator[Union[int, str, List[int]]]] = {
item[0]: item[2] for item in FIELD_TYPE_DATA
}
FIELD_CONVERTERS: Dict[int, Callable[[Any], Any]] = {
item[0]: item[3] for item in ALL_FIELD_TYPES
}
FIELD_TYPE_CHOICES: List[Tuple[int, Promise]] = [(item[0], item[1]) for item in ALL_FIELD_TYPES]
field_type: int = models.PositiveSmallIntegerField(
choices=FIELD_TYPE_CHOICES,
default=SHORT_TEXT,
)
# A JSON blob of any additional data needed to define the field beyond
# type/name/hint.
#
# The format depends on the type. Field types SHORT_TEXT, LONG_TEXT,
# DATE, URL, and USER leave this null. Fields of type SELECT store the
# choices' descriptions.
#
# Note: There is no performance overhead of using TextField in PostgreSQL.
# See https://www.postgresql.org/docs/9.0/static/datatype-character.html
field_data: Optional[str] = models.TextField(default="", null=True)
class Meta:
unique_together = ("realm", "name")
def as_dict(self) -> ProfileDataElementBase:
return {
"id": self.id,
"name": self.name,
"type": self.field_type,
"hint": self.hint,
"field_data": self.field_data,
"order": self.order,
}
def is_renderable(self) -> bool:
if self.field_type in [CustomProfileField.SHORT_TEXT, CustomProfileField.LONG_TEXT]:
return True
return False
def __str__(self) -> str:
return f"<CustomProfileField: {self.realm} {self.name} {self.field_type} {self.order}>"
def custom_profile_fields_for_realm(realm_id: int) -> List[CustomProfileField]:
return CustomProfileField.objects.filter(realm=realm_id).order_by("order")
class CustomProfileFieldValue(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
field: CustomProfileField = models.ForeignKey(CustomProfileField, on_delete=CASCADE)
value: str = models.TextField()
rendered_value: Optional[str] = models.TextField(null=True, default=None)
class Meta:
unique_together = ("user_profile", "field")
def __str__(self) -> str:
return f"<CustomProfileFieldValue: {self.user_profile} {self.field} {self.value}>"
# Interfaces for services
# They provide additional functionality, like parsing a message to obtain the query URL
# and the data to be sent to that URL, and parsing the response.
GENERIC_INTERFACE = "GenericService"
SLACK_INTERFACE = "SlackOutgoingWebhookService"
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
# of the xkcd bot); multiple embedded bots can have the same name, but all
# embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
name: str = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
# Bot user corresponding to the Service. The bot_type of this user
    # determines the type of service. If non-bot services are added later,
# user_profile can also represent the owner of the Service.
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
base_url: str = models.TextField()
token: str = models.TextField()
# Interface / API version of the service.
interface: int = models.PositiveSmallIntegerField(default=1)
# Valid interfaces are {generic, zulip_bot_service, slack}
GENERIC = 1
SLACK = 2
ALLOWED_INTERFACE_TYPES = [
GENERIC,
SLACK,
]
    # N.B. If we used Django's choices=... we would get this for free (kinda)
_interfaces: Dict[int, str] = {
GENERIC: GENERIC_INTERFACE,
SLACK: SLACK_INTERFACE,
}
def interface_name(self) -> str:
# Raises KeyError if invalid
return self._interfaces[self.interface]
def get_bot_services(user_profile_id: int) -> List[Service]:
return list(Service.objects.filter(user_profile_id=user_profile_id))
def get_service_profile(user_profile_id: int, service_name: str) -> Service:
return Service.objects.get(user_profile_id=user_profile_id, name=service_name)
class BotStorageData(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
key: str = models.TextField(db_index=True)
value: str = models.TextField()
class Meta:
unique_together = ("bot_profile", "key")
class BotConfigData(models.Model):
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
key: str = models.TextField(db_index=True)
value: str = models.TextField()
class Meta:
unique_together = ("bot_profile", "key")
class InvalidFakeEmailDomain(Exception):
pass
def get_fake_email_domain(realm: Realm) -> str:
try:
# Check that realm.host can be used to form valid email addresses.
validate_email(f"bot@{realm.host}")
return realm.host
except ValidationError:
pass
try:
# Check that the fake email domain can be used to form valid email addresses.
validate_email("bot@" + settings.FAKE_EMAIL_DOMAIN)
except ValidationError:
raise InvalidFakeEmailDomain(
settings.FAKE_EMAIL_DOMAIN + " is not a valid domain. "
"Consider setting the FAKE_EMAIL_DOMAIN setting."
)
return settings.FAKE_EMAIL_DOMAIN
class AlertWord(models.Model):
# Realm isn't necessary, but it's a nice denormalization. Users
# never move to another realm, so it's static, and having Realm
# here optimizes the main query on this table, which is fetching
# all the alert words in a realm.
id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
# Case-insensitive name for the alert word.
word: str = models.TextField()
class Meta:
unique_together = ("user_profile", "word")
def flush_realm_alert_words(realm: Realm) -> None:
cache_delete(realm_alert_words_cache_key(realm))
cache_delete(realm_alert_words_automaton_cache_key(realm))
def flush_alert_word(*, instance: AlertWord, **kwargs: object) -> None:
realm = instance.realm
flush_realm_alert_words(realm)
post_save.connect(flush_alert_word, sender=AlertWord)
post_delete.connect(flush_alert_word, sender=AlertWord)
|
hackerkid/zulip
|
zerver/models.py
|
Python
|
apache-2.0
| 148,798
|
[
"VisIt"
] |
a0687008ccc3529961db98f545ed431aa0992d791097a11654864aae1baa2cae
|
import pyspeckit
# Grab a .fits spectrum with a legitimate header
sp = pyspeckit.Spectrum('G031.947+00.076_nh3_11_Tastar.fits')
""" HEADER:
SIMPLE = T / Written by IDL: Tue Aug 31 18:17:01 2010
BITPIX = -64
NAXIS = 1 / number of array dimensions
NAXIS1 = 8192 /Number of positions along axis 1
CDELT1 = -0.077230503
CRPIX1 = 4096.0000
CRVAL1 = 68.365635
CTYPE1 = 'VRAD'
CUNIT1 = 'km/s '
SPECSYS = 'LSRK'
RESTFRQ = 2.3694500e+10
VELOSYS = -43755.930
CDELT1F = 6103.5156
CRPIX1F = 4096.0000
CRVAL1F = 2.3692555e+10
CTYPE1F = 'FREQ'
CUNIT1F = 'Hz'
SPECSYSF= 'LSRK'
RESTFRQF= 2.3694500e+10
VELOSYSF= -43755.930
VDEF = 'RADI-LSR'
SRCVEL = 70.000000
ZSOURCE = 0.00023349487
BUNIT = 'K '
OBJECT = 'G031.947+00.076'
TELESCOP= 'GBT'
TSYS = 42.1655
ELEV = 34.904846
AIRMASS = 1.7475941
LINE = 'nh3_11'
FREQ = 23.692555
TARGLON = 31.947236
TARGLAT = 0.076291610
MJD-AVG = 54548.620
CONTINUU= 0.0477613
CONTERR = 0.226990
SMTHOFF = 0
COMMENT 1 blank line
END
"""
# Change the plot range to be a reasonable physical coverage (the default is to
# plot the whole 8192 channel spectrum)
sp.plotter(xmin=-100,xmax=300)
# There are many extra channels, so let's smooth. Default is a Gaussian
# smooth. Downsampling helps speed up the fitting (assuming the line is still
# Nyquist sampled, which it is)
sp.smooth(2)
# replot after smoothing
sp.plotter(xmin=-100,xmax=300)
# First, fit a gaussian to the whole spectrum as a "first guess" (good at
# picking up the centroid, bad at getting the width right)
# negamp=False forces the fitter to search for a positive peak, not the
# negatives created in this spectrum by frequency switching
sp.specfit.selectregion(xmin=60,xmax=120,xtype='wcs')
sp.specfit(negamp=False, guesses='moments')
# Save the fit...
sp.plotter.figure.savefig('nh3_gaussfit.png')
# and print some information to screen
print "Guesses: ", sp.specfit.guesses
print "Best fit: ", sp.specfit.modelpars
# Run the ammonia spec fitter with a reasonable guess
sp.specfit(fittype='ammonia_tau',
guesses=[5.9,4.45,4.5,0.84,96.2,0.43],
quiet=False)
# plot up the residuals in a different window. The residuals strongly suggest
# the presence of a second velocity component.
sp.specfit.plotresiduals()
sp.plotter.figure.savefig('nh3_ammonia_vtau_fit.png')
print "Guesses: ", sp.specfit.guesses
print "Best fit: ", sp.specfit.modelpars
# re-plot zoomed in
sp.plotter(xmin=70,xmax=125)
# replot the fit
sp.specfit.plot_fit()
sp.plotter.figure.savefig('nh3_ammonia_fit_vtau_zoom.png')
# refit with two components
sp.specfit(fittype='ammonia_tau',
guesses=[4,3.5,4.5,0.68,97.3,0.5]+[15,4.2,4.5,0.52,95.8,0.35],
quiet=False)
sp.specfit.plotresiduals()
sp.plotter.figure.savefig('nh3_ammonia_multifit_vtau_zoom.png')
|
bsipocz/pyspeckit
|
examples/ammonia_vtau_fit_example.py
|
Python
|
mit
| 2,881
|
[
"Gaussian"
] |
bf8301609de0681b225ae9df9baa956bba35e2c22d69f94633a28acc03d165e6
|
# Copyright 2007-2010 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
from Bio._py3k import basestring
import os
import warnings
try:
from StringIO import StringIO # Python 2
# Can't use cStringIO, quoting the documentation,
# "Unlike the StringIO module, this module is not able to accept
# Unicode strings that cannot be encoded as plain ASCII strings."
# Therefore can't use from Bio._py3k import StringIO
except ImportError:
from io import StringIO # Python 3
from io import BytesIO
from Bio import BiopythonWarning, BiopythonParserWarning
from Bio import SeqIO
from Bio import AlignIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq, UnknownSeq
from Bio import Alphabet
from Bio.Align import MultipleSeqAlignment
# TODO - Convert this to using unittest, and check desired warnings
# are issued. Used to do that by capturing warnings to stdout and
# verifying via the print-and-compare check. However, there was some
# frustrating cross-platform inconsistency I couldn't resolve.
protein_alphas = [Alphabet.generic_protein]
dna_alphas = [Alphabet.generic_dna]
rna_alphas = [Alphabet.generic_rna]
nucleotide_alphas = [Alphabet.generic_nucleotide,
Alphabet.Gapped(Alphabet.generic_nucleotide)]
no_alpha_formats = ["fasta", "clustal", "phylip", "phylip-relaxed",
"phylip-sequential", "tab", "ig",
"stockholm", "emboss", "fastq", "fastq-solexa",
"fastq-illumina", "qual"]
possible_unknown_seq_formats = ["qual", "genbank", "gb", "embl", "imgt"]
# List of formats including alignment only file formats we can read AND write.
# The list is initially hard coded to preserve the original order of the unit
# test output, with any new formats added since appended to the end.
test_write_read_alignment_formats = ["fasta", "clustal", "phylip", "stockholm",
"phylip-relaxed"]
for format in sorted(SeqIO._FormatToWriter):
if format not in test_write_read_alignment_formats:
test_write_read_alignment_formats.append(format)
for format in sorted(AlignIO._FormatToWriter):
if format not in test_write_read_alignment_formats:
test_write_read_alignment_formats.append(format)
test_write_read_alignment_formats.remove("gb") # an alias for genbank
test_write_read_alignment_formats.remove("fastq-sanger") # an alias for fastq
# test_files is a list of tuples containing:
# - string: file format
# - boolean: alignment (requires all seqs be same length)
# - string: relative filename
# - integer: number of sequences
test_files = [
("sff", False, 'Roche/E3MFGYR02_random_10_reads.sff', 10),
# Following examples are also used in test_Clustalw.py
("clustal", True, 'Clustalw/cw02.aln', 2),
("clustal", True, 'Clustalw/opuntia.aln', 7),
("clustal", True, 'Clustalw/hedgehog.aln', 5),
("clustal", True, 'Clustalw/odd_consensus.aln', 2),
# Following nucleic examples are also used in test_SeqIO_FastaIO.py
("fasta", False, 'Fasta/lupine.nu', 1),
("fasta", False, 'Fasta/elderberry.nu', 1),
("fasta", False, 'Fasta/phlox.nu', 1),
("fasta", False, 'Fasta/centaurea.nu', 1),
("fasta", False, 'Fasta/wisteria.nu', 1),
("fasta", False, 'Fasta/sweetpea.nu', 1),
("fasta", False, 'Fasta/lavender.nu', 1),
# Following protein examples are also used in test_SeqIO_FastaIO.py
("fasta", False, 'Fasta/aster.pro', 1),
("fasta", False, 'Fasta/loveliesbleeding.pro', 1),
("fasta", False, 'Fasta/rose.pro', 1),
("fasta", False, 'Fasta/rosemary.pro', 1),
# Following examples are also used in test_BioSQL_SeqIO.py
("fasta", False, 'Fasta/f001', 1), # Protein
("fasta", False, 'Fasta/f002', 3), # DNA
# ("fasta", False, 'Fasta/f003', 2), # Protein with comments
("fasta", False, 'Fasta/fa01', 2), # Protein with gaps
# Following are also used in test_SeqIO_features.py, see also NC_005816.gb
("fasta", False, 'GenBank/NC_005816.fna', 1),
("fasta", False, 'GenBank/NC_005816.ffn', 10),
("fasta", False, 'GenBank/NC_005816.faa', 10),
("fasta", False, 'GenBank/NC_000932.faa', 85),
("tab", False, 'GenBank/NC_005816.tsv', 10), # FASTA -> Tabbed
# Following examples are also used in test_GFF.py
("fasta", False, 'GFF/NC_001802.fna', 1), # upper case
("fasta", False, 'GFF/NC_001802lc.fna', 1), # lower case
("fasta", True, 'GFF/multi.fna', 3), # Trivial nucleotide alignment
# Following example is also used in test_registry.py
("fasta", False, 'Registry/seqs.fasta', 2), # contains blank line
# Following example is also used in test_Nexus.py
("nexus", True, 'Nexus/test_Nexus_input.nex', 9),
# Following examples are also used in test_SwissProt.py
("swiss", False, 'SwissProt/sp001', 1),
("swiss", False, 'SwissProt/sp002', 1),
("swiss", False, 'SwissProt/sp003', 1),
("swiss", False, 'SwissProt/sp004', 1),
("swiss", False, 'SwissProt/sp005', 1),
("swiss", False, 'SwissProt/sp006', 1),
("swiss", False, 'SwissProt/sp007', 1),
("swiss", False, 'SwissProt/sp008', 1),
("swiss", False, 'SwissProt/sp009', 1),
("swiss", False, 'SwissProt/sp010', 1),
("swiss", False, 'SwissProt/sp011', 1),
("swiss", False, 'SwissProt/sp012', 1),
("swiss", False, 'SwissProt/sp013', 1),
("swiss", False, 'SwissProt/sp014', 1),
("swiss", False, 'SwissProt/sp015', 1),
("swiss", False, 'SwissProt/sp016', 1),
# Following example is also used in test_registry.py
("swiss", False, 'Registry/EDD_RAT.dat', 1),
# Following examples are also used in test_Uniprot.py
("uniprot-xml", False, 'SwissProt/uni001', 1),
("uniprot-xml", False, 'SwissProt/uni002', 3),
("uniprot-xml", False, 'SwissProt/Q13639.xml', 1),
("swiss", False, 'SwissProt/Q13639.txt', 1),
("uniprot-xml", False, 'SwissProt/H2CNN8.xml', 1),
("swiss", False, "SwissProt/H2CNN8.txt", 1),
# Following examples are also used in test_GenBank.py
("genbank", False, 'GenBank/noref.gb', 1),
("genbank", False, 'GenBank/cor6_6.gb', 6),
("genbank", False, 'GenBank/iro.gb', 1),
("genbank", False, 'GenBank/pri1.gb', 1),
("genbank", False, 'GenBank/arab1.gb', 1),
("genbank", False, 'GenBank/protein_refseq.gb', 1), # Old version
("genbank", False, 'GenBank/protein_refseq2.gb', 1), # Revised version
("genbank", False, 'GenBank/extra_keywords.gb', 1),
("genbank", False, 'GenBank/one_of.gb', 1),
("genbank", False, 'GenBank/NT_019265.gb', 1), # contig, no sequence
("genbank", False, 'GenBank/origin_line.gb', 1),
("genbank", False, 'GenBank/blank_seq.gb', 1),
("genbank", False, 'GenBank/dbsource_wrap.gb', 1),
("genbank", False, 'GenBank/NC_005816.gb', 1), # See also AE017046.embl
("genbank", False, 'GenBank/NC_000932.gb', 1),
# Odd LOCUS line from Vector NTI
("genbank", False, 'GenBank/pBAD30.gb', 1),
# The next example is a truncated copy of gbvrl1.seq from
# ftp://ftp.ncbi.nih.gov/genbank/gbvrl1.seq.gz
# This includes an NCBI header, and the first three records:
("genbank", False, 'GenBank/gbvrl1_start.seq', 3),
# Following files are also used in test_GFF.py
("genbank", False, 'GFF/NC_001422.gbk', 1),
# Generated with Entrez.efetch("protein", id="16130152",
# rettype="gbwithparts")
("genbank", False, 'GenBank/NP_416719.gbwithparts', 1),
# GenPept file with nasty bond locations,
("genbank", False, 'GenBank/1MRR_A.gp', 1),
# Following files are currently only used here or in test_SeqIO_index.py:
("embl", False, 'EMBL/epo_prt_selection.embl', 9), # proteins
("embl", False, 'EMBL/patents.embl', 4), # more proteins, but no seq
("embl", False, 'EMBL/TRBG361.embl', 1),
("embl", False, 'EMBL/DD231055_edited.embl', 1),
("embl", False, 'EMBL/DD231055_edited2.embl', 1), # Partial ID line
("embl", False, 'EMBL/SC10H5.embl', 1), # Pre 2006 style ID line
("embl", False, 'EMBL/U87107.embl', 1), # Old ID line with SV line
("embl", False, 'EMBL/AAA03323.embl', 1), # 2008, PA line but no AC
("embl", False, 'EMBL/AE017046.embl', 1), # See also NC_005816.gb
("embl", False, 'EMBL/Human_contigs.embl', 2), # contigs, no sequences
# wrapped locations and unspecified type
("embl", False, 'EMBL/location_wrap.embl', 1),
# features over indented for EMBL
("embl", False, 'EMBL/A04195.imgt', 1),
# features over indented for EMBL
("imgt", False, 'EMBL/A04195.imgt', 1),
("stockholm", True, 'Stockholm/simple.sth', 2),
("stockholm", True, 'Stockholm/funny.sth', 6),
# Following PHYLIP files are currently only used here and in test_AlignIO.py,
# and are mostly from Joseph Felsenstein's PHYLIP v3.6 documentation:
("phylip", True, 'Phylip/reference_dna.phy', 6),
("phylip", True, 'Phylip/reference_dna2.phy', 6),
("phylip", True, 'Phylip/hennigian.phy', 10),
("phylip", True, 'Phylip/horses.phy', 10),
("phylip", True, 'Phylip/random.phy', 10),
("phylip", True, 'Phylip/interlaced.phy', 3),
("phylip", True, 'Phylip/interlaced2.phy', 4),
# Following are EMBOSS simple or pairs format alignments
("emboss", True, 'Emboss/alignret.txt', 4),
("emboss", False, 'Emboss/needle.txt', 10),
("emboss", True, 'Emboss/water.txt', 2),
# Following PHD (PHRAP) sequencing files are also used in test_Phd.py
("phd", False, 'Phd/phd1', 3),
("phd", False, 'Phd/phd2', 1),
("phd", False, 'Phd/phd_solexa', 2),
("phd", False, 'Phd/phd_454', 1),
# Following ACE assembly files are also used in test_Ace.py
("ace", False, 'Ace/contig1.ace', 2),
("ace", False, 'Ace/consed_sample.ace', 1),
("ace", False, 'Ace/seq.cap.ace', 1),
# Following IntelliGenetics / MASE files are also used in
# test_intelligenetics.py
("ig", False, 'IntelliGenetics/TAT_mase_nuc.txt', 17),
("ig", True, 'IntelliGenetics/VIF_mase-pro.txt', 16),
# This next file is a MASE alignment but sequence O_ANT70 is shorter than
# the others (so reading it as an alignment will fail). Perhaps MASE doesn't
# write trailing gaps?
("ig", False, 'IntelliGenetics/vpu_nucaligned.txt', 9),
# Following NBRD-PIR files are used in test_nbrf.py
("pir", False, 'NBRF/B_nuc.pir', 444),
("pir", False, 'NBRF/Cw_prot.pir', 111),
("pir", False, 'NBRF/DMA_nuc.pir', 4),
("pir", False, 'NBRF/DMB_prot.pir', 6),
("pir", True, 'NBRF/clustalw.pir', 2),
# Following quality files are also used in the Bio.SeqIO.QualityIO
# doctests:
("fasta", True, 'Quality/example.fasta', 3),
("qual", False, 'Quality/example.qual', 3),
("fastq", True, 'Quality/example.fastq', 3), # Unix new lines
("fastq", True, 'Quality/example_dos.fastq', 3), # DOS/Windows new lines
("fastq", True, 'Quality/tricky.fastq', 4),
("fastq", False, 'Quality/sanger_faked.fastq', 1),
("fastq", False, 'Quality/sanger_93.fastq', 1),
("fastq-illumina", False, 'Quality/illumina_faked.fastq', 1),
("fastq-solexa", False, 'Quality/solexa_faked.fastq', 1),
("fastq-solexa", True, 'Quality/solexa_example.fastq', 5),
# Following examples are also used in test_SeqXML.py
("seqxml", False, 'SeqXML/dna_example.xml', 4),
("seqxml", False, 'SeqXML/rna_example.xml', 5),
("seqxml", False, 'SeqXML/protein_example.xml', 5),
# Following examples are also used in test_SeqIO_AbiIO.py
("abi", False, 'Abi/310.ab1', 1),
("abi", False, 'Abi/3100.ab1', 1),
("abi", False, 'Abi/3730.ab1', 1),
]
class ForwardOnlyHandle(object):
"""Mimic a network handle without seek and tell methods etc."""
def __init__(self, handle):
self._handle = handle
def __iter__(self):
return iter(self._handle)
def read(self, length=None):
if length is None:
return self._handle.read()
else:
return self._handle.read(length)
def readline(self):
return self._handle.readline()
def close(self):
return self._handle.close()
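# Illustrative sketch (mirrors how this class is exercised further below): the
# wrapper is dropped around an ordinary file handle so SeqIO.parse only sees a
# forward-only stream, e.g.
#   records = list(SeqIO.parse(ForwardOnlyHandle(open('Fasta/f002')), "fasta"))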
def compare_record(record_one, record_two):
"""This is meant to be a strict comparison for exact agreement..."""
assert isinstance(record_one, SeqRecord)
assert isinstance(record_two, SeqRecord)
assert record_one.seq is not None
assert record_two.seq is not None
if record_one.id != record_two.id:
return False
if record_one.name != record_two.name:
return False
if record_one.description != record_two.description:
return False
if len(record_one) != len(record_two):
return False
if isinstance(record_one.seq, UnknownSeq) \
and isinstance(record_two.seq, UnknownSeq):
# Jython didn't like us comparing the string of very long UnknownSeq
# object (out of heap memory error)
if record_one.seq._character != record_two.seq._character:
return False
elif str(record_one.seq) != str(record_two.seq):
return False
# TODO - check features and annotation (see code for BioSQL tests)
for key in set(record_one.letter_annotations).intersection(
record_two.letter_annotations):
if record_one.letter_annotations[key] != \
record_two.letter_annotations[key]:
return False
return True
def record_summary(record, indent=" "):
"""Returns a concise summary of a SeqRecord object as a string"""
if record.id == record.name:
answer = "%sID and Name='%s',\n%sSeq='" % (indent, record.id, indent)
else:
answer = "%sID = '%s', Name='%s',\n%sSeq='" % (
indent, record.id, record.name, indent)
if record.seq is None:
answer += "None"
else:
if len(record.seq) > 50:
answer += str(record.seq[:40]) + "..." + str(record.seq[-7:])
else:
answer += str(record.seq)
answer += "', length=%i" % (len(record.seq))
return answer
def col_summary(col_text):
if len(col_text) < 65:
return col_text
else:
return col_text[:60] + "..." + col_text[-5:]
def alignment_summary(alignment, index=" "):
"""Returns a concise summary of an Alignment object as a string"""
answer = []
alignment_len = alignment.get_alignment_length()
rec_count = len(alignment)
for i in range(min(5, alignment_len)):
answer.append(index + col_summary(alignment[:, i])
+ " alignment column %i" % i)
if alignment_len > 5:
i = alignment_len - 1
answer.append(index + col_summary("|" * rec_count)
+ " ...")
answer.append(index + col_summary(alignment[:, i])
+ " alignment column %i" % i)
return "\n".join(answer)
def check_simple_write_read(records, indent=" "):
# print(indent+"Checking we can write and then read back these records")
for format in test_write_read_alignment_formats:
if format not in possible_unknown_seq_formats \
and isinstance(records[0].seq, UnknownSeq) \
and len(records[0].seq) > 100:
# Skipping for speed. Some of the unknown sequences are
# rather long, and it seems a bit pointless to record them.
continue
print(indent + "Checking can write/read as '%s' format" % format)
# Going to write to a handle...
if format in SeqIO._BinaryFormats:
handle = BytesIO()
else:
handle = StringIO()
try:
with warnings.catch_warnings():
# e.g. data loss
warnings.simplefilter("ignore", BiopythonWarning)
c = SeqIO.write(
sequences=records, handle=handle, format=format)
assert c == len(records)
except (TypeError, ValueError) as e:
# This is often expected to happen, for example when we try and
# write sequences of different lengths to an alignment file.
if "len()" in str(e):
# Python 2.4.3,
# >>> len(None)
# ...
# TypeError: len() of unsized object
#
# Python 2.5.2,
# >>> len(None)
# ...
# TypeError: object of type 'NoneType' has no len()
print("Failed: Probably len() of None")
else:
print(indent + "Failed: %s" % str(e))
if records[0].seq.alphabet.letters is not None:
assert format != t_format, \
"Should be able to re-write in the original format!"
# Carry on to the next format:
continue
handle.flush()
handle.seek(0)
# Now ready to read back from the handle...
try:
records2 = list(SeqIO.parse(handle=handle, format=format))
except ValueError as e:
# This is BAD. We can't read our own output.
# I want to see the output when called from the test harness,
# run_tests.py (which can be funny about new lines on Windows)
handle.seek(0)
raise ValueError("%s\n\n%s\n\n%s"
% (str(e), repr(handle.read()), repr(records)))
assert len(records2) == t_count
for r1, r2 in zip(records, records2):
# Check the bare minimum (ID and sequence) as
# many formats can't store more than that.
assert len(r1) == len(r2)
# Check the sequence
if format in ["gb", "genbank", "embl", "imgt"]:
# The GenBank/EMBL parsers will convert to upper case.
if isinstance(r1.seq, UnknownSeq) \
and isinstance(r2.seq, UnknownSeq):
# Jython didn't like us comparing the string of very long
# UnknownSeq object (out of heap memory error)
assert r1.seq._character.upper() == r2.seq._character
else:
assert str(r1.seq).upper() == str(r2.seq)
elif format == "qual":
assert isinstance(r2.seq, UnknownSeq)
assert len(r2) == len(r1)
else:
assert str(r1.seq) == str(r2.seq)
# Beware of different quirks and limitations in the
# valid character sets and the identifier lengths!
if format in ["phylip", "phylip-sequential"]:
assert r1.id.replace("[", "").replace("]", "")[:10] == r2.id, \
"'%s' vs '%s'" % (r1.id, r2.id)
elif format == "phylip-relaxed":
assert r1.id.replace(" ", "").replace(':', '|') == r2.id, \
"'%s' vs '%s'" % (r1.id, r2.id)
elif format == "clustal":
assert r1.id.replace(" ", "_")[:30] == r2.id, \
"'%s' vs '%s'" % (r1.id, r2.id)
elif format == "stockholm":
assert r1.id.replace(" ", "_") == r2.id, \
"'%s' vs '%s'" % (r1.id, r2.id)
elif format == "fasta":
assert r1.id.split()[0] == r2.id
else:
assert r1.id == r2.id, \
"'%s' vs '%s'" % (r1.id, r2.id)
if len(records) > 1:
# Try writing just one record (passing a SeqRecord, not a list)
if format in SeqIO._BinaryFormats:
handle = BytesIO()
else:
handle = StringIO()
SeqIO.write(records[0], handle, format)
assert handle.getvalue() == records[0].format(format)
# Check parsers can cope with an empty file
for t_format in SeqIO._FormatToIterator:
if t_format in SeqIO._BinaryFormats or \
t_format in ("uniprot-xml", "pdb-seqres", "pdb-atom"):
# Empty files are not allowed for these formats.
continue
handle = StringIO()
records = list(SeqIO.parse(handle, t_format))
assert len(records) == 0
for (t_format, t_alignment, t_filename, t_count) in test_files:
if t_format in SeqIO._BinaryFormats:
mode = "rb"
else:
mode = "r"
print("Testing reading %s format file %s" % (t_format, t_filename))
assert os.path.isfile(t_filename), t_filename
with warnings.catch_warnings():
# e.g. BiopythonParserWarning: Dropping bond qualifier in feature
# location
warnings.simplefilter("ignore", BiopythonParserWarning)
# Try as an iterator using handle
h = open(t_filename, mode)
records = list(SeqIO.parse(handle=h, format=t_format))
h.close()
assert len(records) == t_count, \
"Found %i records but expected %i" % (len(records), t_count)
# Try using the iterator with a for loop, and a filename not handle
records2 = []
for record in SeqIO.parse(t_filename, format=t_format):
records2.append(record)
assert len(records2) == t_count
# Try using the iterator with the next() method
records3 = []
h = open(t_filename, mode)
seq_iterator = SeqIO.parse(handle=h, format=t_format)
while True:
try:
record = next(seq_iterator)
except StopIteration:
break
assert record is not None, "Should raise StopIteration not return None"
records3.append(record)
h.close()
# Try a mixture of next() and list (a torture test!)
h = open(t_filename, mode)
seq_iterator = SeqIO.parse(handle=h, format=t_format)
try:
record = next(seq_iterator)
except StopIteration:
record = None
if record is not None:
records4 = [record]
records4.extend(list(seq_iterator))
else:
records4 = []
assert len(records4) == t_count
h.close()
# Try a mixture of next() and for loop (a torture test!)
# with a forward-only-handle
if t_format == "abi":
# Temp hack
h = open(t_filename, mode)
else:
h = ForwardOnlyHandle(open(t_filename, mode))
seq_iterator = SeqIO.parse(h, format=t_format)
try:
record = next(seq_iterator)
except StopIteration:
record = None
if record is not None:
records5 = [record]
for record in seq_iterator:
records5.append(record)
else:
records5 = []
assert len(records5) == t_count
h.close()
for i in range(t_count):
record = records[i]
# Check returned expected object type
assert isinstance(record, SeqRecord)
if t_format in possible_unknown_seq_formats:
assert isinstance(record.seq, Seq) or \
isinstance(record.seq, UnknownSeq)
else:
assert isinstance(record.seq, Seq)
assert isinstance(record.id, basestring)
assert isinstance(record.name, basestring)
assert isinstance(record.description, basestring)
assert record.id != ""
if "accessions" in record.annotations:
accs = record.annotations["accessions"]
# Check for blanks, or entries with leading/trailing spaces
for acc in accs:
assert acc and acc == acc.strip(), \
"Bad accession in annotations: %s" % repr(acc)
assert len(set(accs)) == len(accs), \
"Repeated accession in annotations: %s" % repr(accs)
for ref in record.dbxrefs:
assert ref and ref == ref.strip(), \
"Bad cross reference in dbxrefs: %s" % repr(ref)
assert len(set(record.dbxrefs)) == len(record.dbxrefs), \
"Repeated cross reference in dbxrefs: %s" % repr(record.dbxrefs)
# Check the lists obtained by the different methods agree
assert compare_record(record, records2[i])
assert compare_record(record, records3[i])
assert compare_record(record, records4[i])
assert compare_record(record, records5[i])
if i < 3:
print(record_summary(record))
# Only printed the first three records: 0,1,2
if t_count > 4:
print(" ...")
if t_count > 3:
print(record_summary(records[-1]))
# Check Bio.SeqIO.read(...)
if t_count == 1:
record = SeqIO.read(t_filename, format=t_format)
assert isinstance(record, SeqRecord)
else:
try:
record = SeqIO.read(t_filename, t_format)
assert False, "Bio.SeqIO.read(...) should have failed"
except ValueError:
# Expected to fail
pass
# Check alphabets
for record in records:
base_alpha = Alphabet._get_base_alphabet(record.seq.alphabet)
if isinstance(base_alpha, Alphabet.SingleLetterAlphabet):
if t_format in no_alpha_formats:
# Too harsh?
assert base_alpha == Alphabet.single_letter_alphabet
else:
base_alpha = None
if base_alpha is None:
good = []
bad = []
given_alpha = None
elif isinstance(base_alpha, Alphabet.ProteinAlphabet):
good = protein_alphas
bad = dna_alphas + rna_alphas + nucleotide_alphas
elif isinstance(base_alpha, Alphabet.RNAAlphabet):
good = nucleotide_alphas + rna_alphas
bad = protein_alphas + dna_alphas
elif isinstance(base_alpha, Alphabet.DNAAlphabet):
good = nucleotide_alphas + dna_alphas
bad = protein_alphas + rna_alphas
elif isinstance(base_alpha, Alphabet.NucleotideAlphabet):
good = nucleotide_alphas
bad = protein_alphas
else:
assert t_format in no_alpha_formats, "Got %s from %s file" \
% (repr(base_alpha), t_format)
good = protein_alphas + dna_alphas + rna_alphas + nucleotide_alphas
bad = []
for given_alpha in good:
# These should all work...
given_base = Alphabet._get_base_alphabet(given_alpha)
for record in SeqIO.parse(t_filename, t_format, given_alpha):
base_alpha = Alphabet._get_base_alphabet(record.seq.alphabet)
assert isinstance(base_alpha, given_base.__class__)
assert base_alpha == given_base
if t_count == 1:
h = open(t_filename, mode)
record = SeqIO.read(h, t_format, given_alpha)
h.close()
assert isinstance(base_alpha, given_base.__class__)
assert base_alpha == given_base
for given_alpha in bad:
# These should all fail...
h = open(t_filename, mode)
try:
print(next(SeqIO.parse(h, t_format, given_alpha)))
h.close()
assert False, "Forcing wrong alphabet, %s, should fail (%s)" \
% (repr(given_alpha), t_filename)
except ValueError:
# Good - should fail
pass
h.close()
del good, bad, given_alpha, base_alpha
if t_alignment:
print("Testing reading %s format file %s as an alignment"
% (t_format, t_filename))
alignment = MultipleSeqAlignment(SeqIO.parse(
handle=t_filename, format=t_format))
assert len(alignment) == t_count
alignment_len = alignment.get_alignment_length()
# Check the record order agrees, and double check the
# sequence lengths all agree too.
for i in range(t_count):
assert compare_record(records[i], alignment[i])
assert len(records[i].seq) == alignment_len
print(alignment_summary(alignment))
# Some alignment file formats have magic characters which mean
# use the letter in this position in the first sequence.
# They should all have been converted by the parser, but if
# not reversing the record order might expose an error. Maybe.
records.reverse()
check_simple_write_read(records)
print("Finished tested reading files")
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_SeqIO.py
|
Python
|
gpl-2.0
| 28,253
|
[
"Biopython"
] |
d4c6c480ca4b9bc92015ae37b552777984a77d2ab5da76f48006585671b73131
|
"""
========================================
Ammonia inversion transition TKIN fitter
========================================
Ammonia inversion transition TKIN fitter translated from Erik Rosolowsky's
http://svn.ok.ubc.ca/svn/signals/nh3fit/
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import fitter
import matplotlib.cbook as mpcb
import copy
import model
from astropy import log
import astropy.units as u
from . import mpfit_messages
from ammonia_constants import (line_names, freq_dict, aval_dict, ortho_dict,
voff_lines_dict, tau_wts_dict)
def ammonia(xarr, tkin=20, tex=None, ntot=1e14, width=1, xoff_v=0.0,
fortho=0.0, tau=None, fillingfraction=None, return_tau=False,
background_tb=2.7315,
thin=False, verbose=False, return_components=False, debug=False):
"""
Generate a model Ammonia spectrum based on input temperatures, column, and
gaussian parameters
Parameters
----------
xarr: `pyspeckit.spectrum.units.SpectroscopicAxis`
Array of wavelength/frequency values
tkin: float
Kinetic temperature of the gas, in K
tex: float or None
Excitation temperature. Assumed LTE if unspecified (``None``), if
tex>tkin, or if ``thin`` is specified.
ntot: float
Total column density of NH3. Can be specified either as a log column
density (a float in the range 5-25, e.g., 15) or as a linear column
density (in the range 1e5-1e25, e.g., 1e15)
width: float
Line width in km/s
xoff_v: float
Line offset in km/s
fortho: float
Fraction of NH3 molecules in ortho state. Default assumes all para
(fortho=0).
tau: None or float
If tau (optical depth in the 1-1 line) is specified, ntot is NOT fit
but is set to a fixed value. The optical depths of the other lines are
fixed relative to tau_oneone
fillingfraction: None or float
fillingfraction is an arbitrary scaling factor to apply to the model
return_tau: bool
Return a dictionary of the optical depths in each line instead of a
synthetic spectrum
thin: bool
uses a different parametrization and requires only the optical depth,
width, offset, and tkin to be specified. In the 'thin' approximation,
tex is not used in computation of the partition function - LTE is
implicitly assumed
return_components: bool
Return a list of arrays, one for each hyperfine component, instead of
just one array
background_tb : float
The background brightness temperature. Defaults to TCMB.
verbose: bool
More messages
debug: bool
For debugging.
Returns
-------
spectrum: `numpy.ndarray`
Synthetic spectrum with same shape as ``xarr``
component_list: list
List of `numpy.ndarray`'s, one for each hyperfine component
tau_dict: dict
Dictionary of optical depth values for the various lines
(if ``return_tau`` is set)
"""
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('GHz')
if tex is not None:
# Yes, you certainly can have nonthermal excitation, tex>tkin.
#if tex > tkin: # cannot have Tex > Tkin
# tex = tkin
if thin: # tex is not used in this case
tex = tkin
else:
tex = tkin
if thin:
ntot = 1e15
elif 5 < ntot < 25:
# allow ntot to be specified as a logarithm. This is
# safe because ntot < 1e10 gives a spectrum of all zeros, and the
# plausible range of columns is not outside the specified range
ntot = 10**ntot
elif (25 < ntot < 1e5) or (ntot < 5):
# these are totally invalid for log/non-log
return 0
# fillingfraction is an arbitrary scaling for the data
# The model will be (normal model) * fillingfraction
if fillingfraction is None:
fillingfraction = 1.0
ckms = 2.99792458e5
ccms = ckms*1e5
g1 = 1
g2 = 1
h = 6.6260693e-27
kb = 1.3806505e-16
mu0 = 1.476e-18 # Dipole Moment in cgs (1.476 Debeye)
# Generate Partition Functions
nlevs = 51
jv=np.arange(nlevs)
ortho = jv % 3 == 0
para = ~ortho  # logical NOT of the ortho mask (boolean subtraction is unsupported in modern numpy)
Jpara = jv[para]
Jortho = jv[ortho]
Brot = 298117.06e6
Crot = 186726.36e6
runspec = np.zeros(len(xarr))
tau_dict = {}
para_count = 0
ortho_count = 1 # ignore 0-0
if tau is not None and thin:
"""
Use optical depth in the 1-1 line as a free parameter
The optical depths of the other lines are then set by the kinetic temperature
Tex is still a free parameter in the final spectrum calculation at the bottom
(technically, I think this process assumes LTE; Tex should come into play in
these equations, not just the final one)
"""
dT0 = 41.5 # Energy diff between (2,2) and (1,1) in K
trot = tkin/(1+tkin/dT0*np.log(1+0.6*np.exp(-15.7/tkin)))
tau_dict['oneone'] = tau
tau_dict['twotwo'] = tau*(23.722/23.694)**2*4/3.*5/3.*np.exp(-41.5/trot)
tau_dict['threethree'] = tau*(23.8701279/23.694)**2*3/2.*14./3.*np.exp(-101.1/trot)
tau_dict['fourfour'] = tau*(24.1394169/23.694)**2*8/5.*9/3.*np.exp(-177.34/trot)
else:
"""
Column density is the free parameter. It is used in conjunction with
the full partition function to compute the optical depth in each band
Given the complexity of these equations, it would be worth my while to
comment each step carefully.
"""
Zpara = (2*Jpara+1)*np.exp(-h*(Brot*Jpara*(Jpara+1)+
(Crot-Brot)*Jpara**2)/(kb*tkin))
Zortho = 2*(2*Jortho+1)*np.exp(-h*(Brot*Jortho*(Jortho+1)+
(Crot-Brot)*Jortho**2)/(kb*tkin))
for linename in line_names:
if ortho_dict[linename]:
orthoparafrac = fortho
Z = Zortho
count = ortho_count
ortho_count += 1
else:
orthoparafrac = 1.0-fortho
Z = Zpara
count = para_count # need to treat partition function separately
para_count += 1
# short variable names for readability
frq = freq_dict[linename]
partition = Z[count]
aval = aval_dict[linename]
# Friesen 2009 eqn A4 points out that the partition function actually says
# how many molecules are in the NH3(1-1) state, both upper *and* lower.
# population_upperlower = ntot * orthoparafrac * partition/(Z.sum())
# population_upperstate = population_upperlower / (1+np.exp(h*frq/(kb*tex)))
#
# Note Jan 1, 2015: This is accounted for in the eqn below. The
# only difference is that I have used Tkin where Friesen et al 2009
# use Tex. Since Tex describes which states are populated, that may
# be the correct one to use.
# Total population of the higher energy inversion transition
population_upperstate = ntot * orthoparafrac * partition/(Z.sum())
tau_dict[linename] = (population_upperstate /
(1. + np.exp(-h*frq/(kb*tkin) ))*ccms**2 /
(8*np.pi*frq**2) * aval *
(1-np.exp(-h*frq/(kb*tex))) /
(width/ckms*frq*np.sqrt(2*np.pi)) )
# allow tau(11) to be specified instead of ntot
# in the thin case, this is not needed: ntot plays no role
# this process allows you to specify tau without using the approximate equations specified
# above. It should remove ntot from the calculations anyway...
if tau is not None and not thin:
tau11_temp = tau_dict['oneone']
# re-scale all optical depths so that tau is as specified, but the relative taus
# are set by the kinetic temperature and partition functions
for linename,t in tau_dict.iteritems():
tau_dict[linename] = t * tau/tau11_temp
components =[]
for linename in line_names:
voff_lines = np.array(voff_lines_dict[linename])
tau_wts = np.array(tau_wts_dict[linename])
lines = (1-voff_lines/ckms)*freq_dict[linename]/1e9
tau_wts = tau_wts / (tau_wts).sum()
nuwidth = np.abs(width/ckms*lines)
nuoff = xoff_v/ckms*lines
# tau array
tauprof = np.zeros(len(xarr))
for kk,nuo in enumerate(nuoff):
tauprof += (tau_dict[linename] * tau_wts[kk] *
np.exp(-(xarr.value+nuo-lines[kk])**2 / (2.0*nuwidth[kk]**2)) *
fillingfraction)
components.append( tauprof )
T0 = (h*xarr.value*1e9/kb) # "temperature" of wavelength
if tau is not None and thin:
#runspec = tauprof+runspec
# is there ever a case where you want to ignore the optical depth function? I think no
runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/background_tb)-1))*(1-np.exp(-tauprof))+runspec
else:
runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/background_tb)-1))*(1-np.exp(-tauprof))+runspec
if runspec.min() < 0 and background_tb == 2.7315:
raise ValueError("Model dropped below zero. That is not possible normally. Here are the input values: "+
("tex: %f " % tex) +
("tkin: %f " % tkin) +
("ntot: %f " % ntot) +
("width: %f " % width) +
("xoff_v: %f " % xoff_v) +
("fortho: %f " % fortho)
)
if verbose or debug:
log.info("tkin: %g tex: %g ntot: %g width: %g xoff_v: %g fortho: %g fillingfraction: %g" % (tkin,tex,ntot,width,xoff_v,fortho,fillingfraction))
if return_components:
return (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/background_tb)-1))*(1-np.exp(-1*np.array(components)))
if return_tau:
return tau_dict
return runspec
class ammonia_model(model.SpectralModel):
def __init__(self,npeaks=1,npars=6,
parnames=['tkin','tex','ntot','width','xoff_v','fortho'],
**kwargs):
self.npeaks = npeaks
self.npars = npars
self._default_parnames = parnames
self.parnames = copy.copy(self._default_parnames)
# all fitters must have declared modelfuncs, which should take the fitted pars...
self.modelfunc = ammonia
self.n_modelfunc = self.n_ammonia
# for fitting ammonia simultaneously with a flat background
self.onepeakammonia = fitter.vheightmodel(ammonia)
#self.onepeakammoniafit = self._fourparfitter(self.onepeakammonia)
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
# Remove keywords parsed by parinfo and ignored by the fitter
for kw in ('tied','partied'):
if kw in kwargs:
kwargs.pop(kw)
# enforce ammonia-specific parameter limits
for par in self.default_parinfo:
if 'tex' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],2.73), par.limits[1])
if 'tkin' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],2.73), par.limits[1])
if 'width' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
if 'fortho' in par.parname.lower():
par.limited = (True,True)
if par.limits[1] != 0:
par.limits = (max(par.limits[0],0), min(par.limits[1],1))
else:
par.limits = (max(par.limits[0],0), 1)
if 'ntot' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
# lower case? self.modelfunc_kwargs.update({'parnames':self.parinfo.parnames})
self.use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else False
self.fitunits = 'GHz'
def __call__(self,*args,**kwargs):
return self.multinh3fit(*args,**kwargs)
def n_ammonia(self, pars=None, parnames=None, **kwargs):
"""
Returns a function that sums over N ammonia line profiles, where N is the length of
tkin,tex,ntot,width,xoff_v,fortho *OR* N = len(pars) / 6
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
*pars* [ list ]
a list with len(pars) = (6-nfixed)n, assuming
tkin,tex,ntot,width,xoff_v,fortho repeated
*parnames* [ list ]
len(parnames) must = len(pars). parnames determine how the ammonia
function parses the arguments
"""
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = zip(*pars.items())
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
elif parnames is None:
parvals = pars
parnames = self.parnames
else:
parvals = pars
if len(pars) != len(parnames):
# this should only be needed when other codes are changing the number of peaks
# during a copy, as opposed to letting them be set by a __call__
# (n_modelfuncs = n_ammonia can be called directly)
# n_modelfuncs doesn't care how many peaks there are
if len(pars) % len(parnames) == 0:
parnames = [p for ii in range(len(pars)/len(parnames)) for p in parnames]
npars = len(parvals) / self.npeaks
else:
raise ValueError("Wrong array lengths passed to n_ammonia!")
else:
npars = len(parvals) / self.npeaks
self._components = []
def L(x):
v = np.zeros(len(x))
for jj in xrange(self.npeaks):
modelkwargs = kwargs.copy()
for ii in xrange(npars):
name = parnames[ii+jj*npars].strip('0123456789').lower()
modelkwargs.update({name:parvals[ii+jj*npars]})
v += ammonia(x,**modelkwargs)
return v
return L
def components(self, xarr, pars, hyperfine=False, **kwargs):
"""
Ammonia components don't follow the default, since in Galactic astronomy the hyperfine components should be well-separated.
If you want to see the individual components overlaid, you'll need to pass hyperfine to the plot_fit call
"""
comps=[]
for ii in xrange(self.npeaks):
if hyperfine:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
comps.append( ammonia(xarr,return_components=True,**modelkwargs) )
else:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
comps.append( [ammonia(xarr,return_components=False,**modelkwargs)] )
modelcomponents = np.concatenate(comps)
return modelcomponents
def multinh3fit(self, xax, data, err=None,
parinfo=None,
quiet=True, shh=True,
debug=False,
maxiter=200,
use_lmfit=False,
veryverbose=False, **kwargs):
"""
Fit multiple nh3 profiles (multiple can be 1)
Inputs:
xax - x axis
data - y axis
npeaks - How many nh3 profiles to fit? Default 1 (this could supersede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 6*npeaks. If npeaks > 1 and length = 6, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [tkin, tex, ntot (or tau), width, offset, ortho fraction] * npeaks
If len(params) % 6 == 0, npeaks will be set to len(params) / 6
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0, Tex and Tkin > Tcmb)
limitedmax/maxpars - set upper limits on each parameter
parnames - default parameter names, important for setting kwargs in model ['tkin','tex','ntot','width','xoff_v','fortho']
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if parinfo is None:
parinfo = self.parinfo = self.make_parinfo(**kwargs)
else:
if isinstance(parinfo, ParinfoList):
if not quiet:
log.info("Using user-specified parinfo.")
self.parinfo = parinfo
else:
if not quiet:
log.info("Using something like a user-specified parinfo, but not.")
self.parinfo = ParinfoList([p if isinstance(p,Parinfo) else Parinfo(p)
for p in parinfo],
preserve_order=True)
fitfun_kwargs = dict((x,y) for (x,y) in kwargs.items()
if x not in ('npeaks', 'params', 'parnames',
'fixed', 'limitedmin', 'limitedmax',
'minpars', 'maxpars', 'tied',
'max_tem_step'))
if 'use_lmfit' in fitfun_kwargs:
raise KeyError("use_lmfit was specified in a location where it "
"is unacceptable")
npars = len(parinfo)/self.npeaks
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p,
parnames=parinfo.parnames,
**fitfun_kwargs)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p,
parnames=parinfo.parnames,
**fitfun_kwargs)(x))/err]
return f
if veryverbose:
log.info("GUESSES: ")
log.info(str(parinfo))
#log.info "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
if use_lmfit:
return self.lmfitter(xax, data, err=err,
parinfo=parinfo,
quiet=quiet,
debug=debug)
else:
mp = mpfit(mpfitfun(xax,data,err),
parinfo=parinfo,
maxiter=maxiter,
quiet=quiet,
debug=debug)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
parinfo[i]['error'] = mpperr[i]
if not shh:
log.info("Fit status: {0}".format(mp.status))
log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
log.info("Fit error message: {0}".format(mp.errmsg))
log.info("Final fit values: ")
for i,p in enumerate(mpp):
log.info(" ".join((parinfo[i]['parname'], str(p), " +/- ",
str(mpperr[i]))))
log.info(" ".join(("Chi2: ", str(mp.fnorm)," Reduced Chi2: ",
str(mp.fnorm/len(data)), " DOF:",
str(len(data)-len(mpp)))))
self.mp = mp
self.parinfo = parinfo
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_ammonia(pars=self.mpp, parnames=self.mppnames,
**fitfun_kwargs)(xax)
indiv_parinfo = [self.parinfo[jj*self.npars:(jj+1)*self.npars]
for jj in xrange(len(self.parinfo)/self.npars)]
modelkwargs = [
dict([(p['parname'].strip("0123456789").lower(),p['value']) for p in pi])
for pi in indiv_parinfo]
self.tau_list = [ammonia(xax,return_tau=True,**mk) for mk in modelkwargs]
return self.mpp,self.model,self.mpperr,chi2
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20,10, 1e15, 1.0, 0.0, 1.0]
def annotations(self):
from decimal import Decimal # for formatting
tex_key = {'tkin':'T_K', 'tex':'T_{ex}', 'ntot':'N', 'fortho':'F_o',
'width':'\\sigma', 'xoff_v':'v', 'fillingfraction':'FF',
'tau':'\\tau_{1-1}'}
# small hack below: don't quantize if error > value. We want to see the values.
label_list = []
for pinfo in self.parinfo:
parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
parnum = int(pinfo['parname'][-1])
if pinfo['fixed']:
formatted_value = "%s" % pinfo['value']
pm = ""
formatted_error=""
else:
formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
pm = "$\\pm$"
formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
def make_parinfo(self, quiet=True,
npeaks=1,
params=(20,20,14,1.0,0.0,0.5), parnames=None,
fixed=(False,False,False,False,False,False),
limitedmin=(True,True,True,True,False,True),
limitedmax=(False,False,False,False,False,True),
minpars=(2.73,2.73,0,0,0,0),
maxpars=(0,0,0,0,0,1),
tied=('',)*6,
max_tem_step=1.,
**kwargs
):
if not quiet:
log.info("Creating a 'parinfo' from guesses.")
self.npars = len(params) / npeaks
if len(params) != npeaks and (len(params) / self.npars) > npeaks:
npeaks = len(params) / self.npars
self.npeaks = npeaks
if isinstance(params,np.ndarray): params=params.tolist()
# this is actually a hack, even though it's decently elegant
# somehow, parnames was being changed WITHOUT being passed as a variable
# this doesn't make sense - at all - but it happened.
# (it is possible for self.parnames to have npars*npeaks elements where
# npeaks > 1 coming into this function even though only 6 pars are specified;
# _default_parnames is the workaround)
if parnames is None: parnames = copy.copy(self._default_parnames)
partype_dict = dict(zip(['params', 'parnames', 'fixed',
'limitedmin', 'limitedmax', 'minpars',
'maxpars', 'tied'],
[params, parnames, fixed, limitedmin,
limitedmax, minpars, maxpars, tied]))
# make sure all various things are the right length; if they're
# not, fix them using the defaults
# (you can put in guesses of length 12 but leave the rest length 6;
# this code then doubles the length of everything else)
for partype,parlist in partype_dict.iteritems():
if len(parlist) != self.npars*self.npeaks:
# if you leave the defaults, or enter something that can be
# multiplied by npars to get to the right number of
# gaussians, it will just replicate
if len(parlist) == self.npars:
partype_dict[partype] *= npeaks
elif len(parlist) > self.npars:
# DANGER: THIS SHOULD NOT HAPPEN!
log.warn("WARNING! Input parameters were longer than allowed for variable {0}".format(parlist))
partype_dict[partype] = partype_dict[partype][:self.npars]
elif parlist==params: # this instance shouldn't really be possible
partype_dict[partype] = [20,20,1e10,1.0,0.0,0.5] * npeaks
elif parlist==fixed:
partype_dict[partype] = [False] * len(params)
elif parlist==limitedmax: # only fortho, fillingfraction have upper limits
partype_dict[partype] = (np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')
elif parlist==limitedmin: # no physical values can be negative except velocity
partype_dict[partype] = (np.array(parnames) != 'xoff_v')
elif parlist==minpars:
# all have minima of zero except kinetic temperature, which can't be below CMB.
# Excitation temperature technically can be, but not in this model
partype_dict[partype] = ((np.array(parnames) == 'tkin') + (np.array(parnames) == 'tex')) * 2.73
elif parlist==maxpars: # fractions have upper limits of 1.0
partype_dict[partype] = ((np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')).astype('float')
elif parlist==parnames: # assumes the right number of parnames (essential)
partype_dict[partype] = list(parnames) * self.npeaks
elif parlist==tied:
partype_dict[partype] = [_increment_string_number(t, ii*self.npars)
for t in tied
for ii in range(self.npeaks)]
if len(parnames) != len(partype_dict['params']):
raise ValueError("Wrong array lengths AFTER fixing them")
# used in components. Is this just a hack?
self.parnames = partype_dict['parnames']
parinfo = [ {'n':ii, 'value':partype_dict['params'][ii],
'limits':[partype_dict['minpars'][ii],partype_dict['maxpars'][ii]],
'limited':[partype_dict['limitedmin'][ii],partype_dict['limitedmax'][ii]], 'fixed':partype_dict['fixed'][ii],
'parname':partype_dict['parnames'][ii]+str(ii/self.npars),
'tied':partype_dict['tied'][ii],
'mpmaxstep':max_tem_step*float(partype_dict['parnames'][ii] in ('tex','tkin')), # must force small steps in temperature (True = 1.0)
'error': 0}
for ii in xrange(len(partype_dict['params'])) ]
# hack: remove 'fixed' pars
#parinfo_with_fixed = parinfo
#parinfo = [p for p in parinfo_with_fixed if not p['fixed']]
#fixed_kwargs = dict((p['parname'].strip("0123456789").lower(),
# p['value'])
# for p in parinfo_with_fixed if p['fixed'])
## don't do this - it breaks the NEXT call because npars != len(parnames) self.parnames = [p['parname'] for p in parinfo]
## this is OK - not a permanent change
#parnames = [p['parname'] for p in parinfo]
## not OK self.npars = len(parinfo)/self.npeaks
parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
#import pdb; pdb.set_trace()
return parinfo
class ammonia_model_vtau(ammonia_model):
def __init__(self,**kwargs):
super(ammonia_model_vtau,self).__init__(parnames=['tkin','tex','tau','width','xoff_v','fortho'])
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20, 10, 1, 1.0, 0.0, 1.0]
def __call__(self,*args,**kwargs):
return self.multinh3fit(*args,**kwargs)
class ammonia_model_background(ammonia_model):
def __init__(self,**kwargs):
super(ammonia_model_background,self).__init__(npars=7,
parnames=['tkin', 'tex',
'ntot',
'width',
'xoff_v',
'fortho',
'background_tb'])
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20,10, 1, 1.0, 0.0, 1.0, 2.73]
def __call__(self,*args,**kwargs):
#if self.multisingle == 'single':
# return self.onepeakammoniafit(*args,**kwargs)
#elif self.multisingle == 'multi':
# # Why is tied 6 instead of 7?
return self.multinh3fit(*args,**kwargs)
def make_parinfo(self, npeaks=1, err=None,
params=(20,20,14,1.0,0.0,0.5,2.73), parnames=None,
fixed=(False,False,False,False,False,False,True),
limitedmin=(True,True,True,True,False,True,True),
limitedmax=(False,False,False,False,False,True,True),
minpars=(2.73,2.73,0,0,0,0,2.73), parinfo=None,
maxpars=(0,0,0,0,0,1,2.73),
tied=('',)*7,
quiet=True, shh=True,
veryverbose=False, **kwargs):
return super(ammonia_model_background,
self).make_parinfo(npeaks=npeaks, err=err, params=params,
parnames=parnames, fixed=fixed,
limitedmin=limitedmin,
limitedmax=limitedmax, minpars=minpars,
parinfo=parinfo, maxpars=maxpars,
tied=tied, quiet=quiet, shh=shh,
veryverbose=veryverbose, **kwargs)
def multinh3fit(self, xax, data, npeaks=1, err=None,
params=(20,20,14,1.0,0.0,0.5,2.73), parnames=None,
fixed=(False,False,False,False,False,False,True),
limitedmin=(True,True,True,True,False,True,True),
limitedmax=(False,False,False,False,False,True,True),
minpars=(2.73,2.73,0,0,0,0,2.73), parinfo=None,
maxpars=(0,0,0,0,0,1,2.73),
tied=('',)*7,
quiet=True, shh=True,
veryverbose=False, **kwargs):
return super(ammonia_model_background,
self).multinh3fit(xax, data, npeaks=npeaks, err=err,
params=params, parnames=parnames,
fixed=fixed, limitedmin=limitedmin,
limitedmax=limitedmax, minpars=minpars,
parinfo=parinfo, maxpars=maxpars,
tied=tied, quiet=quiet, shh=shh,
veryverbose=veryverbose, **kwargs)
def annotations(self):
from decimal import Decimal # for formatting
tex_key = {'tkin':'T_K', 'tex':'T_{ex}', 'ntot':'N', 'fortho':'F_o',
'width':'\\sigma', 'xoff_v':'v', 'fillingfraction':'FF',
'tau':'\\tau_{1-1}', 'background_tb':'T_{BG}'}
# small hack below: don't quantize if error > value. We want to see the values.
label_list = []
for pinfo in self.parinfo:
parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
parnum = int(pinfo['parname'][-1])
if pinfo['fixed']:
formatted_value = "%s" % pinfo['value']
pm = ""
formatted_error=""
else:
formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
pm = "$\\pm$"
formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
def _increment_string_number(st, count):
"""
Increment a number in a string
Expects input of the form: p[6]
"""
import re
dig = re.compile('[0-9]+')
if dig.search(st):
n = int(dig.search(st).group())
result = dig.sub(str(n+count), st)
return result
else:
return st
|
bsipocz/pyspeckit
|
pyspeckit/spectrum/models/ammonia.py
|
Python
|
mit
| 34,486
|
[
"Gaussian"
] |
a0ce5c825c6c5f23e964a4aa09fa9f5c3baf1bcc8f3cef8768ec02e8ba62ce9a
|
"""
pyDatalog
Copyright (C) 2012 Pierre Carbonnelle
Copyright (C) 2004 Shai Berger
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
This work is derived from Pythologic, (C) 2004 Shai Berger,
in accordance with the Python Software Foundation licence.
(See http://code.activestate.com/recipes/303057/ and
http://www.python.org/download/releases/2.0.1/license/ )
"""
"""
Design principle:
Instead of writing our own parser, we use Python's parser. The datalog code is first compiled into Python byte code,
then "undefined" variables are initialized as instances of Symbol, and the code is finally executed to load the clauses.
This is done in the load() and add_program() method of Parser class.
Methods exposed by this file:
* load(code)
* add_program(func)
* ask(code)
Classes hierarchy contained in this file: see class diagram on http://bit.ly/YRnMPH
* ProgramContext : class to safely differentiate between In-line queries and pyDatalog program / ask(), using ProgramMode global variable
* _transform_ast : performs some modifications of the abstract syntax tree of the datalog program
* LazyList : a subclassable list that is populated when it is accessed. Mixin for pyDatalog.Variable.
* LazyListOfList : Mixin for Literal and Body
* Literal : made of a predicate and a list of arguments. Instantiated when a symbol is called while executing the datalog program
* Body : a list of literals to be used in a clause. Instantiated when & is executed in the datalog program
* Expression : base class for objects that can be part of an inequality or operation
* VarSymbol : represents the symbol of a variable. Mixin for pyDatalog.Variable
* Symbol : contains a constant, a variable or a predicate. Instantiated before executing the datalog program
* Function : represents f[X]
* Operation : made of an operator and 2 operands. Instantiated when an operator is applied to a symbol while executing the datalog program
* Lambda : represents a lambda function, used in expressions
* Aggregate : represents calls to aggregation method, e.g. min(X)
"""
import ast
from collections import defaultdict, OrderedDict
import inspect
import os
import re
import string
import six
six.add_move(six.MovedModule('UserList', 'UserList', 'collections'))
from six.moves import builtins, xrange, UserList
import sys
import weakref
PY3 = sys.version_info[0] == 3
func_code = '__code__' if PY3 else 'func_code'
try:
from . import pyEngine
except ValueError:
import pyEngine
pyDatalog = None #circ: later set by pyDatalog to avoid circular import
""" global variable to differentiate between in-line queries and pyDatalog program / ask"""
ProgramMode = False
class ProgramContext(object):
"""class to safely use ProgramMode within the "with" statement"""
def __enter__(self):
global ProgramMode
ProgramMode = True
def __exit__(self, exc_type, exc_value, traceback):
global ProgramMode
ProgramMode = False
""" Parser methods """
def add_symbols(names, variables):
for name in names:
variables[name] = Symbol(name)
class _transform_ast(ast.NodeTransformer):
""" does some transformation of the Abstract Syntax Tree of the datalog program """
def visit_Call(self, node):
"""rename builtins to allow customization"""
self.generic_visit(node)
if hasattr(node.func, 'id'):
node.func.id = '_sum' if node.func.id == 'sum' else node.func.id
node.func.id = '_len' if node.func.id == 'len' else node.func.id
node.func.id = '_min' if node.func.id == 'min' else node.func.id
node.func.id = '_max' if node.func.id == 'max' else node.func.id
return node
def visit_Compare(self, node):
""" rename 'in' to allow customization of (X in (1,2))"""
self.generic_visit(node)
if 1 < len(node.comparators):
raise pyDatalog.DatalogError("Syntax error: please verify parenthesis around (in)equalities", node.lineno, None)
if not isinstance(node.ops[0], (ast.In, ast.NotIn)): return node
var = node.left # X, an _ast.Name object
comparators = node.comparators[0] # (1,2), an _ast.Tuple object
newNode = ast.Call(
ast.Attribute(var, '_in' if isinstance(node.ops[0], ast.In) else '_not_in', var.ctx), # func
[comparators], # args
[], # keywords
None, # starargs
None # kwargs
)
return ast.fix_missing_locations(newNode)
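# Illustrative note (derived from the transform above): an expression such as
#   (X in (1, 2))
# is rewritten into the equivalent of X._in((1, 2)), and (X not in (1, 2)) into
# X._not_in((1, 2)), so the Expression mixin methods further below can intercept
# membership tests.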
def load(code, newglobals=None, defined=None, function='load'):
""" code : a string or list of string
newglobals : global variables for executing the code
defined : reserved symbols
"""
newglobals, defined = newglobals or {}, defined or set([])
# remove indentation based on first non-blank line
lines = code.splitlines() if isinstance(code, six.string_types) else code
r = re.compile(r'^\s*')
for line in lines:
spaces = r.match(line).group()
if spaces and line != spaces:
break
code = '\n'.join([line.replace(spaces,'') for line in lines])
tree = ast.parse(code, function, 'exec')
try:
tree = _transform_ast().visit(tree)
except pyDatalog.DatalogError as e:
e.function = function
e.message = e.value
e.value = "%s\n%s" % (e.value, lines[e.lineno-1])
six.reraise(*sys.exc_info())
code = compile(tree, function, 'exec')
defined = defined.union(dir(builtins))
defined.add('None')
for name in set(code.co_names).difference(defined): # for names that are not defined
add_symbols((name,), newglobals)
try:
with ProgramContext():
six.exec_(code, newglobals)
except pyDatalog.DatalogError as e:
e.function = function
traceback = sys.exc_info()[2]
e.lineno = 1
while True:
if traceback.tb_frame.f_code.co_name == '<module>':
e.lineno = traceback.tb_lineno
break
elif traceback.tb_next:
traceback = traceback.tb_next
e.message = e.value
e.value = "%s\n%s" % (e.value, lines[e.lineno-1])
six.reraise(*sys.exc_info())
class _NoCallFunction(object):
""" This class prevents a call to a datalog program created using the 'program' decorator """
def __call__(self):
raise TypeError("Datalog programs are not callable")
def add_program(func):
""" A helper for decorator implementation """
source_code = inspect.getsource(func)
lines = source_code.splitlines()
# drop the first 2 lines (@pydatalog and def _() )
if '@' in lines[0]: del lines[0]
if 'def' in lines[0]: del lines[0]
source_code = lines
    try:
        code = func.__code__
    except AttributeError:
        raise TypeError("function or method argument expected")
newglobals = func.__globals__.copy() if PY3 else func.func_globals.copy()
func_name = func.__name__ if PY3 else func.func_name
defined = set(code.co_varnames).union(set(newglobals.keys())) # local variables and global variables
load(source_code, newglobals, defined, function=func_name)
return _NoCallFunction()
def ask(code, _fast=None):
with ProgramContext():
tree = ast.parse(code, 'ask', 'eval')
tree = _transform_ast().visit(tree)
code = compile(tree, 'ask', 'eval')
newglobals = {}
add_symbols(code.co_names, newglobals)
parsed_code = eval(code, newglobals)
parsed_code = parsed_code.literal() if isinstance(parsed_code, Body) else parsed_code
return pyEngine.toAnswer(parsed_code.lua, parsed_code.lua.ask(_fast))
""" Parser classes """
class LazyList(UserList.UserList):
"""a subclassable list that is populated when it is accessed """
"""used by Literal, Body, pyDatalog.Variable to delay evaluation of datalog queries written in python """
""" during debugging, beware that viewing a Lazylist will force its udpate"""
def __init__(self):
self.todo = None # self.todo.ask() calculates self.data
self._data = []
@property
def data(self):
# returns the list, after recalculation if needed
if self.todo is not None: self.todo.ask()
return self._data
def _value(self):
return self.data
def v(self):
return self._data[0] if self.data else None
class LazyListOfList(LazyList):
""" represents the result of an inline query (a Literal or Body)"""
def __eq__(self, other):
return set(self.data) == set(other)
def __ge__(self, other):
# returns the first occurrence of 'other' variable in the result of a function
if self.data:
assert isinstance(other, pyDatalog.Variable)
for t in self.literal().terms:
if id(t) == id(other):
return t.data[0]
class Expression(object):
@classmethod
def _for(cls, operand):
if isinstance(operand, (Expression, Aggregate)):
return operand
if isinstance(operand, type(lambda: None)):
return Lambda(operand)
return Symbol(operand, forced_type="constant")
def _precalculation(self):
return Body() # by default, there is no precalculation needed to evaluate an expression
def __eq__(self, other):
assert isinstance(self, (VarSymbol, Function)), "Left-hand side of equality must be a symbol or function, not an expression."
other = Expression._for(other)
if self._pyD_type == 'variable' and (not isinstance(other, VarSymbol) or other._pyD_type=='constant'):
return Literal.make_for_comparison(self, '==', other)
else:
return Literal.make("=", (self, other))
def __ne__(self, other):
return Literal.make_for_comparison(self, '!=', other)
def __le__(self, other):
return Literal.make_for_comparison(self, '<=', other)
def __lt__(self, other):
return Literal.make_for_comparison(self, '<', other)
def __ge__(self, other):
return Literal.make_for_comparison(self, '>=', other)
def __gt__(self, other):
return Literal.make_for_comparison(self, '>', other)
def _in(self, values):
""" called when compiling (X in (1,2)) """
return Literal.make_for_comparison(self, 'in', values)
def _not_in(self, values):
""" called when compiling (X not in (1,2)) """
return Literal.make_for_comparison(self, 'not in', values)
    def __pos__(self):
        """ called when compiling +X """
        return 0 + self
def __neg__(self):
""" called when compiling -X """
return 0 - self
def __add__(self, other):
return Operation(self, '+', other)
def __sub__(self, other):
return Operation(self, '-', other)
def __mul__(self, other):
return Operation(self, '*', other)
def __div__(self, other):
return Operation(self, '/', other)
def __truediv__(self, other):
return Operation(self, '/', other)
def __floordiv__(self, other):
return Operation(self, '//', other)
def __radd__(self, other):
return Operation(other, '+', self)
def __rsub__(self, other):
return Operation(other, '-', self)
def __rmul__(self, other):
return Operation(other, '*', self)
def __rdiv__(self, other):
return Operation(other, '/', self)
    def __rtruediv__(self, other):
        return Operation(other, '/', self)
def __rfloordiv__(self, other):
return Operation(other, '//', self)
def __getitem__(self, keys):
""" called when compiling expression[keys] """
return Operation(self, 'slice', keys)
def __getslice__(self, i, j):
return self.__getitem__(slice(i,j))
class VarSymbol(Expression):
""" represents the symbol of a variable, both inline and in pyDatalog program
"""
def __init__ (self, name, forced_type=None):
self._pyD_negated = False # for aggregate with sort in descending order
if isinstance(name, (list, tuple, xrange)):
self._pyD_value = list(map(Expression._for, name))
self._pyD_name = str([element._pyD_name for element in self._pyD_value])
self._pyD_type = 'tuple'
self._pyD_lua = pyEngine.Interned.of([e._pyD_lua for e in self._pyD_value])
elif isinstance(name, slice):
start, stop, step = map(Expression._for, (name.start, name.stop, name.step))
self._pyD_value = slice(start, stop, step)
self._pyD_name = '[%s:%s:%s]' % (start._pyD_name, stop._pyD_name, step._pyD_name)
self._pyD_type = 'slice'
self._pyD_lua = slice(start, stop, step) # TODO ._pyD_lua ?
elif forced_type=="constant" or isinstance(name, int) or not name or name[0] not in string.ascii_uppercase + '_':
self._pyD_value = name
self._pyD_name = str(name)
self._pyD_type = 'constant'
self._pyD_lua = pyEngine.Const(name)
else:
self._pyD_value = name
self._pyD_name = name
self._pyD_type = 'variable'
self._pyD_lua = pyEngine.Var(name)
def __neg__(self):
""" called when compiling -X """
neg = Symbol(self._pyD_value)
neg._pyD_negated = True
expr = 0 - self
expr.variable = neg
return expr
def lua_expr(self, variables):
if self._pyD_type == 'variable':
return pyEngine.Operand('variable', variables.index(self._pyD_name))
elif self._pyD_type == 'tuple':
return pyEngine.Operand('tuple', [element.lua_expr(variables) for element in self._pyD_value])
elif self._pyD_type == 'slice':
return pyEngine.Operand('slice', slice(self._pyD_lua.start.lua_expr(variables),
self._pyD_lua.stop.lua_expr(variables),
self._pyD_lua.step.lua_expr(variables),))
else:
return pyEngine.Operand('constant', self._pyD_value)
def _variables(self):
if self._pyD_type == 'variable':
return OrderedDict({self._pyD_name : self})
elif self._pyD_type == 'tuple':
variables = OrderedDict()
for element in self._pyD_value:
variables.update(element._variables())
return variables
elif self._pyD_type == 'slice':
variables = OrderedDict()
variables.update(self._pyD_lua.start._variables())
variables.update(self._pyD_lua.stop._variables())
variables.update(self._pyD_lua.step._variables())
return variables
else:
return OrderedDict()
class Symbol(VarSymbol):
"""
can be constant, list, tuple, variable or predicate name
ask() creates a query
created when analysing the datalog program
"""
def __call__ (self, *args, **kwargs):
""" called when compiling p(args) """
"time to create a literal !"
if self._pyD_name == 'ask':
if 1<len(args):
raise RuntimeError('Too many arguments for ask !')
fast = kwargs['_fast'] if '_fast' in list(kwargs.keys()) else False
literal = args[0] if not isinstance(args[0], Body) else args[0].literal()
return pyEngine.toAnswer(literal.lua, literal.lua.ask(fast))
elif self._pyD_name == '_sum':
if isinstance(args[0], VarSymbol):
return Sum_aggregate(args[0], for_each=kwargs.get('for_each', kwargs.get('key', [])))
else:
return sum(args)
elif self._pyD_name == 'concat':
return Concat_aggregate(args[0], order_by=kwargs.get('order_by',kwargs.get('key', [])), sep=kwargs['sep'])
elif self._pyD_name == '_min':
if isinstance(args[0], VarSymbol):
return Min_aggregate(args[0], order_by=kwargs.get('order_by',kwargs.get('key', [])),)
else:
return min(args)
elif self._pyD_name == '_max':
if isinstance(args[0], VarSymbol):
return Max_aggregate(args[0], order_by=kwargs.get('order_by',kwargs.get('key', [])),)
else:
return max(args)
elif self._pyD_name == 'rank':
return Rank_aggregate(None, for_each=kwargs.get('for_each', []), order_by=kwargs.get('order_by', []))
elif self._pyD_name == 'running_sum':
return Running_sum(args[0], for_each=kwargs.get('for_each', []), order_by=kwargs.get('order_by', []))
elif self._pyD_name == '_len':
if isinstance(args[0], VarSymbol):
return Len_aggregate(args[0])
else:
return len(args[0])
else:
new_args, pre_calculations = [], Body()
for arg in args:
if isinstance(arg, (Operation, Function, Lambda)):
Y = Function.newSymbol()
new_args.append(Y)
pre_calculations = pre_calculations & (Y == arg)
else:
new_args.append(arg)
literal = Literal.make(self._pyD_name, tuple(new_args))
literal.pre_calculations = pre_calculations
return literal
def __getattr__(self, name):
""" called when compiling class.attribute """
return Symbol(self._pyD_name + '.' + name)
def __getitem__(self, keys):
""" called when compiling name[keys] """
return Function(self._pyD_name, keys)
def __str__(self):
return str(self._pyD_name)
def __setitem__(self, keys, value):
""" called when compiling f[X] = expression """
function = Function(self._pyD_name, keys)
# following statement translates it into (f[X]==V) <= (V==expression)
(function == function.symbol) <= (function.symbol == value)
class Function(Expression):
""" represents predicate[a, b]"""
Counter = 0
@classmethod
def newSymbol(cls):
Function.Counter += 1
return Symbol('_pyD_X%i' % Function.Counter)
def __init__(self, name, keys):
if not isinstance(keys, tuple):
keys = (keys,)
self.name = "%s[%i]" % (name, len(keys))
self.keys, self.pre_calculations = [], Body()
for key in keys:
if isinstance(key, (Operation, Function, Lambda)):
Y = Function.newSymbol()
self.keys.append(Y)
self.pre_calculations = self.pre_calculations & (Y == key)
else:
self.keys.append(key)
self.symbol = Function.newSymbol()
self.dummy_variable_name = '_pyD_X%i' % Function.Counter
@property
def _pyD_name(self):
return str(self)
def __eq__(self, other):
return Literal.make_for_comparison(self, '==', other)
# following methods are used when the function is used in an expression
def _variables(self):
return {self.dummy_variable_name : self.symbol}
def lua_expr(self, variables):
return pyEngine.Operand('variable', variables.index(self.dummy_variable_name))
def _precalculation(self):
return self.pre_calculations & (self == self.symbol)
class Operation(Expression):
"""made of an operator and 2 operands. Instantiated when an operator is applied to a symbol while executing the datalog program"""
def __init__(self, lhs, operator, rhs):
self.operator = operator
self.lhs = Expression._for(lhs)
self.rhs = Expression._for(rhs)
@property
def _pyD_name(self):
return str(self)
def _variables(self):
temp = self.lhs._variables()
temp.update(self.rhs._variables())
return temp
def _precalculation(self):
return self.lhs._precalculation() & self.rhs._precalculation()
def lua_expr(self, variables):
return pyEngine.Expression(self.operator, self.lhs.lua_expr(variables), self.rhs.lua_expr(variables))
def __str__(self):
return '(' + str(self.lhs._pyD_name) + self.operator + str(self.rhs._pyD_name) + ')'
class Lambda(Expression):
"""represents a lambda function, used in expressions"""
def __init__(self, other):
self.operator = '<lambda>'
self.lambda_object = other
@property
def _pyD_name(self):
return str(self)
def _variables(self):
return dict([ [var, Symbol(var)] for var in getattr(self.lambda_object,func_code).co_varnames])
def lua_expr(self, variables):
operands = [pyEngine.Operand('variable', variables.index(varname)) for varname in getattr(self.lambda_object,func_code).co_varnames]
return pyEngine.make_lambda(self.lambda_object, operands)
def __str__(self):
return 'lambda%i(%s)' % (id(self.lambda_object), ','.join(getattr(self.lambda_object,func_code).co_varnames))
class Literal(object):
"""
created by source code like 'p(a, b)'
operator '<=' means 'is true if', and creates a Clause
"""
def __init__(self, predicate_name, terms, prearity=None, aggregate=None):
self.predicate_name = predicate_name
self.prearity = prearity or len(terms)
self.pre_calculations = Body()
self.args = terms # TODO simplify
self.todo = self
cls_name = predicate_name.split('.')[0].replace('~','') if 1< len(predicate_name.split('.')) else ''
self.terms = []
for i, arg in enumerate(terms):
if isinstance(arg, Literal):
raise pyDatalog.DatalogError("Syntax error: Literals cannot have a literal as argument : %s%s" % (predicate_name, self.terms), None, None)
elif not isinstance(arg, VarSymbol) and i==0 and cls_name and cls_name not in [c.__name__ for c in arg.__class__.__mro__]:
raise TypeError("Object is incompatible with the class that is queried.")
elif isinstance(arg, Aggregate):
raise pyDatalog.DatalogError("Syntax error: Incorrect use of aggregation.", None, None)
if isinstance(arg, pyDatalog.Variable):
arg.todo = self
del arg._data[:] # reset variables
self.terms.append(Expression._for(arg))
tbl = [a._pyD_lua for a in self.terms]
# now create the literal for the head of a clause
self.lua = pyEngine.Literal(predicate_name, tbl, prearity, aggregate)
# TODO check that l.pred.aggregate is empty
@classmethod
def make(cls, predicate_name, terms, prearity=None, aggregate=None):
if predicate_name[-1]=='!':
return HeadLiteral(predicate_name, terms, prearity, aggregate)
else:
return Query(predicate_name, terms, prearity, aggregate)
@classmethod
def make_for_comparison(cls, self, operator, other):
assert operator=="==" or not isinstance(other, Aggregate), "Aggregate operators can only be used with =="
other = Expression._for(other)
if isinstance(self, Function):
if isinstance(other, Aggregate): # p[X]==aggregate() # TODO create 2 literals here
if operator != '==':
raise pyDatalog.DatalogError("Aggregate operator can only be used with equality.", None, None)
name, terms, prearity = (self.name + '==', list(self.keys) + [other], len(self.keys))
# 1 create literal for queries
terms[-1] = (Symbol('X')) # (X, X)
l = Literal.make(name, terms, prearity, other)
pyDatalog.add_clause(l, l) # body will be ignored, but is needed to make the clause safe
# 2 prepare literal for the calculation. It can be used in the head only
del terms[-1] # --> (X,)
terms.extend(other.args)
prearity = len(terms) # (X,Y,Z)
return Literal.make(name + '!', terms, prearity=prearity)
elif operator != '==' or isinstance(other, (Operation, Function, Lambda)):
if '.' not in self.name: # p[X]<Y+Z transformed into (p[X]=Y1) & (Y1<Y+Z)
literal = Literal.make(self.name+'==', list(self.keys)+[self.symbol], prearity=len(self.keys))
return literal & pyEngine.compare2(self.symbol, operator, other)
elif isinstance(other, (Operation, Function, Lambda)): # a.p[X]<Y+Z transformed into (Y2==Y+Z) & (a.p[X]<Y2)
Y2 = Function.newSymbol()
return (Y2 == other) & Literal.make(self.name + operator, list(self.keys) + [Y2], prearity=len(self.keys))
return Literal.make(self.name + operator, list(self.keys) + [other], prearity=len(self.keys))
else:
if not isinstance(other, Expression):
raise pyDatalog.DatalogError("Syntax error: Symbol or Expression expected", None, None)
name = '=' + self._pyD_name + operator + str(other._pyD_name)
literal = Literal.make(name, [self] + list(other._variables().values()))
expr = other.lua_expr(list(self._variables().keys())+list(other._variables().keys()))
literal.pre_calculations = other._precalculation()
pyEngine.add_expr_to_predicate(literal.lua.pred, operator, expr)
return literal
@property
def literals(self):
return [self]
def _variables(self):
variables = OrderedDict()
for term in self.terms:
variables.update(term._variables())
return variables
def __le__(self, body):
" head <= body"
if not isinstance(body, (Literal, Body)):
raise pyDatalog.DatalogError("Invalid body for clause", None, None)
newBody = Body()
for literal in body.literals:
if isinstance(literal, HeadLiteral):
raise pyDatalog.DatalogError("Aggregation cannot appear in the body of a clause", None, None)
newBody = newBody & literal.pre_calculations & literal
result = pyDatalog.add_clause(self, newBody)
if not result:
raise pyDatalog.DatalogError("Can't create clause", None, None)
return result
class HeadLiteral(Literal):
""" represents literals that can be used only in head of clauses, i.e. literals with aggregate function"""
pass
class Query(Literal, LazyListOfList):
"""
represents a literal that can be queried (thus excludes aggregate literals)
unary operator '+' means insert it as fact
binary operator '&' means 'and', and returns a Body
"""
def __init__(self, predicate_name, terms, prearity=None, aggregate=None):
LazyListOfList.__init__(self)
Literal.__init__(self, predicate_name, terms, prearity, aggregate)
def ask(self):
self._data = self.lua.ask(False)
self.todo = None
if not ProgramMode and self.data:
transposed = list(zip(*(self._data))) # transpose result
result = []
for i, arg in enumerate(self.terms):
if isinstance(arg, pyDatalog.Variable) and len(arg._data)==0:
arg._data.extend(transposed[i])
arg.todo = None
result.append(transposed[i])
self._data = list(zip(*result)) if result else [()]
def __pos__(self):
" unary + means insert into database as fact "
assert not self._variables(), "Cannot assert a fact containing Variables"
pyDatalog._assert_fact(self)
def __neg__(self):
" unary - means retract fact from database "
        assert not self._variables(), "Cannot retract a fact containing Variables"
pyDatalog._retract_fact(self)
def __invert__(self):
"""unary ~ means negation """
# TODO test with python queries
return Literal.make('~' + self.predicate_name, self.terms)
def __and__(self, other):
" literal & literal"
return Body(self, other)
def __str__(self):
if ProgramMode:
terms = list(map (str, self.terms))
return str(self.predicate_name) + "(" + ','.join(terms) + ")"
else:
return LazyListOfList.__str__(self)
def __eq__(self, other):
if ProgramMode:
raise pyDatalog.DatalogError("Syntax error near equality: consider using brackets. %s" % str(self), None, None)
else:
return super(Literal, self).__eq__(other)
def literal(self):
return self
class Body(LazyListOfList):
"""
created by p(a,b) & q(c,d)
operator '&' means 'and', and returns a Body
"""
Counter = 0
def __init__(self, *args):
LazyListOfList.__init__(self)
self.literals = []
for arg in args:
self.literals += [arg] if isinstance(arg, Literal) else arg.literals
self.has_variables = False
self.todo = self
for literal in self.literals:
if literal._variables():
self.has_variables = True
for arg in literal.terms:
if isinstance(arg, pyDatalog.Variable):
arg.todo = self
def __and__(self, body2):
return Body(self, body2)
def __str__(self):
if self.has_variables:
return LazyListOfList.__str__(self)
return ' & '.join(list(map (str, self.literals)))
def literal(self, permanent=False):
# return a literal that can be queried to resolve the body
env = OrderedDict()
for literal in self.literals:
for term in literal._variables().values():
env[term._pyD_name] = term
if permanent:
literal = Literal.make('_pyD_query' + str(Body.Counter), list(env.values()))
Body.Counter = Body.Counter + 1
else:
literal = Literal.make('_pyD_query', list(env.values()))
literal.lua.pred.reset_clauses()
literal <= self
return literal
def __invert__(self):
"""unary ~ means negation """
return ~(self.literal(permanent=True))
def ask(self):
literal = self.literal()
literal.ask()
self._data = literal.data
##################################### Aggregation #####################################
class Aggregate(object):
"""
represents a generic aggregation_method(X, for_each=Y, order_by=Z, sep=sep)
e.g. 'sum(Y,key=Z)' in '(a[X]==sum(Y,key=Z))'
pyEngine calls sort_result(), key(), reset(), add() and fact() to compute the aggregate
"""
def __init__(self, Y=None, for_each=tuple(), order_by=tuple(), sep=None):
# convert for_each=Z to for_each=(Z,)
self.Y = Y
self.for_each = (for_each,) if isinstance(for_each, Expression) else tuple(for_each)
self.order_by = (order_by,) if isinstance(order_by, Expression) else tuple(order_by)
# try to recast expressions to variables
self.for_each = tuple([e.__dict__.get('variable', e) for e in self.for_each])
self.order_by = tuple([e.__dict__.get('variable', e) for e in self.order_by])
assert all([isinstance(e, VarSymbol) for e in self.for_each]), "for_each argument of aggregate must be variable(s), not expression(s)."
assert all([isinstance(e, VarSymbol) for e in self.order_by]), "order_by argument of aggregate must be variable(s), not expression(s)."
if sep and not isinstance(sep, six.string_types):
raise pyDatalog.DatalogError("Separator in aggregation must be a string", None, None)
self.sep = sep
# verify presence of keyword arguments
for kw in self.required_kw:
arg = getattr(self, kw)
if arg is None or (isinstance(arg, tuple) and arg == tuple()):
raise pyDatalog.DatalogError("Error: argument missing in aggregate", None, None)
# used to create literal. TODO : filter on symbols
self.args = ((Y,) if Y is not None else tuple()) + self.for_each + self.order_by + ((sep,) if sep is not None else tuple())
self.Y_arity = 1 if Y is not None else 0
self.sep_arity = 1 if sep is not None else 0
@property
def arity(self):
# of the aggregate function, not of the full predicate
return len(self.args)
def sort_result(self, result):
# significant indexes in the result rows
order_by_start = len(result[0]) - len(self.order_by) - self.sep_arity
for_each_start = order_by_start - len(self.for_each)
self.to_add = for_each_start-1
self.slice_for_each = slice(for_each_start, order_by_start)
self.reversed_order_by = range(len(result[0])-1-self.sep_arity, order_by_start-1, -1)
self.slice_group_by = slice(0, for_each_start-self.Y_arity)
# first sort per order_by, allowing for _pyD_negated
for i in self.reversed_order_by:
result.sort(key=lambda literal, i=i: literal[i].id,
reverse = self.order_by[i-order_by_start]._pyD_negated)
# then sort per group_by
result.sort(key=lambda literal, self=self: [id(term) for term in literal[self.slice_group_by]])
def key(self, result):
# return the grouping key of a result
return list(result[:len(result)-self.arity])
def reset(self):
self._value = 0
@property
def value(self):
return self._value
def fact(self,k):
return k + [pyEngine.Const(self.value)]
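# An illustrative (hypothetical) use of an aggregate inside a datalog program:
#   (total[X] == sum(Y, for_each=Z)) <= detail(X, Z, Y)
# The visit_Call transform above rewrites sum/len/min/max to _sum/_len/_min/_max, so
# Symbol.__call__ dispatches such calls to the *_aggregate classes defined below.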
class Sum_aggregate(Aggregate):
""" represents sum(Y, for_each=(Z,T))"""
required_kw = ('Y', 'for_each')
def add(self, row):
self._value += row[-self.arity].id
class Len_aggregate(Aggregate):
""" represents len(X)"""
    required_kw = ('Y',)
def add(self, row):
self._value += 1
class Concat_aggregate(Aggregate):
""" represents concat(Y, order_by=(Z1,Z2), sep=sep)"""
required_kw = ('Y', 'order_by', 'sep')
def reset(self):
self._value = []
def add(self, row):
self._value.append(row[-self.arity].id)
@property
def value(self):
return self.sep.join(self._value)
class Min_aggregate(Aggregate):
""" represents min(Y, order_by=(Z,T))"""
required_kw = ('Y', 'order_by')
def reset(self):
self._value = None
def add(self, row):
self._value = row[-self.arity].id if self._value is None else self._value
class Max_aggregate(Min_aggregate):
""" represents max(Y, order_by=(Z,T))"""
def __init__(self, *args, **kwargs):
Min_aggregate.__init__(self, *args, **kwargs)
for a in self.order_by:
a._pyD_negated = not(a._pyD_negated)
class Rank_aggregate(Aggregate):
""" represents rank(for_each=(Z), order_by(T))"""
required_kw = ('for_each', 'order_by')
def reset(self):
self.count = 0
self._value = None
def add(self, row):
# retain the value if (X,) == (Z,)
if row[self.slice_group_by] == row[self.slice_for_each]:
self._value = list(row[self.slice_group_by]) + [pyEngine.Const(self.count),]
return self._value
self.count += 1
def fact(self, k):
return self._value
class Running_sum(Rank_aggregate):
""" represents running_sum(Y, for_each=(Z), order_by(T)"""
required_kw = ('Y', 'for_each', 'order_by')
def add(self,row):
self.count += row[self.to_add].id # TODO
if row[:self.to_add] == row[self.slice_for_each]:
self._value = list(row[:self.to_add]) + [pyEngine.Const(self.count),]
return self._value
|
baojie/pydatalog
|
pyDatalog/pyParser.py
|
Python
|
lgpl-2.1
| 37,914
|
[
"VisIt"
] |
0cc4cbb7532c693226d4065b0930e22f8174d14ead089dfe83f2297483af14f5
|
#! /usr/bin/python
from setuptools import setup
import sys
sys.path.append("support");
sys.path.append("support/yaraspell");
sys.path.append("interfaces/web/lib");
sys.path.append("interfaces/web/lib/paste");
sys.path.append("interfaces/web/lib/simlejson");
sys.path.append("interfaces/web/lib/okasha2");
sys.path.append("interfaces/web");
sys.path.append("interfaces/gui");
sys.path.append("mishkal");
import py2exe
MyDataFiles = [
('data', ['./data/randomtext.txt']),
('docs', [
'./docs/AUTHORS.txt',
'./docs/ChangeLog.txt',
'./docs/COPYING.txt',
'./docs/HowTo.odt',
'./docs/Ideas.odt',
'./docs/ideas.txt',
'./docs/README.txt',
'./docs/THANKS.txt',
'./docs/TODO.txt',
'./docs/VERSION.txt',
]),
('docs/html/images',
['./docs/html/images/adawatstyle.css',
'./docs/html/images/gotashkeel.png',
'./docs/html/images/mishkal.png',
'./docs/html/images/mishkal_alpha_smpl.png',
'./docs/html/images/mixkal.jpg',
]
),
# ('interfaces/',[]),
# ('interfaces/gui/',[]),
# ('interfaces/gui/ar',
('ar',
[
'./interfaces/gui/ar/about.html',
'./interfaces/gui/ar/help_body.html',
# './interfaces/gui/ar/images',
'./interfaces/gui/ar/projects.html',
'./interfaces/gui/ar/style.css',
]
),
# ('interfaces/gui/ar/images',
('ar/images',
[
'./interfaces/gui/ar/images/alef_wasla.png',
'./interfaces/gui/ar/images/animation.png',
'./interfaces/gui/ar/images/appicon.ico',
'./interfaces/gui/ar/images/appicon.png',
'./interfaces/gui/ar/images/copy.png',
'./interfaces/gui/ar/images/cut.png',
'./interfaces/gui/ar/images/damma.png',
'./interfaces/gui/ar/images/dammatan.png',
'./interfaces/gui/ar/images/exit.png',
'./interfaces/gui/ar/images/fatha.png',
'./interfaces/gui/ar/images/fathatan.png',
'./interfaces/gui/ar/images/font.png',
'./interfaces/gui/ar/images/gaf.png',
'./interfaces/gui/ar/images/help.jpg',
'./interfaces/gui/ar/images/icon.png',
'./interfaces/gui/ar/images/ix.ico',
'./interfaces/gui/ar/images/ixn.ico',
'./interfaces/gui/ar/images/kasra.png',
'./interfaces/gui/ar/images/kasratan.png',
'./interfaces/gui/ar/images/logo.png',
'./interfaces/gui/ar/images/new.png',
'./interfaces/gui/ar/images/open.png',
'./interfaces/gui/ar/images/paste.png',
'./interfaces/gui/ar/images/peh.png',
'./interfaces/gui/ar/images/preview.png',
'./interfaces/gui/ar/images/print.png',
'./interfaces/gui/ar/images/qutrub.ico',
'./interfaces/gui/ar/images/save.png',
'./interfaces/gui/ar/images/shadda.png',
'./interfaces/gui/ar/images/smallalef.png',
'./interfaces/gui/ar/images/sukun.png',
# './interfaces/gui/ar/images/svg',
'./interfaces/gui/ar/images/tatweel.png',
'./interfaces/gui/ar/images/text-speak.png',
'./interfaces/gui/ar/images/weblogo.ico',
'./interfaces/gui/ar/images/zoomin.png',
'./interfaces/gui/ar/images/zoomout.png',
'./interfaces/gui/ar/images/zwj.png',
'./interfaces/gui/ar/images/zwnj.png',
],
),
# ('interfaces/gui/gui/',[]),
# ('interfaces/gui/gui/ar',
('gui/ar',
[
'./interfaces/gui/gui/ar/about.html',
'./interfaces/gui/gui/ar/help_body.html',
# './interfaces/gui/gui/ar/images',
'./interfaces/gui/gui/ar/projects.html',
'./interfaces/gui/gui/ar/style.css',
]
),
# ('interfaces/gui/gui/ar/images',
('gui/ar/images',
['./interfaces/gui/gui/ar/images/alef_wasla.png',
'./interfaces/gui/gui/ar/images/animation.png',
'./interfaces/gui/gui/ar/images/appicon.ico',
'./interfaces/gui/gui/ar/images/appicon.png',
'./interfaces/gui/gui/ar/images/copy.png',
'./interfaces/gui/gui/ar/images/cut.png',
'./interfaces/gui/gui/ar/images/damma.png',
'./interfaces/gui/gui/ar/images/dammatan.png',
'./interfaces/gui/gui/ar/images/exit.png',
'./interfaces/gui/gui/ar/images/fatha.png',
'./interfaces/gui/gui/ar/images/fathatan.png',
'./interfaces/gui/gui/ar/images/font.png',
'./interfaces/gui/gui/ar/images/gaf.png',
'./interfaces/gui/gui/ar/images/help.jpg',
'./interfaces/gui/gui/ar/images/icon.png',
'./interfaces/gui/gui/ar/images/ix.ico',
'./interfaces/gui/gui/ar/images/ixn.ico',
'./interfaces/gui/gui/ar/images/kasra.png',
'./interfaces/gui/gui/ar/images/kasratan.png',
'./interfaces/gui/gui/ar/images/logo.png',
'./interfaces/gui/gui/ar/images/new.png',
'./interfaces/gui/gui/ar/images/open.png',
'./interfaces/gui/gui/ar/images/paste.png',
'./interfaces/gui/gui/ar/images/peh.png',
'./interfaces/gui/gui/ar/images/preview.png',
'./interfaces/gui/gui/ar/images/print.png',
'./interfaces/gui/gui/ar/images/qutrub.ico',
'./interfaces/gui/gui/ar/images/save.png',
'./interfaces/gui/gui/ar/images/shadda.png',
'./interfaces/gui/gui/ar/images/smallalef.png',
'./interfaces/gui/gui/ar/images/sukun.png',
# './interfaces/gui/gui/ar/images/svg',
'./interfaces/gui/gui/ar/images/tatweel.png',
'./interfaces/gui/gui/ar/images/text-speak.png',
# './interfaces/gui/gui/ar/images/txt',
'./interfaces/gui/gui/ar/images/weblogo.ico',
'./interfaces/gui/gui/ar/images/zoomin.png',
'./interfaces/gui/gui/ar/images/zoomout.png',
'./interfaces/gui/gui/ar/images/zwj.png',
'./interfaces/gui/gui/ar/images/zwnj.png',
]
),
# ('',[]),
# ('resources/',[]),
('tmp',[]),
('resources/errorPages',
[
'./interfaces/web/resources/errorPages/400.shtml',
'./interfaces/web/resources/errorPages/404.shtml',
'./interfaces/web/resources/errorPages/500.shtml',
# './interfaces/web/resources/errorPages/images',
'./interfaces/web/resources/errorPages/Index.html',
'./interfaces/web/resources/errorPages/logo.png',
'./interfaces/web/resources/errorPages/images/logo.png',
],
),
('resources/errorPages/images',
['./interfaces/web/resources/errorPages/images/logo.png',
],
),
('resources/files',
[
'./interfaces/web/resources/files/adawat.js',
'./interfaces/web/resources/files/adawatstyle.css',
'./interfaces/web/resources/files/cytoscape.min.js',
'./interfaces/web/resources/files/favicon1.png',
'./interfaces/web/resources/files/jquery-1.7.1.min.js',
'./interfaces/web/resources/files/jquery-3.3.1.min.js',
'./interfaces/web/resources/files/jquery.min.js',
'./interfaces/web/resources/files/logo-icon.png',
'./interfaces/web/resources/files/logo.png',
# './interfaces/web/resources/files/xzero-rtl',
],
),
('resources/files/fonts',
['./interfaces/web/resources/files/fonts/amiri-quran-colored.eot',
'./interfaces/web/resources/files/fonts/amiri-quran-colored.ttf',
'./interfaces/web/resources/files/fonts/amiri-quran-colored.woff',
'./interfaces/web/resources/files/fonts/DroidNaskh-Regular-Colored.ttf',
'./interfaces/web/resources/files/fonts/DroidNaskh-Regular-Colored.woff',
'./interfaces/web/resources/files/fonts/KacstOne.eot',
'./interfaces/web/resources/files/fonts/KacstOne.otf',
'./interfaces/web/resources/files/fonts/KacstOne.svg',
'./interfaces/web/resources/files/fonts/KacstOne.ttf',
'./interfaces/web/resources/files/fonts/KacstOne.woff',
'./interfaces/web/resources/files/fonts/KacstOneColored.ttf',
'./interfaces/web/resources/files/fonts/SimpleNaskhi-colores.ttf',
]
),
('resources/files/images',
['./interfaces/web/resources/files/images/adawat.png',
'./interfaces/web/resources/files/images/ayaspell.png',
'./interfaces/web/resources/files/images/dreamdevdz.jpeg',
'./interfaces/web/resources/files/images/main167-750x402.jpg',
'./interfaces/web/resources/files/images/pyarabic.png',
'./interfaces/web/resources/files/images/qutrub.jpg',
'./interfaces/web/resources/files/images/radif.png',
'./interfaces/web/resources/files/images/tashaphyne.png',
]
),
('resources/files/samples',
[
'./interfaces/web/resources/files/samples/gotashkeel.png',
'./interfaces/web/resources/files/samples/mishkal.png',
'./interfaces/web/resources/files/samples/mishkal_alpha_smpl.png',
'./interfaces/web/resources/files/samples/mixkal.jpg',
]
),
## Todo
# ('resources/files/xzero-rtl',[]),
('resources/files/xzero-rtl/css',
[
'./interfaces/web/resources/files/xzero-rtl/css/bootstrap-arabic-theme.css',
'./interfaces/web/resources/files/xzero-rtl/css/bootstrap-arabic-theme.css.map',
'./interfaces/web/resources/files/xzero-rtl/css/bootstrap-arabic-theme.min.css',
'./interfaces/web/resources/files/xzero-rtl/css/bootstrap-arabic.css',
'./interfaces/web/resources/files/xzero-rtl/css/bootstrap-arabic.css.map',
'./interfaces/web/resources/files/xzero-rtl/css/bootstrap-arabic.min.css',
]
),
('resources/files/xzero-rtl/fonts',
[
'./interfaces/web/resources/files/xzero-rtl/fonts/glyphicons-halflings-regular.eot',
'./interfaces/web/resources/files/xzero-rtl/fonts/glyphicons-halflings-regular.svg',
'./interfaces/web/resources/files/xzero-rtl/fonts/glyphicons-halflings-regular.ttf',
'./interfaces/web/resources/files/xzero-rtl/fonts/glyphicons-halflings-regular.woff',
]
),
('resources/files/xzero-rtl/js',
[
'./interfaces/web/resources/files/xzero-rtl/js/bootstrap-arabic.js',
'./interfaces/web/resources/files/xzero-rtl/js/bootstrap-arabic.min.js',
]
),
('./interfaces/web/resources/templates',
[ './interfaces/web/resources/templates/carousel.html',
'./interfaces/web/resources/templates/contact.html',
'./interfaces/web/resources/templates/doc.html',
'./interfaces/web/resources/templates/download.html',
'./interfaces/web/resources/templates/main.html',
'./interfaces/web/resources/templates/projects.html',
],
),
] # end MyDataFiles
setup(name='Mishkal Software', version='1.5',
description='Mishkal Software',
author='Taha Zerrouki',
author_email='taha.zerrouki@gmail.com',
url='http://tahadz.com/mishkal',
license='GPL',
windows = [
{
"script": "interfaces/gui/mishkal-gui.py",
"icon_resources": [(1, "./interfaces/gui/ar/images/ix.ico")],
}],
console = [
{
"script": "bin/mishkal-console.py",
"icon_resources": [(1, "./interfaces/gui/ar/images/ixn.ico")],
}
,
{
"script": "interfaces/web/mishkal-webserver.py",
"icon_resources": [(1, "./interfaces/gui/ar/images/weblogo.ico")],
}
],
# to avoid zipped file
zipfile=None,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: End Users/Desktop',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
options = {
"py2exe": {
"compressed": 1,
"optimize": 2,
"bundle_files": 2,
# "dll_excludes": [ "MSVCP90.dll", ],
"includes":["sip"],
}
},
data_files=MyDataFiles,
#end setup
)
|
linuxscout/mishkal
|
exe_setup.py
|
Python
|
gpl-3.0
| 10,480
|
[
"Cytoscape"
] |
775ef2c270c71f7c05ab59b69853b4b7d2bb0cae4d698f8b0fc646229185681e
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import atexit
import datetime
from typing import Union
from qcelemental.util import which, which_import
from . import core
# Numpy place holder for files and cleanup
numpy_files = []
def register_numpy_file(filename):
if not filename.endswith('.npy'): filename += '.npy'
if filename not in numpy_files:
numpy_files.append(filename)
def clean_numpy_files():
for nfile in numpy_files:
os.unlink(nfile)
atexit.register(clean_numpy_files)
def exit_printing(start_time=None, success=None):
"""Prints the exit time and status.
Parameters
----------
start_time : datetime.datetime, optional
starting time from which the elapsed time is computed.
success : bool
Provides a success flag, otherwise uses the _success_flag_ global variable
Returns
-------
None
"""
end_time = datetime.datetime.now()
core.print_out("\n Psi4 stopped on: {}".format(end_time.strftime('%A, %d %B %Y %I:%M%p')))
if start_time is not None:
run_time = end_time - start_time
run_time = str(run_time).split('.')
run_time = run_time[0] + '.' + run_time[1][:2]
core.print_out("\n Psi4 wall time for execution: {}\n".format(run_time))
if success is None:
success = _success_flag_
if success:
core.print_out("\n*** Psi4 exiting successfully. Buy a developer a beer!\n")
else:
core.print_out("\n*** Psi4 encountered an error. Buy a developer more coffee!\n")
core.print_out("*** Resources and help at github.com/psi4/psi4.\n")
_success_flag_ = False
# Working directory
_input_dir_ = os.getcwd()
def get_input_directory():
return _input_dir_
# Add-Ons
def _CMake_to_Py_boolean(cmakevar):
if cmakevar.upper() in ["1", "ON", "YES", "TRUE", "Y"]:
return True
else:
return False
def psi4_which(command, *, return_bool: bool = False, raise_error: bool = False,
raise_msg: str = None) -> Union[bool, None, str]:
"""Test to see if a command is available in Psi4 search path.
Returns
-------
str or None
By default, returns command path if command found or `None` if not.
Environment is $PSIPATH:$PATH, less any None values.
bool
When `return_bool=True`, returns whether or not found.
Raises
------
ModuleNotFoundError
        When `raise_error=True` and command not found.
"""
lenv = (os.pathsep.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(os.pathsep) if x != '']) +
os.pathsep + os.environ.get('PATH', ''))
return which(command=command, return_bool=return_bool, raise_error=raise_error, raise_msg=raise_msg, env=lenv)
_addons_ = {
"ambit": _CMake_to_Py_boolean("@ENABLE_ambit@"),
"chemps2": _CMake_to_Py_boolean("@ENABLE_CheMPS2@"),
"dkh": _CMake_to_Py_boolean("@ENABLE_dkh@"),
"libefp": which_import("pylibefp", return_bool=True),
"erd": _CMake_to_Py_boolean("@ENABLE_erd@"),
"gdma": _CMake_to_Py_boolean("@ENABLE_gdma@"),
"ipi": which_import("ipi", return_bool=True),
"pcmsolver": _CMake_to_Py_boolean("@ENABLE_PCMSolver@"),
"cppe": which_import("cppe", return_bool=True),
"simint": _CMake_to_Py_boolean("@ENABLE_simint@"),
"dftd3": psi4_which("dftd3", return_bool=True),
"cfour": psi4_which("xcfour", return_bool=True),
"mrcc": psi4_which("dmrcc", return_bool=True),
"gcp": psi4_which("gcp", return_bool=True),
"v2rdm_casscf": which_import("v2rdm_casscf", return_bool=True),
"gpu_dfcc": which_import("gpu_dfcc", return_bool=True),
"forte": which_import("forte", return_bool=True),
"snsmp2": which_import("snsmp2", return_bool=True),
"resp": which_import("resp", return_bool=True),
"psi4fockci": which_import("psi4fockci", return_bool=True),
"adcc": which_import("adcc", return_bool=True),
"mdi": which_import("mdi", return_bool=True),
"cct3": which_import("cct3", return_bool=True),
}
def addons(request=None):
"""Returns boolean of whether Add-On *request* is available to Psi4,
either compiled in or searchable in $PSIPATH:$PATH, as relevant. If
*request* not passed, returns list of available Add-Ons.
"""
if request is None:
return sorted([k for k, v in _addons_.items() if v])
return _addons_[request.lower()]
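# Illustrative calls (results depend on the local build and environment):
#   addons()          # -> sorted list of Add-On names detected as available
#   addons("dftd3")   # -> True/False, whether dftd3 is found in $PSIPATH:$PATH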
# Testing
def test(extent='full', extras=None):
"""Runs a test suite through pytest.
Parameters
----------
extent : {'smoke', 'quick', 'full', 'long'}
All choices are defined, but choices may be redundant in some projects.
_smoke_ will be minimal "is-working?" test(s).
_quick_ will be as much coverage as can be got quickly, approx. 1/3 tests.
_full_ will be the whole test suite, less some exceedingly long outliers.
_long_ will be the whole test suite.
extras : list
Additional arguments to pass to `pytest`.
Returns
-------
int
Return code from `pytest.main()`. 0 for pass, 1 for fail.
"""
try:
import pytest
except ImportError:
raise RuntimeError('Testing module `pytest` is not installed. Run `conda install pytest`')
abs_test_dir = os.path.sep.join([os.path.abspath(os.path.dirname(__file__)), "tests"])
command = ['-rws', '-v']
if extent.lower() == 'smoke':
command.extend(['-m', 'smoke'])
elif extent.lower() == 'quick':
command.extend(['-m', 'quick or smoke'])
elif extent.lower() == 'full':
command.extend(['-m', 'not long'])
elif extent.lower() == 'long':
pass
if extras is not None:
command.extend(extras)
command.extend(['--capture=sys', abs_test_dir])
retcode = pytest.main(command)
return retcode
|
ashutoshvt/psi4
|
psi4/extras.py
|
Python
|
lgpl-3.0
| 6,694
|
[
"CFOUR",
"Psi4"
] |
5d551b1b7a593d067e041a87116b72e312c91691952dd7a08af55bd4bae48f8d
|
from .visitor import NodeVisitor
from ._compat import iteritems
VAR_LOAD_PARAMETER = 'param'
VAR_LOAD_RESOLVE = 'resolve'
VAR_LOAD_ALIAS = 'alias'
VAR_LOAD_UNDEFINED = 'undefined'
def find_symbols(nodes, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
visitor = FrameSymbolVisitor(sym)
for node in nodes:
visitor.visit(node)
return sym
def symbols_for_node(node, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
sym.analyze_node(node)
return sym
class Symbols(object):
def __init__(self, parent=None, level=None):
if level is None:
if parent is None:
level = 0
else:
level = parent.level + 1
self.level = level
self.parent = parent
self.refs = {}
self.loads = {}
self.stores = set()
def analyze_node(self, node, **kwargs):
visitor = RootVisitor(self)
visitor.visit(node, **kwargs)
def _define_ref(self, name, load=None):
ident = 'l_%d_%s' % (self.level, name)
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
return ident
def find_load(self, target):
if target in self.loads:
return self.loads[target]
if self.parent is not None:
return self.parent.find_load(target)
def find_ref(self, name):
if name in self.refs:
return self.refs[name]
if self.parent is not None:
return self.parent.find_ref(name)
def ref(self, name):
rv = self.find_ref(name)
if rv is None:
raise AssertionError('Tried to resolve a name to a reference that '
'was unknown to the frame (%r)' % name)
return rv
def copy(self):
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.refs = self.refs.copy()
rv.loads = self.loads.copy()
rv.stores = self.stores.copy()
return rv
def store(self, name):
self.stores.add(name)
        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
if name not in self.refs:
# If there is a parent scope we check if the name has a
# reference there. If it does it means we might have to alias
# to a variable there.
if self.parent is not None:
outer_ref = self.parent.find_ref(name)
if outer_ref is not None:
self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
return
# Otherwise we can just set it to undefined.
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
def declare_parameter(self, name):
self.stores.add(name)
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
def load(self, name):
target = self.find_ref(name)
if target is None:
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
def branch_update(self, branch_symbols):
stores = {}
for branch in branch_symbols:
for target in branch.stores:
if target in self.stores:
continue
stores[target] = stores.get(target, 0) + 1
for sym in branch_symbols:
self.refs.update(sym.refs)
self.loads.update(sym.loads)
self.stores.update(sym.stores)
for name, branch_count in iteritems(stores):
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name)
assert target is not None, 'should not happen'
if self.parent is not None:
outer_target = self.parent.find_ref(name)
if outer_target is not None:
self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
continue
self.loads[target] = (VAR_LOAD_RESOLVE, name)
def dump_stores(self):
rv = {}
node = self
while node is not None:
for name in node.stores:
if name not in rv:
rv[name] = self.find_ref(name)
node = node.parent
return rv
def dump_param_targets(self):
rv = set()
node = self
while node is not None:
for target, (instr, _) in iteritems(self.loads):
if instr == VAR_LOAD_PARAMETER:
rv.add(target)
node = node.parent
return rv
class RootVisitor(NodeVisitor):
def __init__(self, symbols):
self.sym_visitor = FrameSymbolVisitor(symbols)
def _simple_visit(self, node, **kwargs):
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
_simple_visit
def visit_AssignBlock(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node, **kwargs):
for child in node.iter_child_nodes(exclude=('call',)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_For(self, node, for_branch='body', **kwargs):
if for_branch == 'body':
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
elif for_branch == 'else':
branch = node.else_
elif for_branch == 'test':
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
raise RuntimeError('Unknown for branch')
for item in branch or ():
self.sym_visitor.visit(item)
def visit_With(self, node, **kwargs):
for target in node.targets:
self.sym_visitor.visit(target)
for child in node.body:
self.sym_visitor.visit(child)
def generic_visit(self, node, *args, **kwargs):
raise NotImplementedError('Cannot find symbols for %r' %
node.__class__.__name__)
class FrameSymbolVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, symbols):
self.symbols = symbols
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
if store_as_param or node.ctx == 'param':
self.symbols.declare_parameter(node.name)
elif node.ctx == 'store':
self.symbols.store(node.name)
elif node.ctx == 'load':
self.symbols.load(node.name)
def visit_NSRef(self, node, **kwargs):
self.symbols.load(node.name)
def visit_If(self, node, **kwargs):
self.visit(node.test, **kwargs)
original_symbols = self.symbols
def inner_visit(nodes):
self.symbols = rv = original_symbols.copy()
for subnode in nodes:
self.visit(subnode, **kwargs)
self.symbols = original_symbols
return rv
body_symbols = inner_visit(node.body)
elif_symbols = inner_visit(node.elif_)
else_symbols = inner_visit(node.else_ or ())
self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
def visit_Macro(self, node, **kwargs):
self.symbols.store(node.name)
def visit_Import(self, node, **kwargs):
self.generic_visit(node, **kwargs)
self.symbols.store(node.target)
def visit_FromImport(self, node, **kwargs):
self.generic_visit(node, **kwargs)
for name in node.names:
if isinstance(name, tuple):
self.symbols.store(name[1])
else:
self.symbols.store(name)
def visit_Assign(self, node, **kwargs):
"""Visit assignments in the correct order."""
self.visit(node.node, **kwargs)
self.visit(node.target, **kwargs)
def visit_For(self, node, **kwargs):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter, **kwargs)
def visit_CallBlock(self, node, **kwargs):
self.visit(node.call, **kwargs)
def visit_FilterBlock(self, node, **kwargs):
self.visit(node.filter, **kwargs)
def visit_With(self, node, **kwargs):
for target in node.values:
self.visit(target)
def visit_AssignBlock(self, node, **kwargs):
"""Stop visiting at block assigns."""
self.visit(node.target, **kwargs)
def visit_Scope(self, node, **kwargs):
"""Stop visiting at scopes."""
def visit_Block(self, node, **kwargs):
"""Stop visiting at blocks."""
def visit_OverlayScope(self, node, **kwargs):
"""Do not visit into overlay scopes."""
|
facelessuser/sublime-markdown-popups
|
st3/mdpopups/jinja2/idtracking.py
|
Python
|
mit
| 9,185
|
[
"VisIt"
] |
693af84eec360ae7bd8d70d8242aee6126e91db2bdae4752f017942231f30fd2
|
"""
Harris Corner Detection
functions:
    cv2.cornerHarris()
    cv2.cornerSubPix()
corners - regions in image with large variation in intensity in all directions
detect by:
find difference in intensity for a displacement of (u, v) in all directions
E(u, v) = sum(wrt x,y) w(x, y) [I(x+u, y+v) - I(x, y)]^2
    w is the window function, I(x+u, y+v) the shifted intensity, I(x, y) the intensity
    the window function is either a rectangular window or a Gaussian window (it gives weights to the pixels underneath)
have to maximize E(u, v) for corner detection
-> have to maximize 2nd term
    apply Taylor expansion, get
E(u, v) ~= [u v] M [u v]'
    M = sum(wrt x, y) w(x, y) [I_x; I_y] [I_x I_y]   (outer product of the image gradient)
I_x and I_y are image derivs in x and y dirs, respectively
can be found with cv2.Sobel()
After, they created a score (an eq'n) that determines if window contains corner or not
R = det(M) - k(trace(M))^2
det(M) = \lambda_1 * \lambda_2
trace(M) = \lambda_1 + \lambda_2
\lambda_1 & \lambda_2 are eigenvals of M
values of eigenvals determine whether region is corner, edge, or flat
when |R| small, when \lambda_1 & \lambda_2 small, region flat
when R < 0, when \lambda_1 >> \lambda_2 or vv, region is edge
when R large, when \lambda_1 & \lambda_2 large and \lambda_1 ~ \lambda_2, region is a corner
see: http://docs.opencv.org/3.1.0/harris_region.jpg
Result of Harris corner detection: a grayscale image with these scores.
Thresholding it for a suitable value gives the corners of the image.
"""
# Harris Corner Detector in OpenCV
# function: cv2.cornerHarris()
# arguments:
# img - input image; grayscale and float32 type
# blockSize - size of nbhd considered for corner detection
# ksize - aperture parameter of Sobel deriv used
# k - Harris detector free parameter in eq'n
# ex.
import cv2
import numpy as np
filename = 'chessboard.png'
img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
# result is dilated for marking the corners, not important
dst = cv2.dilate(dst, None)
# Threshold for an optimal value, it may vary depending on the image
img[dst>0.01*dst.max()] = [0, 0, 255]
cv2.imshow('dst', img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
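# The docstring above also mentions cv2.cornerSubPix(), which refines corner locations
# to sub-pixel accuracy. A minimal sketch of that refinement; the threshold value,
# window size and stopping criteria are illustrative choices, not prescribed values:
# take the centroids of the thresholded Harris response as seeds and let
# cornerSubPix() refine them on the float32 grayscale image.
_, thr = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
thr = np.uint8(thr)
_, labels, stats, centroids = cv2.connectedComponentsWithStats(thr)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
refined = cv2.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1), criteria)
# draw the refined corners in green (the coarse Harris corners were marked red above)
for x, y in refined.astype(int):
    cv2.circle(img, (int(x), int(y)), 3, (0, 255, 0), -1)
cv2.imshow('subpix', img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()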
|
SSG-DRD-IOT/commercial-iot-security-system
|
opencv/tutorials/featureDetection/harris_corner/harris_corner.py
|
Python
|
mit
| 2,344
|
[
"Gaussian"
] |
cf903d26a18bfaa5e4b7742efe33321cc95e4d539e6216770007a5970d73b7d7
|
#!/usr/bin/env python
# Copyright (C) 2013-2018 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module parses the feature definition file features.def
#
import fileinput
import string
import re
class SyntaxError:
def __init__(self, message, instead):
self.message = message
self.filename = fileinput.filename()
self.lineno = fileinput.filelineno()
self.instead = instead
def __str__(self):
return '%s: %2d: %s in the following line:\n%s' % \
(self.filename, self.lineno, self.message, self.instead)
def toCPPExpr(expr):
expr = expr.replace('and', ' && ')
expr = expr.replace('or', ' || ')
expr = expr.replace('not', ' !')
expr = re.sub('([A-Z0-9_]+)', 'defined(\\1)', expr)
return expr
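# Rough illustration (hypothetical feature names; exact whitespace aside):
#   toCPPExpr("ELECTROSTATICS and not P3M")
#   -> "defined(ELECTROSTATICS) && !defined(P3M)"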
class defs:
def __init__(self, filename):
# complete set of all defined features
allfeatures = set()
# allfeatures minus externals and derived
features = set()
# list of implications (pairs of feature -> implied feature)
implications = list()
# list of requirements (pairs of feature -> requirement expr)
requirements = list()
# set of derived features
derived = set()
# list of derivations (pairs of feature -> derivation expr)
derivations = list()
# list of external features
externals = set()
# list of features that are to be tested
notestfeatures = set()
for line in fileinput.input(filename):
line = line.strip()
# Ignore empty and comment lines
if not line or line.startswith('#') \
or line.startswith('//') or line.startswith('/*'):
continue
# Tokenify line
tokens = line.split(None, 2)
# Register the feature
feature = tokens.pop(0)
allfeatures.add(feature)
# get the keyword
if tokens:
keyword = tokens.pop(0)
if not tokens:
rest = None
else:
rest = tokens[0]
# derived
if keyword == 'equals':
if rest is None:
raise SyntaxError("<feature> equals <expr>", line)
if feature in derived:
raise SyntaxError(
"Derived feature is already defined above:", line)
if feature in externals:
raise SyntaxError(
"Derived feature is already defined as external above:", line)
derived.add(feature)
derivations.append((feature, rest, toCPPExpr(rest)))
# externals
elif keyword == 'external':
if rest is not None:
raise SyntaxError("<feature> external", line)
if feature in derived:
raise SyntaxError(
"External feature is already defined as derived above:", line)
implied = set(map((lambda x_y: x_y[1]), implications))
if feature in implied:
raise SyntaxError(
"External feature is implied above:", line)
externals.add(feature)
# implications
elif keyword == 'implies':
if rest is None:
raise SyntaxError(
"<feature> implies [<feature>...]", line)
tokens = rest.split()
for implied in tokens:
if implied.endswith(','):
implied = implied[:-1]
if implied in externals:
raise SyntaxError(
"Implied feature %s is already defined as external above:" % feature, line)
implications.append((feature, implied))
# requires
elif keyword == 'requires':
if rest is None:
raise SyntaxError("<feature> requires <expr>", line)
requirements.append((feature, rest, toCPPExpr(rest)))
elif keyword == 'notest':
if rest is not None:
raise SyntaxError("<feature> notest", line)
notestfeatures.add(feature)
features = allfeatures.difference(derived)
features = features.difference(externals)
self.allfeatures = allfeatures
self.features = features
self.requirements = requirements
self.implications = implications
self.derived = derived
self.derivations = derivations
self.externals = externals
self.notestfeatures = notestfeatures
def check_validity(self, activated):
"""Check whether a set of features is valid.
Returns None if it is not and the set of features including implied features if it is.
"""
newset = activated.copy()
# print "Verifying: " + str(activated) + "..."
# handle implications
for feature, implied in self.implications:
# print feature, ' -> ', implied
            if feature in newset and implied not in newset:
newset.add(implied)
# print 'Implied set: ' + str(newset)
# handle requirements
featurevars = dict()
derived = list(map((lambda x_y_z: x_y_z[0]), self.derivations))
allfeatures = self.features.union(derived, self.externals)
for feature in allfeatures:
featurevars[feature] = feature in newset
for feature, expr, undef in self.requirements:
# print 'Requirement: ', feature, ' -> ', expr
if feature in newset:
if not eval(expr, featurevars):
return None
# print 'Resulting set: ' + str(newset)
return newset
# Test whether all implied features or features in an expression are defined
|
mkuron/espresso
|
src/config/featuredefs.py
|
Python
|
gpl-3.0
| 6,855
|
[
"ESPResSo"
] |
9e03cb5af2ef30eb49de193330a9e4c081d85c4631d8db383590c78af942363b
|
#!/usr/bin/env python3
"""
Copyright 2017 Jocelyn Falempe kdj0c@djinvi.net
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import copy
# Adjust defense and attack cost, to match onepagerules current prices
adjust_defense_cost = 0.7
adjust_attack_cost = 0.7
# Cost per defense point (2+ => 6, 6+ => 24, 10+ => 58)
def defense_cost(d):
return (0.9 * d * d + d + 10.0) / 2.0
# defense cost multiplier (higher quality units are tougher, due to moral test)
# 1 for 2+ quality, 0.6 for 6+ quality
def quality_defense_factor(q):
return 1.0 - 0.1 * (q - 2.0)
# Attack cost multiplier, probability to hit according to quality
# 5/6 for 2+, 1/6 for 6+
def quality_attack_factor(q):
return (7.0 - q) / 6.0
# AP cost multiplier.
# 1 for no AP, * 1.2 for each AP point
def ap_cost(ap):
return (1.2 ** ap)
# Range cost multiplier.
# Melee threat range is the charge distance (12" = speed).
# A gun's threat range is the advance distance (6" = speed/2) plus the weapon range.
def range_cost(wrange, speed):
if wrange == 0:
c = speed ** 0.75
else:
c = (wrange + speed / 2) ** 0.75
return c
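# Illustrative example (not part of the original file): for a quality 4+ shooter
# moving 12" with a single-attack 24" AP(2) gun,
#   quality_attack_factor(4) -> 0.5
#   ap_cost(2)               -> 1.2 ** 2 = 1.44
#   range_cost(24, 12)       -> (24 + 12 / 2) ** 0.75 ~= 12.8
# so the attack is worth roughly 0.5 * 1.44 * 12.8 ~= 9.2 points before the
# adjust_attack_cost factor is applied (see Weapon.Cost below).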
# Handle D3+1 or 2D6+2 values
# return the mean to calculate the cost
def dice_mean(value):
if isinstance(value, int):
return value
n, rem = value.split('D')
if '+' in rem:
sides, add = rem.split('+')
else:
sides, add = rem, 0
mean = (int(sides) + 1) / 2.0
if n:
mean *= int(n)
return mean + int(add)
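# Quick sanity checks (illustrative, not part of the original file):
#   dice_mean(3)       -> 3
#   dice_mean('D6')    -> 3.5
#   dice_mean('D3+1')  -> 3.0
#   dice_mean('2D6+2') -> 9.0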
# WarGear can include special rules for the model, as well as weapons.
# For example, a jetbike gives the "Fast" rule and a Linked ShardGun.
class WarGear:
def __init__(self, name='Unknown Gear', special=[], weapons=[], text=''):
self.name = name
self.specialRules = special
self.weapons = weapons
self.text = text
@classmethod
def from_dict(self, name, data, armory):
data['weapons'] = armory.get(data.get('weapons', []))
return self(name, **data)
def Profile(self):
if self.text:
return '(' + self.text + ')'
else:
return '(' + ', '.join(self.specialRules + [str(w) for w in self.weapons]) + ')'
    def __str__(self):
        return self.name + ' ' + self.Profile()
def Cost(self, speed, quality):
cost = 0
for w in self.weapons:
cost += w.Cost(speed, quality)
return cost
# Class for weapons
class Weapon:
def __init__(self, name='Unknown Weapon', range=0, attacks=0, ap=0, special=[]):
self.name = name
self.range = range
self.attacks = attacks
self.armorPiercing = ap
self.weaponRules = special
self.specialRules = []
self.cost = 0
def __repr__(self):
return "{0}({1})".format(self.name, self.__dict__)
def Profile(self):
def fmtnz(value, fmt):
if value:
return [fmt.format(value)]
return []
prof = fmtnz(self.range, '{0}"') + ['A{0}'.format(self.attacks)] + fmtnz(self.armorPiercing, 'AP({0})')
# Remove "Linked" special rule if the name contain "Linked"
prof += [wr for wr in self.weaponRules if wr != 'Linked' or 'Linked' not in self.name.split()]
return '(' + ', '.join(prof) + ')'
def __str__(self):
return self.name + ' ' + self.Profile()
def Pretty(self):
if self.cost:
s = str(self.cost) + " pts "
else:
s = ''
s += self.__str__()
return s
def Cost(self, speed, quality):
sfactor = 1
simpact = 0
rending = 0
wrange = self.range
ap = dice_mean(self.armorPiercing)
attacks = dice_mean(self.attacks)
for s in self.weaponRules:
if s == 'Deadly':
sfactor *= 2.5
elif s == 'Linked':
quality -= 1
elif s == 'Rending':
# rending is 1/6 of having AP(8)
rending = (1 / 6) * (ap_cost(8) - ap_cost(ap))
elif s == 'Flux':
# Flux is statistically like having quality +2
quality -= 2
elif s.startswith('Poison'):
ap += int(s[7:-1]) / 2
elif s.startswith('Blast'):
sfactor *= int(s[6:-1])
elif s.startswith('Impact'):
simpact = int(s[7:-1])
elif s == 'Autohit':
quality = 1
elif s == 'Limited':
sfactor /= 2
elif s == 'Secondary':
sfactor /= 4
elif s == 'Sniper':
                # Sniper hits on 2+ and ignores cover (statistically worth about half an AP point)
quality = 2
ap += 0.5
elif s == 'Indirect':
wrange *= 1.4
elif s == 'Anti-Air':
sfactor *= 1.10
self.cost = sfactor * attacks * range_cost(wrange, speed) * (ap_cost(ap) * quality_attack_factor(quality) + rending)
        # Impact weapons hit automatically, but only when charging (so 0.5 times the cost of the same weapon without the quality factor)
if simpact:
self.cost += 0.5 * simpact * sfactor * ap_cost(ap) * range_cost(wrange, speed)
self.cost = int(round(self.cost * adjust_attack_cost))
return self.cost
class Armory(dict):
    # The Armory class is a dictionary of all Weapons and WarGear for a faction.
def __init__(self, *args):
dict.__init__(self, args)
    # Get a single piece of equipment from the armory by name.
def getOne(self, name):
if name in self:
return self[name]
if name.endswith('s'):
singular = name[:-1]
if singular in self:
self[name] = copy.copy(self[singular])
self[name].name = name
return self[name]
print('Error equipment {0} Not found !'.format(name))
return None
    # Return the list of equipment objects from their names.
    # If a name starts with "2x ", the same object appears twice in the returned list.
def get(self, names):
for name in names:
if ' ' in name:
firstword, remaining = name.split(' ', 1)
if firstword.endswith('x') and firstword[:-1].isdigit():
n = int(firstword[:-1])
position = names.index(name)
names.remove(name)
for i in range(n):
names.insert(position, remaining)
return [self.getOne(name) for name in names]
    # Add equipment items to the armory.
    # If an item is a weapon, also add its Linked variant.
def add(self, equipments):
for equipment in equipments:
if equipment.name in self:
print('Error {} is defined twice'.format(equipment.name))
continue
self[equipment.name] = equipment
if isinstance(equipment, Weapon):
if equipment.range > 0 and 'Linked' not in equipment.specialRules:
name = 'Linked ' + equipment.name
self[name] = Weapon(name, equipment.range, equipment.attacks, equipment.armorPiercing, ['Linked'] + equipment.weaponRules)
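# Minimal usage sketch (illustrative, not part of the original file):
#   armory = Armory()
#   armory.add([Weapon('Pulse Rifle', 30, 1, 1)])
#   armory.getOne('Pulse Rifles')        # the plural falls back to the singular entry
#   armory.get(['2x Pulse Rifle'])       # expands to two 'Pulse Rifle' objects
#   armory.getOne('Linked Pulse Rifle')  # the Linked variant was added automatically by add()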
class Unit:
def __init__(self, name='Unknown Unit', count=1, quality=4, defense=2, equipments=[], special=[]):
self.name = name
self.specialRules = special
self.equipments = equipments
self.quality = quality
self.basedefense = defense
self.count = count
self.upgrades = []
self.factionCost = 0
self.Update()
def __str__(self):
pretty = '{0} [{1}] {2} pts\n\t'.format(self.name, self.count, self.cost)
for w in self.equipments:
pretty += str(w) + '\n\t'
pretty += ', '.join(self.specialRules)
pretty += '\n\t'
pretty += 'Defense {0} pts, Attack {1} pts, Other {2} pts\n'.format(self.defenseCost, self.attackCost, self.otherCost)
return pretty
def __copy__(self):
return Unit(self.name, self.count, self.quality, self.basedefense, self.equipments.copy(), self.specialRules.copy())
@classmethod
def from_dict(self, data, armory):
data['equipments'] = armory.get(data.pop('equipment'))
return self(**data)
def Update(self):
self.wargearSp = [sp for equ in self.equipments for sp in equ.specialRules]
self.parseSpecialRules()
self.Cost()
def AddEquipments(self, equipments):
self.equipments += equipments
self.Update()
def RemoveEquipment(self, e):
if e in self.equipments:
self.equipments.remove(e)
return
if e.name.endswith('s'):
singular = e.name[:-1]
for equ in self.equipments:
if singular == equ.name:
self.equipments.remove(equ)
return
print("ERROR unit {0}, '{1}' not in current equipments '{2}'".format(self.name, e, self.equipments))
def RemoveEquipments(self, equipments):
for e in equipments:
self.RemoveEquipment(e)
self.Update()
def SetCount(self, count):
self.count = count
self.Cost()
def SetFactionCost(self, cost):
self.factionCost = cost
self.Cost()
def AttackCost(self):
self.attackCost = 0
for w in self.equipments + self.spEquipments:
self.attackCost += w.Cost(self.speed, self.attackQuality) * self.count
self.attackCost = int(round(self.attackCost))
def DefenseCost(self):
self.defenseCost = quality_defense_factor(self.defenseQuality) * defense_cost(self.defense) * self.tough
        # Include speed in the defense cost: hardened targets that move fast are critical for controlling objectives.
self.defenseCost *= (self.speed + 24) / (36)
self.defenseCost *= adjust_defense_cost * self.count
self.defenseCost = int(round(self.defenseCost))
# attack and defense cost should already be computed
def OtherCost(self):
self.otherCost = self.globalAdd
        # For transports, the cost depends on defense and speed
self.otherCost += self.passengers * (self.defenseCost / 150) * (self.speed / 12)
self.otherCost += (self.attackCost + self.defenseCost) * self.globalMultiplier
self.otherCost = int(round(self.otherCost))
def Cost(self):
self.DefenseCost()
self.AttackCost()
self.OtherCost()
self.cost = self.defenseCost + self.attackCost + self.otherCost + self.factionCost
return self.cost
def parseSpecialRules(self):
self.speed = 12
self.globalAdd = 0
self.globalMultiplier = 0
self.tough = 1
self.defense = self.basedefense
self.spEquipments = []
self.passengers = 0
self.attackQuality = self.quality
self.defenseQuality = self.quality
specialRules = self.specialRules + self.wargearSp
if 'Vehicle' in specialRules or 'Monster' in specialRules:
smallStomp = Weapon('Monster Stomp', special=['Impact(3)'])
self.spEquipments.append(smallStomp)
if 'Fear' not in specialRules:
specialRules.append('Fear')
if 'Titan' in specialRules:
titanStomp = Weapon('Titan Stomp', 0, 6, 2, ['Autohit'])
self.spEquipments.append(titanStomp)
if 'Fear' not in specialRules:
specialRules.append('Fear')
if 'Airdrop' in specialRules:
self.speed = 0
if 'Slow' in specialRules:
self.speed = 8
if 'Fast' in specialRules:
self.speed = 18
if 'Very Fast' in specialRules:
self.speed = 24
if 'Stealth' in specialRules:
            # Stealth is like +0.5 defense, because it only works against ranged attacks
self.defense += 0.5
if 'Good Shot' in specialRules:
self.attackQuality = 4
if 'Bad Shot' in specialRules:
self.attackQuality = 5
if 'Furious' in specialRules:
self.globalAdd = 1
if 'Fearless' in specialRules:
self.defenseQuality -= 1
if 'Ambush' in specialRules:
if 'Scout' in specialRules:
                # Ambush and Scout don't stack, since you can't use both at once
self.globalMultiplier += 0.2
else:
self.globalMultiplier += 0.10
if 'Scout' in specialRules:
self.globalMultiplier += 0.15
if 'Beacon' in specialRules:
self.globalAdd += 10
if 'Fear' in specialRules:
self.globalAdd += 5
if 'Strider' in specialRules:
self.speed *= 1.2
if 'Flying' in specialRules:
self.speed *= 1.3
        # Flyers move 36" but only in a straight line.
if 'Flyer' in specialRules:
self.speed = 24
for s in specialRules:
if s.startswith('Tough('):
self.tough = int(s[6:-1])
elif s.startswith('Transport('):
self.passengers = int(s[10:-1])
elif s.startswith('Transport+'):
self.passengers += int(s[10:])
elif s.startswith('Psychic('):
self.globalAdd += int(s[8:-1]) * 7
elif s.startswith('Psychic+'):
self.globalAdd += int(s[8:]) * 7
elif s.startswith('Defense+'):
self.defense += int(s[8:])
if 'Regeneration' in specialRules:
self.tough *= 4 / 3
def main():
flamethrower = Weapon('Flamethrower', 12, 6, 0)
flamethrower.Cost(12, 4)
print(flamethrower.Pretty())
gatling = Weapon('Gatling', 18, 4, 1)
gatling.Cost(12, 4)
print(gatling.Pretty())
pulserifle = Weapon('Pulse Rifle', 30, 1, 1)
pulserifle.Cost(12, 4)
print(pulserifle.Pretty())
    plasma = Weapon('Plasma', 24, 1, 3, [])
plasma.Cost(12, 4)
print(plasma.Pretty())
fusion = Weapon('Fusion Carbine', 18, 1, 7, ['Deadly'])
fusion.Cost(12, 4)
print(fusion.Pretty())
railgun = Weapon('Railgun', 48, 1, 4, ['Linked', 'Deadly'])
railgun.Cost(12, 4)
print(railgun.Pretty())
gundrone = WarGear('Gun Drone', ["Regeneration"], [pulserifle, railgun])
print(gundrone)
    vehicle = Weapon('Vehicle', special=['Impact(3)'])
vehicle.Cost(12, 4)
print(vehicle)
grunt = Unit('Grunt', 5, 5, 4, [pulserifle, gundrone], ['Good Shot'])
print(grunt)
grunt_cpt = Unit('Grunt_cpt', 1, 4, 4, [pulserifle], ['Tough(3)', 'Hero', 'Volley Fire'])
print(grunt_cpt)
suitfist = Weapon('Suit Fist', 0, 4, 1)
battlesuit_cpt = Unit('Battlesuit Captain', 1, 3, 6, [suitfist], ['Ambush', 'Flying', 'Hero', 'Tough(3)'])
print(battlesuit_cpt)
if __name__ == "__main__":
# execute only if run as a script
main()
|
kdj0c/onepagepoints
|
onepagepoints.py
|
Python
|
mit
| 15,902
|
[
"BLAST"
] |
7c149954a408cc9d5242b6365149c0f608e8b06dc9403e9e7728879f6fe13f20
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("testing-cookiecutter.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
aniketmaithani/testing-cookiecutter
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,242
|
[
"VisIt"
] |
523816d52d257718957437ef61dc4a9cacc4e3e2d84e146ed3d6705435b7a317
|
#!/usr/bin/env python3
import os
from setuptools import setup
def version():
setupDir = os.path.dirname(os.path.realpath(__file__))
versionFile = open(os.path.join(setupDir, 'checkm', 'VERSION'))
return versionFile.readline().strip()
setup(
name='checkm-genome',
version=version(),
author='Donovan Parks, Michael Imelfort, Connor Skennerton',
author_email='donovan.parks@gmail.com',
packages=['checkm', 'checkm.plot', 'checkm.test', 'checkm.util'],
scripts=['bin/checkm'],
package_data={'checkm': ['VERSION', 'DATA_CONFIG']},
include_package_data=True,
url='http://pypi.python.org/pypi/checkm/',
license='GPL3',
description='Assess the quality of putative genome bins.',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
install_requires=[
"numpy >= 1.13.1",
"scipy >= 0.19.1",
"matplotlib >= 2.1.0",
"pysam >= 0.12.0.1",
"dendropy >= 4.4.0",
"setuptools"],
zip_safe=False
)
|
Ecogenomics/CheckM
|
setup.py
|
Python
|
gpl-3.0
| 1,246
|
[
"pysam"
] |
7802f332f6980ebec1d54ebf8c373982611b79697607bce54495773f3e1305e3
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.tools.fitting Contains functions used for fitting models to two-dimensional data.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
import warnings
import numpy as np
from scipy import stats
# Import astronomical modules
from astropy.modeling import models, fitting
# Import the relevant PTS classes and modules
from . import general, statistics
from ..basics.vector import Position, Extent
from ..basics.coordinate import PixelCoordinate
# -----------------------------------------------------------------
def linear_regression(x, y):
"""
This function ...
:param x:
:param y:
:return:
"""
# Fit
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
# Return
return slope, intercept
# -----------------------------------------------------------------
def get_linear_fit_parameters(x, y, xlog=False, ylog=False):
"""
This function ...
:param x:
:param y:
:param xlog:
:param ylog:
:return:
"""
# Convert?
if xlog: x = np.log10(x)
if ylog: y = np.log10(y)
# Keep only finite values
valid = np.isfinite(x) * np.isfinite(y)
x = x[valid]
y = y[valid]
# Perform fit
slope, intercept = linear_regression(x, y)
# Return
return slope, intercept
# -----------------------------------------------------------------
def get_linear_values(x, slope, intercept, xlog=False, ylog=False):
"""
    This function ...
:param x:
:param slope:
:param intercept:
:param xlog:
:param ylog:
:return:
"""
# Calculate new values
if xlog: x = np.log10(x)
y = slope * x + intercept
# Convert?
if ylog: y = 10 ** y
# Return
return y
# -----------------------------------------------------------------
def get_linear_fitted_values(x, y, new_x, xlog=False, ylog=False, return_parameters=False):
"""
This function ...
:param x:
:param y:
:param new_x:
:param xlog:
:param ylog:
:param return_parameters:
:return:
"""
# Get the parameters
slope, intercept = get_linear_fit_parameters(x, y, xlog=xlog, ylog=ylog)
# Get the values
new = get_linear_values(new_x, slope, intercept, xlog=xlog, ylog=ylog)
# Return
if return_parameters: return new, slope, intercept
else: return new
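# Illustrative sketch (not part of the original file): fitting a power law y = a * x^b
# with the helpers above by doing the linear fit in log-log space.
#   x = np.array([1., 2., 4., 8.])
#   y = 3. * x ** 2
#   new_y, slope, intercept = get_linear_fitted_values(x, y, x, xlog=True, ylog=True, return_parameters=True)
#   # slope ~= 2 (the exponent) and 10 ** intercept ~= 3 (the prefactor)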
# -----------------------------------------------------------------
def fit_polynomial(data, degree, mask=None, sigma_clip_background=False, show_warnings=False,
fitter="levenberg-marquardt", zero_order=None):
"""
This function ...
:param data:
:param degree:
:param mask:
:param sigma_clip_background:
:param show_warnings:
    :param fitter:
    :param zero_order:
    :return:
"""
if sigma_clip_background: mask = statistics.sigma_clip_mask(data, sigma_level=3.0, mask=mask)
# Fit the data using astropy.modeling
poly_init = models.Polynomial2D(degree=degree)
# Set initial values
if zero_order is not None: poly_init.c0_0 = zero_order
#poly_init.c1_0 = 1.0
#poly_init.c2_0 = 2.0
#poly_init.c3_0 = 3.0
#poly_init.c0_1 = 4.0
#poly_init.c0_2 = 5.0
#poly_init.c0_3 = 6.0
#poly_init.c1_1 = 7.0
#poly_init.c1_2 = 8.0
#poly_init.c2_1 = 9.0
# Get the fitter
if fitter == "levenberg-marquardt": fit_model = fitting.LevMarLSQFitter()
elif fitter == "linear":
if degree > 1: raise ValueError("Cannot use linear fitter for polynomials with degree > 1")
fit_model = fitting.LinearLSQFitter()
elif fitter == "simplex": fit_model = fitting.SimplexLSQFitter()
elif fitter == "sequential_least_squares": fit_model = fitting.SLSQPLSQFitter()
else: raise ValueError("Invalid vlaue for 'fitter'")
# Split x, y and z values that are not masked
x_values, y_values, z_values = general.split_xyz(data, mask=mask, arrays=True)
# Ignore model linearity warning from the fitter
if show_warnings: poly = fit_model(poly_init, x_values, y_values, z_values)
else:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
poly = fit_model(poly_init, x_values, y_values, z_values) # What comes out is the model with the parameters set
# Return the polynomial model and the new mask
if sigma_clip_background: return poly, mask
# Return the polynomial model
else: return poly
# -----------------------------------------------------------------
def all_zero_parameters(polynomial):
"""
This function ...
:param polynomial:
:return:
"""
from ...core.tools import sequences
return sequences.all_equal_to(polynomial.parameters, 0.0)
# -----------------------------------------------------------------
def evaluate_model(model, x_min, x_max, y_min, y_max, x_delta=1, y_delta=1):
"""
This function ...
:param model:
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param x_delta:
:param y_delta:
:return:
"""
# Create x and y meshgrid for evaluating the model
y_plotvalues, x_plotvalues = np.mgrid[y_min:y_max:y_delta, x_min:x_max:x_delta]
# Evaluate the model
evaluated_model = model(x_plotvalues, y_plotvalues)
# Return the evaluated data
return evaluated_model
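# Illustrative sketch (not part of the original file): evaluate a fitted background
# model over the full frame of a hypothetical 2D `data` array.
#   poly = fit_polynomial(data, degree=2)
#   background = evaluate_model(poly, 0, data.shape[1], 0, data.shape[0])
#   # `background` has the same shape as `data`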
# -----------------------------------------------------------------
# NOT USED CURRENTLY
def fit_two_2D_Gaussians(box, x_shift=0.0, y_shift=0.0, zoom_factor=1.0, mask=None):
"""
This function ...
:param box:
:param x_shift:
:param y_shift:
:param zoom_factor:
:param mask:
:return:
"""
# Get the dimensions of the box
box_ysize = box.shape[0]
box_xsize = box.shape[1]
upperright_x = 0.75*box_xsize
upperright_y = 0.75*box_ysize
lowerleft_x = 0.25*box_xsize
lowerleft_y = 0.25*box_ysize
init_x_stddev = 0.2*box_xsize
init_y_stddev = 0.2*box_ysize
two_gaussians_init = models.Gaussian2D(amplitude=1., x_mean=upperright_x, y_mean=upperright_y, x_stddev=init_x_stddev,
y_stddev=init_y_stddev) + \
models.Gaussian2D(amplitude=1., x_mean=lowerleft_x, y_mean=lowerleft_y, x_stddev=init_x_stddev,
y_stddev=init_y_stddev)
fit_model = fitting.LevMarLSQFitter()
x_values = []
y_values = []
z_values = []
for x in range(box_xsize):
for y in range(box_ysize):
# If no mask is specified or the pixel is not masked, add the coordinates and value to the appropriate lists
if mask is None or not mask[y,x]:
x_values.append(x)
y_values.append(y)
z_values.append(box[y,x])
# Ignore model linearity warning from the fitter
with warnings.catch_warnings():
warnings.simplefilter('ignore')
two_gaussians = fit_model(two_gaussians_init, x_values, y_values, z_values) # What comes out is the model with the parameters set
# Adjust the position of the model to a different coordinate frame
if zoom_factor > 1.0:
two_gaussians.x_mean_0.value = two_gaussians.x_mean_0.value/zoom_factor + x_shift
two_gaussians.y_mean_0.value = two_gaussians.y_mean_0.value/zoom_factor + y_shift
two_gaussians.x_stddev_0.value /= zoom_factor
two_gaussians.y_stddev_0.value /= zoom_factor
two_gaussians.x_mean_1.value = two_gaussians.x_mean_1.value/zoom_factor + x_shift
two_gaussians.y_mean_1.value = two_gaussians.y_mean_1.value/zoom_factor + y_shift
two_gaussians.x_stddev_1.value /= zoom_factor
two_gaussians.y_stddev_1.value /= zoom_factor
else:
two_gaussians.x_mean_0.value += x_shift
two_gaussians.y_mean_0.value += y_shift
two_gaussians.x_mean_1.value += x_shift
two_gaussians.y_mean_1.value += y_shift
# Return the model
return two_gaussians
# -----------------------------------------------------------------
def fit_2D_ShiftedGaussian(box, center=None, fixed_center=False, max_center_offset=None, sigma=None, zoom_factor=1.0, mask=None, amplitude=None):
"""
This function ...
:param box:
:param center:
:param fixed_center:
:param max_center_offset:
:param sigma:
:param zoom_factor:
:param mask:
:param amplitude:
:return:
"""
# Compound model class that represent a Gaussian function that can be shifted up and down
ShiftedGaussian = models.Gaussian2D + models.Const2D
# Parameters are: amplitude_0, x_mean_0, y_mean_0, x_stddev_0, y_stddev_0, theta_0, amplitude_1
# -----------------------------------------------------------------
def fit_2D_Gaussian(box, center=None, fixed_center=False, max_center_offset=None, sigma=None, zoom_factor=1.0, mask=None, amplitude=None, max_sigma_offset=None):
"""
This function ...
:param box:
:param center:
:param fixed_center:
    :param max_center_offset:
    :param sigma:
    :param zoom_factor:
    :param mask:
    :param amplitude:
    :param max_sigma_offset:
    :return:
"""
# Get the dimensions of the box
box_xsize = box.xsize
box_ysize = box.ysize
# Set the initial guess for the center of the model (the one that is specified, otherwise the center of the box)
init_xmean = center.x if center is not None else 0.5*(box.xsize-1)
init_ymean = center.y if center is not None else 0.5*(box.ysize-1)
# Set the initial guess for the width of the model (the one that is specified, otherwise one tenth of the size of the box)
init_x_stddev = sigma if sigma is not None else 0.1*box.xsize
init_y_stddev = sigma if sigma is not None else 0.1*box.ysize
# Initialize an empty dictionary to specify fixed parameters
fixed_parameters = {'theta': 0.0}
if fixed_center:
fixed_parameters['x_mean'] = True
fixed_parameters['y_mean'] = True
# Initialize an empty dictionary to specify bounds
bounds = {}
if max_center_offset is not None:
bounds['x_mean'] = [init_xmean-max_center_offset, init_xmean+max_center_offset]
bounds['y_mean'] = [init_ymean-max_center_offset, init_ymean+max_center_offset]
if max_sigma_offset is not None:
bounds["x_stddev"] = [init_x_stddev - max_sigma_offset, init_x_stddev + max_sigma_offset]
bounds["y_stddev"] = [init_y_stddev - max_sigma_offset, init_y_stddev + max_sigma_offset]
# Define the 'tied' dictionary to specify that the y_stddev should vary along with x_stddev
tied = {'y_stddev': (lambda model: model.x_stddev)}
# Fit the data using astropy.modeling
gaussian_init = models.Gaussian2D(amplitude=1., x_mean=init_xmean, y_mean=init_ymean, x_stddev=init_x_stddev,
y_stddev=init_y_stddev, fixed=fixed_parameters, bounds=bounds, tied=tied)
fit_model = fitting.LevMarLSQFitter()
x_values = []
y_values = []
z_values = []
for x in range(box_xsize):
for y in range(box_ysize):
# If no mask is specified or the pixel is not masked, add the coordinates and value to the appropriate lists
if mask is None or not mask[y,x]:
x_values.append(x)
y_values.append(y)
z_values.append(box[y,x])
# Ignore model linearity warning from the fitter
with warnings.catch_warnings():
warnings.simplefilter('ignore')
gaussian = fit_model(gaussian_init, x_values, y_values, z_values) # What comes out is the model with the parameters set
# Fix negative sigmas
if gaussian.x_stddev.value < 0: gaussian.x_stddev.value = -gaussian.x_stddev.value
if gaussian.y_stddev.value < 0: gaussian.y_stddev.value = -gaussian.y_stddev.value
# Adjust the position of the model to a different coordinate frame
if zoom_factor > 1.0:
gaussian.x_mean.value = gaussian.x_mean.value/zoom_factor
gaussian.y_mean.value = gaussian.y_mean.value/zoom_factor
gaussian.x_stddev.value /= zoom_factor
gaussian.y_stddev.value /= zoom_factor
# Return the Gaussian model
return gaussian
# -----------------------------------------------------------------
def fit_2D_Airy(box, center=None, fixed_center=False, max_center_offset=None, radius=None, zoom_factor=1.0, mask=None, amplitude=None):
"""
This function ...
:param box:
:param center:
:param fixed_center:
    :param max_center_offset:
    :param radius:
    :param zoom_factor:
    :param mask:
    :param amplitude:
    :return:
"""
# Get the dimensions of the box
box_ysize = box.shape[0]
box_xsize = box.shape[1]
# Set the initial guess for the center of the model (the one that is specified, otherwise the center of the box)
init_x0 = center.x if center is not None else 0.5*(box_xsize-1)
init_y0 = center.y if center is not None else 0.5*(box_ysize-1)
# Set the initial radius for the model (the one that is specified, otherwise one tenth of the width of the box)
init_radius = radius if radius is not None else 0.1*box_xsize
# Initialize an empty dictionary to specify fixed parameters
fixed_parameters = {}
if fixed_center:
fixed_parameters['x_0'] = True
fixed_parameters['y_0'] = True
# Initialize an empty dictionary to specify bounds
bounds = {}
if max_center_offset is not None:
        bounds['x_0'] = [init_x0-max_center_offset, init_x0+max_center_offset]
        bounds['y_0'] = [init_y0-max_center_offset, init_y0+max_center_offset]
# Fit the data using astropy.modeling
airy_init = models.AiryDisk2D(amplitude=1., x_0=init_x0, y_0=init_y0, radius=init_radius, fixed=fixed_parameters, bounds=bounds)
fit_model = fitting.LevMarLSQFitter()
x_values = []
y_values = []
z_values = []
for x in range(box_xsize):
for y in range(box_ysize):
# If no mask is specified or the pixel is not masked, add the coordinates and value to the appropriate lists
if mask is None or not mask[y,x]:
x_values.append(x)
y_values.append(y)
z_values.append(box[y,x])
# Ignore model linearity warning from the fitter
with warnings.catch_warnings():
warnings.simplefilter('ignore')
airy = fit_model(airy_init, x_values, y_values, z_values) # What comes out is the model with the parameters set
# Adjust the position of the model to a different coordinate frame
if zoom_factor > 1.0:
airy.x_0.value = airy.x_0.value/zoom_factor
airy.y_0.value = airy.y_0.value/zoom_factor
airy.radius /= zoom_factor
# Return the fitted two-dimensional Airy Disk model
return airy
# -----------------------------------------------------------------
def fit_2D_Moffat(box, center=None, fixed_center=False, deviation_center=None, x_shift=0.0, y_shift=0.0,
zoom_factor=1.0, mask=None):
"""
This function ...
:param box:
:param center:
:param fixed_center:
:param deviation_center:
:param x_shift:
:param y_shift:
:param zoom_factor:
:param mask:
:return:
"""
# Get the dimensions of the box
box_ysize = box.shape[0]
box_xsize = box.shape[1]
# Set the initial guess for the center of the model (the one that is specified, otherwise the center of the box)
init_x0 = center[0] if center is not None else 0.5*(box_xsize-1)
init_y0 = center[1] if center is not None else 0.5*(box_ysize-1)
# Initialize an empty dictionary to specify fixed parameters
fixed_parameters = {}
if fixed_center:
fixed_parameters['x_0'] = True
fixed_parameters['y_0'] = True
# Initialize an empty dictionary to specify bounds
bounds = {}
if deviation_center is not None:
        bounds['x_0'] = [init_x0-deviation_center, init_x0+deviation_center]
        bounds['y_0'] = [init_y0-deviation_center, init_y0+deviation_center]
# Fit the data using astropy.modeling
moffat_init = models.Moffat2D(amplitude=1., x_0=init_x0, y_0=init_y0, gamma=1.0, alpha=1.0, fixed=fixed_parameters, bounds=bounds)
fit_model = fitting.LevMarLSQFitter()
x_values = []
y_values = []
z_values = []
for x in range(box_xsize):
for y in range(box_ysize):
# If no mask is specified or the pixel is not masked, add the coordinates and value to the appropriate lists
if mask is None or not mask[y,x]:
x_values.append(x)
y_values.append(y)
z_values.append(box[y,x])
# Ignore model linearity warning from the fitter
with warnings.catch_warnings():
warnings.simplefilter('ignore')
moffat = fit_model(moffat_init, x_values, y_values, z_values) # What comes out is the model with the parameters set
# Adjust the position of the model to a different coordinate frame
if zoom_factor > 1.0:
moffat.x_0.value = moffat.x_0.value/zoom_factor + x_shift
moffat.y_0.value = moffat.y_0.value/zoom_factor + y_shift
else:
moffat.x_0.value += x_shift
moffat.y_0.value += y_shift
# Return the fitted two-dimensional Moffat model
return moffat
# -----------------------------------------------------------------
def fit_2D_MexicanHat(box, center=None, fixed_center=False, deviation_center=None, radius=None, x_shift=0.0,
y_shift=0.0, zoom_factor=1.0, mask=None):
"""
This function ...
:param box:
:param center:
:param fixed_center:
:param deviation_center:
:param radius:
:param x_shift:
:param y_shift:
:param zoom_factor:
:param mask:
:return:
"""
# Get the dimensions of the box
box_ysize = box.shape[0]
box_xsize = box.shape[1]
# Set the initial guess for the center of the model (the one that is specified, otherwise the center of the box)
init_x0 = center[0] if center is not None else 0.5*(box_xsize-1)
init_y0 = center[1] if center is not None else 0.5*(box_ysize-1)
# Set the initial guess for the radius of the model (the one that is specified, otherwise one tenth of the width of the box)
init_sigma = radius if radius is not None else 0.1*box_xsize
# Initialize an empty dictionary to specify fixed parameters
fixed_parameters = {}
if fixed_center:
fixed_parameters['x_0'] = True
fixed_parameters['y_0'] = True
# Initialize an empty dictionary to specify bounds
bounds = {}
if deviation_center is not None:
        bounds['x_0'] = [init_x0-deviation_center, init_x0+deviation_center]
        bounds['y_0'] = [init_y0-deviation_center, init_y0+deviation_center]
# Fit the data using astropy.modeling
mexicanhat_init = models.MexicanHat2D(amplitude=1., x_0=init_x0, y_0=init_y0, sigma=init_sigma, fixed=fixed_parameters, bounds=bounds)
fit_model = fitting.LevMarLSQFitter()
x_values = []
y_values = []
z_values = []
for x in range(box_xsize):
for y in range(box_ysize):
# If no mask is specified or the pixel is not masked, add the coordinates and value to the appropriate lists
if mask is None or not mask[y,x]:
x_values.append(x)
y_values.append(y)
z_values.append(box[y,x])
# Ignore model linearity warning from the fitter
with warnings.catch_warnings():
warnings.simplefilter('ignore')
mexicanhat = fit_model(mexicanhat_init, x_values, y_values, z_values) # What comes out is the model with the parameters set
# Adjust the position of the model to a different coordinate frame
if zoom_factor > 1.0:
mexicanhat.x_0.value = mexicanhat.x_0.value/zoom_factor + x_shift
mexicanhat.y_0.value = mexicanhat.y_0.value/zoom_factor + y_shift
mexicanhat.sigma.value /= zoom_factor
else:
mexicanhat.x_0.value += x_shift
mexicanhat.y_0.value += y_shift
# Return the fitted two-dimensional Mexican Hat model
return mexicanhat
# -----------------------------------------------------------------
def center(model):
"""
This function ...
:param model:
:return:
"""
if isinstance(model, models.Gaussian2D): return PixelCoordinate(x=model.x_mean.value, y=model.y_mean.value)
elif isinstance(model, models.AiryDisk2D): return PixelCoordinate(x=model.x_0.value, y=model.y_0.value)
else: raise ValueError("Unsupported model type: " + str(type(model)))
# -----------------------------------------------------------------
def sigma(model):
"""
This function ...
:param model:
:return:
"""
if isinstance(model, models.Gaussian2D): return Extent(x=model.x_stddev.value, y=model.y_stddev.value).norm
elif isinstance(model, models.AiryDisk2D): return airy_radius_to_gaussian_sigma(model.radius)
else: raise ValueError("Unsupported model type: " + str(type(model)))
# -----------------------------------------------------------------
def sigma_symmetric(model):
"""
This function ...
:param model:
:return:
"""
    # sigma() returns a scalar, so check symmetry on the model parameters themselves
    if isinstance(model, models.Gaussian2D):
        if model.x_stddev.value != model.y_stddev.value: raise ValueError("x and y stddev are not equal")
        return model.x_stddev.value
    # Other supported models are symmetric by construction
    return sigma(model)
# -----------------------------------------------------------------
def airy_radius_to_gaussian_sigma(radius):
"""
This function ...
:param radius:
:return:
"""
return 0.42 * radius * 0.81989397882
# -----------------------------------------------------------------
def gaussian_sigma_to_airy_radius(sigma):
"""
This function ...
:param sigma:
:return:
"""
return sigma / (0.42 * 0.81989397882)
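# Note (illustrative, not part of the original file): the two conversions above are
# exact inverses, and an Airy radius r corresponds to a Gaussian sigma of about
# 0.344 * r (0.42 * 0.81989397882 ~= 0.344).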
# -----------------------------------------------------------------
def fwhm_to_airy_radius(fwhm):
"""
This function ...
:param fwhm:
:return:
"""
sigma = statistics.fwhm_to_sigma * fwhm
return gaussian_sigma_to_airy_radius(sigma)
# -----------------------------------------------------------------
def fwhm(model):
"""
This function ...
:param model:
:return:
"""
return statistics.sigma_to_fwhm * sigma(model)
# -----------------------------------------------------------------
def fwhm_symmetric(model):
"""
This function ...
:param model:
:return:
"""
return statistics.sigma_to_fwhm * sigma_symmetric(model)
# -----------------------------------------------------------------
def shift_model(model, x_shift, y_shift):
"""
This function ...
:param model:
:param x_shift:
:param y_shift:
:return:
"""
# If the model is a 2D Gaussian function
if isinstance(model, models.Gaussian2D):
model.x_mean += x_shift
model.y_mean += y_shift
# If the model is a 2D Airy Disk function
elif isinstance(model, models.AiryDisk2D):
model.x_0 += x_shift
model.y_0 += y_shift
# Unsupported models
else: raise ValueError("Unsupported model (should be 'Gaussian2D' or 'AiryDisk2D'")
# -----------------------------------------------------------------
def shifted_model(model, x_shift, y_shift):
"""
This function ...
:param model:
:return:
"""
# Make a copy of the original model
new_model = copy.deepcopy(model)
# Shift the new model
shift_model(new_model, x_shift, y_shift)
# Return the new model
return new_model
# -----------------------------------------------------------------
|
SKIRT/PTS
|
magic/tools/fitting.py
|
Python
|
agpl-3.0
| 24,098
|
[
"Gaussian"
] |
383f19020db8a04b7833942cc15c7dd1b8fe1228c0f162bfc84d1d07885ad7b3
|
from unittest import TestCase
import macaroonbakery.httpbakery as httpbakery
import macaroonbakery.bakery as bakery
class TestWebBrowserInteractionInfo(TestCase):
def test_from_dict(self):
info_dict = {
'VisitURL': 'https://example.com/visit',
'WaitTokenURL': 'https://example.com/wait'}
interaction_info = httpbakery.WebBrowserInteractionInfo.from_dict(info_dict)
self.assertEqual(
interaction_info.visit_url, 'https://example.com/visit')
self.assertEqual(
interaction_info.wait_token_url, 'https://example.com/wait')
class TestError(TestCase):
def test_from_dict_upper_case_fields(self):
err = httpbakery.Error.from_dict({
'Message': 'm',
'Code': 'c',
})
self.assertEqual(err, httpbakery.Error(
code='c',
message='m',
info=None,
version=bakery.LATEST_VERSION,
))
def test_from_dict_lower_case_fields(self):
err = httpbakery.Error.from_dict({
'message': 'm',
'code': 'c',
})
self.assertEqual(err, httpbakery.Error(
code='c',
message='m',
info=None,
version=bakery.LATEST_VERSION,
))
|
go-macaroon-bakery/py-macaroon-bakery
|
macaroonbakery/tests/test_httpbakery.py
|
Python
|
lgpl-3.0
| 1,292
|
[
"VisIt"
] |
4c53ce01afb7e55ae7d0c2c4be2b6696599b885b4c86a183826807f5009addde
|
from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def _hist_optim_numbins_estimator(a, estimator):
"""
A helper function to be called from histogram to deal with estimating optimal number of bins
estimator: str
If estimator is one of ['auto', 'fd', 'scott', 'rice', 'sturges'] this function
    will choose the appropriate estimator and return its estimate for the optimal
number of bins.
"""
assert isinstance(estimator, basestring)
# private function should not be called otherwise
if a.size == 0:
return 1
def sturges(x):
"""
Sturges Estimator
A very simplistic estimator based on the assumption of normality of the data
Poor performance for non-normal data, especially obvious for large X.
Depends only on size of the data.
"""
return np.ceil(np.log2(x.size)) + 1
def rice(x):
"""
Rice Estimator
Another simple estimator, with no normality assumption.
It has better performance for large data, but tends to overestimate number of bins.
The number of bins is proportional to the cube root of data size (asymptotically optimal)
Depends only on size of the data
"""
return np.ceil(2 * x.size ** (1.0 / 3))
def scott(x):
"""
Scott Estimator
The binwidth is proportional to the standard deviation of the data and
inversely proportional to the cube root of data size (asymptotically optimal)
"""
h = 3.5 * x.std() * x.size ** (-1.0 / 3)
if h > 0:
return np.ceil(x.ptp() / h)
return 1
def fd(x):
"""
Freedman Diaconis rule using interquartile range (IQR) for binwidth
Considered a variation of the Scott rule with more robustness as the IQR
is less affected by outliers than the standard deviation. However the IQR depends on
fewer points than the sd so it is less accurate, especially for long tailed distributions.
If the IQR is 0, we return 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size (asymptotically optimal)
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
if iqr > 0:
h = (2 * iqr * x.size ** (-1.0 / 3))
return np.ceil(x.ptp() / h)
# If iqr is 0, default number of bins is 1
return 1
def auto(x):
"""
The FD estimator is usually the most robust method, but it tends to be too small
for small X. The Sturges estimator is quite good for small (<1000) datasets and is
the default in R.
This method gives good off the shelf behaviour.
"""
return max(fd(x), sturges(x))
optimal_numbins_methods = {'sturges': sturges, 'rice': rice, 'scott': scott,
'fd': fd, 'auto': auto}
try:
estimator_func = optimal_numbins_methods[estimator.lower()]
except KeyError:
raise ValueError("{0} not a valid method for `bins`".format(estimator))
else:
# these methods return floats, np.histogram requires an int
return int(estimator_func(a))
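# Worked example (illustrative, not part of the original file): for a sample of
# 1000 points, sturges gives ceil(log2(1000)) + 1 = 11 bins and rice gives
# ceil(2 * 1000 ** (1.0 / 3)) = 20 bins; 'auto' takes the larger of the 'fd' and
# 'sturges' estimates for the data at hand.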
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use the method
chosen to calculate the optimal number of bins (see Notes for more detail
on the estimators). For visualisation, we suggest using the 'auto' option.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good all round performance
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into account data
            variability and data size.
'scott'
            Less robust estimator that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data size.
Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only optimal for
gaussian data and underestimates number of bins for large non-gaussian datasets.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
.. versionadded:: 1.11.0
    The methods to estimate the optimal number of bins are well founded in the literature,
and are inspired by the choices R provides for histogram visualisation.
Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal,
which is why it appears in most estimators.
These are simply plug-in methods that give good starting points for number of bins.
In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the sturges
value will usually be chosen, while larger datasets will usually default to FD.
Avoids the overly conservative behaviour of FD and Sturges for small and
large datasets respectively. Switchover point is usually x.size~1000.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \\frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \\frac{3.5\\sigma}{n^{1/3}}
The binwidth is proportional to the standard deviation (sd) of the data
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The sd is not very robust to outliers. Values
are very similar to the Freedman Diaconis Estimator in the absence of outliers.
'Rice'
.. math:: n_h = \\left\\lceil 2n^{1/3} \\right\\rceil
The number of bins is only proportional to cube root of a.size.
It tends to overestimate the number of bins
and it does not take into account data variability.
'Sturges'
.. math:: n_h = \\left\\lceil \\log _{2}n+1 \\right\\rceil
The number of bins is the base2 log of a.size.
This estimator assumes normality of data and is too conservative for larger,
non-normal datasets. This is the default method in R's `hist` method.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data with 2000 points
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size = 1000), rng.normal(loc = 5, scale = 2, size = 1000)))
    >>> plt.hist(a, bins = 'auto') # plt.hist passes its arguments to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise ValueError(
'max must be larger than min in range parameter.')
if not np.all(np.isfinite([mn, mx])):
raise ValueError(
'range parameter must be finite.')
if isinstance(bins, basestring):
bins = _hist_optim_numbins_estimator(a, bins)
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a = tmp_a.astype(float)
tmp_a -= mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# We now compute the bin edges since these are returned
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise ValueError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
if not np.all(np.isfinite(range)):
raise ValueError(
'range parameter must be finite.')
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
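# Illustrative sketch (editor's addition, hypothetical helper): the weighted
# branch above reduces to sum(a * w) / sum(w), with the weights broadcast
# along the chosen axis; ``returned=True`` also hands back that denominator.
def _demo_average_weights():
    import numpy as np
    data = np.arange(6).reshape((3, 2))
    w = np.array([0.25, 0.75])
    avg, wsum = np.average(data, axis=1, weights=w, returned=True)
    assert np.allclose(avg, (data * w).sum(axis=1) / w.sum())
    assert np.allclose(wsum, w.sum())
    return avg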
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite,
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print('ValueError')
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sign function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
# Only able to stack vertically if the array is 1d or less
if x.ndim <= 1:
condlist = np.vstack([condlist, ~totlist])
else:
condlist = [asarray(c, dtype=bool) for c in condlist]
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
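# Illustrative sketch (editor's addition, hypothetical helper): with one more
# entry in ``funclist`` than in ``condlist``, the extra entry is the default
# applied wherever no condition holds, as described above.
def _demo_piecewise_default():
    import numpy as np
    x = np.linspace(-2.5, 2.5, 6)
    # Two conditions plus a scalar default; here the conditions cover all of x,
    # so the default 0.0 is never used and the result is abs(x).
    y = np.piecewise(x, [x < 0, x > 0], [lambda v: -v, lambda v: v, 0.0])
    assert np.allclose(y, np.abs(x))
    return y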
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
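# Illustrative sketch (editor's addition, hypothetical helper): when multiple
# conditions are True the first one in ``condlist`` wins, and positions where
# no condition holds take ``default``.
def _demo_select_default():
    import numpy as np
    x = np.arange(10)
    out = np.select([x < 3, x > 5], [x, x ** 2], default=-1)
    # Elements 3..5 satisfy neither condition and fall back to the default.
    assert list(out[3:6]) == [-1, -1, -1]
    return out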
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : scalar or list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
A single scalar specifies the sample distance for all dimensions.
If `axis` is given, the number of varargs must equal the number of axes.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes of the input array.
axis may be negative, in which case it counts from the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
The axis keyword can be used to specify a subset of axes of which the gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
axes = kwargs.pop('axis', None)
if axes is None:
axes = tuple(range(N))
# check axes to have correct type and no duplicate entries
if isinstance(axes, int):
axes = (axes,)
if not isinstance(axes, tuple):
raise TypeError("A tuple of integers or a single integer is required")
# normalize axis values:
axes = tuple(x + N if x < 0 else x for x in axes)
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
if len(set(axes)) != len(axes):
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == len(axes):
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
# Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[i]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len(axes) == 1:
return outvals[0]
else:
return outvals
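# Illustrative sketch (editor's addition, hypothetical helper): a scalar in
# ``varargs`` is the sample spacing, so doubling it halves every difference.
def _demo_gradient_spacing():
    import numpy as np
    f = np.array([1.0, 2.0, 4.0, 7.0, 11.0, 16.0])
    g_unit = np.gradient(f)         # unit spacing between samples
    g_two = np.gradient(f, 2.0)     # samples assumed 2.0 apart
    assert np.allclose(g_unit, 2.0 * g_two)
    return g_unit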
def diff(a, n=1, axis=-1):
"""
Calculate the n-th discrete difference along given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
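# Illustrative sketch (editor's addition, hypothetical helper): the first
# difference is a shifted subtraction, and higher orders just recurse.
def _demo_diff_slicing():
    import numpy as np
    a = np.array([1, 2, 4, 7, 0])
    assert np.array_equal(np.diff(a), a[1:] - a[:-1])
    assert np.array_equal(np.diff(a, n=2), np.diff(np.diff(a)))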
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
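# Illustrative sketch (editor's addition, hypothetical helper): with ``period``
# given, the x-coordinates are reduced modulo the period before interpolating,
# so angles such as -180 and 180 degrees map to the same value.
def _demo_interp_periodic():
    import numpy as np
    xp = [190, -190, 350, -350]
    fp = [5, 10, 3, 4]
    lo = np.interp(-180, xp, fp, period=360)
    hi = np.interp(180, xp, fp, period=360)
    assert np.isclose(lo, hi)
    return lo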
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
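# Illustrative sketch (editor's addition, hypothetical helper): np.angle is
# arctan2(imag, real), optionally scaled to degrees.
def _demo_angle_arctan2():
    import numpy as np
    z = np.array([1.0, 1.0j, 1 + 1j])
    assert np.allclose(np.angle(z), np.arctan2(z.imag, z.real))
    assert np.isclose(np.angle(1 + 1j, deg=True), 45.0)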
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
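# Illustrative sketch (editor's addition, hypothetical helper): after
# unwrapping, the example phase no longer jumps by more than pi between
# neighbouring samples.
def _demo_unwrap_jumps():
    import numpy as np
    phase = np.linspace(0, np.pi, num=5)
    phase[3:] += np.pi
    unwrapped = np.unwrap(phase)
    assert np.all(np.abs(np.diff(unwrapped)) <= np.pi)
    return unwrapped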
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : ndarray
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
if not isinstance(arr, np.ndarray):
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(arr).__name__))
return _insert(arr, mask, vals)
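# Illustrative sketch (editor's addition, hypothetical helper): ``place`` is
# the inverse of ``extract`` in the sense that it writes values back to the
# positions selected by the same mask, in the same C-order traversal.
def _demo_extract_place_roundtrip():
    import numpy as np
    arr = np.arange(12).reshape((3, 4))
    mask = np.mod(arr, 3) == 0
    vals = np.extract(mask, arr)
    out = np.zeros_like(arr)
    np.place(out, mask, vals)
    assert np.array_equal(np.extract(mask, out), vals)
    return out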
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
Support for the new keyword-argument interface and for the `excluded`
argument further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
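# Illustrative sketch (editor's addition, hypothetical helper): passing
# ``otypes`` fixes the output dtype up front, so the wrapped function is not
# called on the first element just to infer it.
def _demo_vectorize_otypes():
    import numpy as np

    def fold(a, b):
        # Same toy rule as the docstring example: a - b if a > b, else a + b.
        return a - b if a > b else a + b

    vfunc = np.vectorize(fold, otypes=[float])
    out = vfunc([1, 2, 3, 4], 2)
    assert out.dtype == np.float64
    assert np.allclose(out, [3.0, 4.0, 1.0, 2.0])
    return out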
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x))
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
c *= 1. / np.float64(fact)
return c.squeeze()
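# Illustrative sketch (editor's addition, hypothetical helper): the weighted
# normalization described in the Notes above, spelled out and compared with
# np.cov for frequency weights ``f`` and unit observation weights ``a``.
def _demo_cov_weighted():
    import numpy as np
    m = np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
    f = np.array([1.0, 2.0, 1.0])
    a = np.array([1.0, 1.0, 1.0])
    ddof = 1
    w = f * a
    v1 = np.sum(w)
    v2 = np.sum(w * a)
    x = m - np.sum(m * w, axis=1, keepdims=True) / v1
    manual = np.dot(x * w, x.T) * v1 / (v1 ** 2 - ddof * v2)
    assert np.allclose(manual, np.cov(m, fweights=f, aweights=a))
    return manual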
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
d = sqrt(d)
# calculate "c / multiply.outer(d, d)" row-wise ... for memory and speed
for i in range(0, d.size):
c[i,:] /= (d * d[i])
return c
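# Illustrative sketch (editor's addition, hypothetical helper): the correlation
# matrix is the covariance matrix divided by the outer product of the
# per-variable standard deviations, matching the formula in the docstring.
def _demo_corrcoef_from_cov():
    import numpy as np
    x = np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
    c = np.cov(x)
    d = np.sqrt(np.diag(c))
    assert np.allclose(np.corrcoef(x), c / np.outer(d, d))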
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett window is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
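# Illustrative sketch (editor's addition, hypothetical helper): hanning and
# hamming are both generalized cosine windows; only the (0.5, 0.5) versus
# (0.54, 0.46) coefficients differ, as in the definitions above.
def _demo_cosine_windows():
    import numpy as np
    M = 12
    n = np.arange(M)
    assert np.allclose(np.hanning(M), 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)))
    assert np.allclose(np.hamming(M), 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1)))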
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
    # Clenshaw's recurrence for evaluating a Chebyshev series. `vals` holds the
    # coefficients in the cephes convention (highest-order term first, zeroth
    # order last); the interval mapping is applied by the caller.
    b0 = vals[0]
    b1 = 0.0
    for i in range(1, len(vals)):
        b2 = b1
        b1 = b0
        b0 = x*b1 - b2 + vals[i]
    return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
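# Illustrative sketch (not part of the original numpy source): the Chebyshev
# expansions above can be sanity-checked against the defining power series
# I_0(x) = sum_k (x/2)**(2k) / (k!)**2, which converges quickly for moderate x.
# The helper name `_demo_i0_series_check` is hypothetical.
def _demo_i0_series_check(x=3.7, terms=40):
    import numpy as np
    from math import factorial
    series = sum((x / 2.0) ** (2 * k) / factorial(k) ** 2 for k in range(terms))
    assert np.isclose(np.i0(x), series, rtol=1e-10)
    return series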
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
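# Illustrative sketch (not part of the original numpy source): with beta = 0 the
# Bessel argument vanishes for every sample, so the Kaiser window degenerates to
# a rectangular window, as the table in the docstring states. The helper name
# `_demo_kaiser_beta_zero` is hypothetical.
def _demo_kaiser_beta_zero(M=12):
    import numpy as np
    w = np.kaiser(M, 0.0)
    # i0(0) / i0(0) == 1 for every sample
    assert np.allclose(w, np.ones(M))
    return w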
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
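# Illustrative sketch (not part of the original numpy source): np.sinc is the
# *normalized* sinc, sin(pi*x)/(pi*x), so it equals 1 at x = 0 and is zero (to
# double precision) at every other integer. The helper name
# `_demo_sinc_definition` is hypothetical.
def _demo_sinc_definition():
    import numpy as np
    x = np.array([0.5, 1.5, 2.25])
    assert np.allclose(np.sinc(x), np.sin(np.pi * x) / (np.pi * x))
    assert np.isclose(np.sinc(0.0), 1.0)
    # at nonzero integers the value is ~1e-16, i.e. zero to double precision
    assert np.allclose(np.sinc(np.arange(1, 5)), 0.0, atol=1e-12)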
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
Reduction function capable of receiving an axis argument.
It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
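# Illustrative sketch (not part of the original numpy source): the same
# "swap the kept axes to the front, merge the reduced axes, reduce over -1"
# trick that _ureduce performs, written out by hand for a median over axes
# (0, 2). The helper name `_demo_extended_axis_median` is hypothetical.
def _demo_extended_axis_median():
    import numpy as np
    a = np.arange(24.0).reshape(2, 3, 4)
    # move the kept axis (1) to the front, then flatten the reduced axes
    merged = np.swapaxes(a, 0, 1).reshape(a.shape[1], -1)
    by_hand = np.median(merged, axis=-1)
    assert np.allclose(by_hand, np.median(a, axis=(0, 2)))
    return by_hand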
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `a` is not already an
`ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is
``np.float64``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
Notes
-----
Given a vector ``V`` of length ``N``, the median of ``V`` is the
middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
two middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
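# Illustrative sketch (not part of the original numpy source): the even-length
# branch above only needs the two middle order statistics, which np.partition
# provides without a full sort. The helper name `_demo_partition_median` is
# hypothetical.
def _demo_partition_median():
    import numpy as np
    v = np.array([9.0, 1.0, 4.0, 7.0, 2.0, 8.0])
    szh = v.size // 2
    part = np.partition(v, [szh - 1, szh])
    by_hand = 0.5 * (part[szh - 1] + part[szh])
    assert np.isclose(by_hand, np.median(v))
    return by_hand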
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100 inclusive.
axis : {int, sequence of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
version of the array. A sequence of axes is supported since
version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a`
calculations. The input array will be modified by the call to
`percentile`. This will save memory when you do not need to
preserve the contents of the input array. In this case you
should not make any assumptions about the contents of the input
`a` after this function completes -- treat it as undefined.
Default is False. If `a` is not already an array, this parameter
will have no effect as `a` will be converted to an array
internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If `q` is a single percentile and `axis=None`, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the percentiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, median, nanpercentile
Notes
-----
Given a vector ``V`` of length ``N``, the ``q``-th percentile of
``V`` is the value ``q/100`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the percentile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=50``, the same as the minimum if ``q=0`` and the
same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> np.percentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=out)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a == b)
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = 0.5 * (floor(indices) + ceil(indices))
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in percentile",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
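# Illustrative sketch (not part of the original numpy source): the 'linear'
# branch above weights the order statistics just below and just above the
# fractional index q/100 * (N - 1). The helper name
# `_demo_percentile_interpolation` is hypothetical.
def _demo_percentile_interpolation(q=35.0):
    import numpy as np
    v = np.sort(np.array([10.0, 7.0, 4.0, 3.0, 2.0, 1.0]))
    idx = q / 100.0 * (v.size - 1)
    lo, hi = int(np.floor(idx)), int(np.ceil(idx))
    frac = idx - lo
    by_hand = v[lo] * (1.0 - frac) + v[hi] * frac
    assert np.isclose(by_hand, np.percentile(v, q))
    return by_hand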
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
The sample points corresponding to the `y` values. If `x` is None,
the sample points are assumed to be evenly spaced `dx` apart. The
default is None.
dx : scalar, optional
The spacing between sample points when `x` is None. The default is 1.
axis : int, optional
The axis along which to integrate.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
are taken from the `y` array; by default the x-axis distances between
points are 1.0, but they can alternatively be provided via the `x` array
or the `dx` scalar. The return value equals the combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
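# Illustrative sketch (not part of the original numpy source): the composite
# trapezoidal rule is just sum(d * (y[1:] + y[:-1]) / 2), which is what the
# axis slicing above computes in the general case. The helper name
# `_demo_trapz_by_hand` is hypothetical.
def _demo_trapz_by_hand():
    import numpy as np
    x = np.array([0.0, 1.0, 3.0, 6.0])
    y = np.array([1.0, 2.0, 3.0, 5.0])
    d = np.diff(x)
    by_hand = np.sum(d * (y[1:] + y[:-1]) / 2.0)
    assert np.isclose(by_hand, np.trapz(y, x))
    return by_hand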
#always succeed
def add_newdoc(place, obj, doc):
"""
Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
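# Illustrative sketch (not part of the original numpy source): with inputs of
# length M and N, 'xy' indexing yields (N, M) outputs and 'ij' indexing yields
# (M, N) outputs, as the Notes section states; in 2-D the two conventions are
# transposes of one another. The helper name `_demo_meshgrid_indexing` is
# hypothetical.
def _demo_meshgrid_indexing():
    import numpy as np
    x = np.linspace(0.0, 1.0, 3)   # M = 3
    y = np.linspace(0.0, 1.0, 2)   # N = 2
    xv_xy, yv_xy = np.meshgrid(x, y, indexing='xy')
    xv_ij, yv_ij = np.meshgrid(x, y, indexing='ij')
    assert xv_xy.shape == (2, 3) and xv_ij.shape == (3, 2)
    assert np.array_equal(xv_xy, xv_ij.T) and np.array_equal(yv_xy, yv_ij.T)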
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arrorder)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
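# Illustrative sketch (not part of the original numpy source): the boolean-mask
# idiom mentioned in the Notes section gives the same rows as delete along
# axis 0, while keeping the mask around for further use. The helper name
# `_demo_delete_mask_equivalence` is hypothetical.
def _demo_delete_mask_equivalence():
    import numpy as np
    arr = np.arange(20).reshape(5, 4)
    mask = np.ones(arr.shape[0], dtype=bool)
    mask[[0, 2, 4]] = False
    assert np.array_equal(arr[mask, ...], np.delete(arr, [0, 2, 4], axis=0))
    return mask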
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very differently
from `obj=[0]`, just as `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very differently from a[:,[0],:] = ...! This changes values so that
# it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arrorder)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
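# Illustrative sketch (not part of the original numpy source): with a scalar
# index, `values` is broadcast the way `arr[:, 1, :] = values` would be, while a
# one-element sequence expects a column-shaped block, so the two calls below
# need differently shaped `values` to produce the same array (this mirrors the
# docstring example). The helper name `_demo_insert_scalar_vs_sequence` is
# hypothetical.
def _demo_insert_scalar_vs_sequence():
    import numpy as np
    a = np.array([[1, 1], [2, 2], [3, 3]])
    via_scalar = np.insert(a, 1, [1, 2, 3], axis=1)
    via_sequence = np.insert(a, [1], [[1], [2], [3]], axis=1)
    assert np.array_equal(via_scalar, via_sequence)
    return via_scalar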
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/numpy_1.11/numpy/lib/function_base.py | Python | bsd-2-clause | 144,305 | [ "Gaussian" ] | 5fa0c353b60947256018dc95d7d9c78d96d3a6e1c20dc83dd550a46b54e67cf4 |
"""
Created on June 18, 2015
@author: shiruilu
Common utils for CAPE
"""
import cv2
import numpy as np
from scipy.signal import argrelextrema
import matplotlib.pyplot as plt
def safe_convert(x, new_dtype):
"""
http://stackoverflow.com/a/23325108/2729100
convert x to new_dtype, clip values larger than max or smaller than min
"""
info = np.iinfo(new_dtype)
return x.clip(info.min, info.max).astype(new_dtype)
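# Illustrative sketch (not part of the original CAPE source): safe_convert
# clips out-of-range values before the dtype cast, so 300 becomes 255 and -5
# becomes 0 when converting to uint8. The helper name `_demo_safe_convert` is
# hypothetical.
def _demo_safe_convert():
    import numpy as np
    x = np.array([-5, 120, 300])
    out = safe_convert(x, np.uint8)
    assert out.dtype == np.uint8 and list(out) == [0, 120, 255]
    return out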
def get_smoothed_hist(I_channel, ksize=30, sigma=10):
"""
get smoothed hist from a single channel
+TODO: consider replace the calc of face_enhancement.py _H, H
ARGs:
I_channel -- MASKED single channel image (not necessarily gray), 0 will not be counted.
ksize &
sigma -- For Gaussian kernel, following 3*sigma rule
RETURN:
h -- Smoothed hist
"""
# unsmoothed hist (cv2.calcHist return 2d vector)
_h = cv2.calcHist([I_channel],[0],None,[255],[1,256]).T.ravel()
# smooth hist, correlate only takes 1d input
h = np.correlate(_h, cv2.getGaussianKernel(ksize,sigma).ravel(), 'same')
return h
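# Illustrative sketch (not part of the original CAPE source): the smoothing in
# get_smoothed_hist is a correlation of the raw histogram with a normalized
# Gaussian kernel; the same effect can be reproduced with a hand-built kernel,
# which is useful when cv2 is unavailable. The helper name
# `_demo_gaussian_smooth_hist` is hypothetical.
def _demo_gaussian_smooth_hist(hist, ksize=30, sigma=10):
    import numpy as np
    n = np.arange(ksize) - (ksize - 1) / 2.0
    kernel = np.exp(-0.5 * (n / sigma) ** 2)
    kernel /= kernel.sum()
    return np.correlate(np.asarray(hist, dtype=float), kernel, 'same')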
def detect_bimodal(H):
"""
H: all the (smoothed) histograms of faces on the image
RETURN:
bimodal_Fs: True means detected
False means undetected (i.e. not bimodal)
None means not sure, will plot H[i] for analysis
D, M, B: *Arrays* of detected Dark, Median, Bright intensities.
i.e. x-index of H
"""
# argrelextrema return (array([ 54, 132]),) (a tuple), only [0] used for 1d
maximas_Fs = [ argrelextrema(h, np.greater, order=10)[0] for h in H ]
# argrelextrema return (array([ 54, 132]),) (a tuple), only [0] used for 1d
minimas_Fs = [ argrelextrema(h, np.less, order=10)[0] for h in H ]
# # to visualize the bimodal:
# print "maximas each face(hist): ", maximas_Fs \
# , "minimas each face(hist): ", minimas_Fs
# plt.plot(H[i]); plt.xlim([1,256]); plt.show()
bimodal_Fs = np.zeros(len(H) ,dtype=bool)
D = np.zeros(len(H)); M = np.zeros(len(H)); B = np.zeros(len(H));
for i in range(len(H)): # each face i
tot_face_pix = np.sum(H[i])
if len(maximas_Fs[i]) ==2 and len(minimas_Fs[i]) ==1: #bimodal detected
d = maximas_Fs[i][0]
b = maximas_Fs[i][1]
m = minimas_Fs[i][0]
# print 'd,b,m: ',d,b,m
B[i] = b; M[i] = m; D[i] = d;
# NOTICE: Here it's 0.003 not 5% (as described in CAPE)!
# 5% should be cumulated from several cylinders around the peak
# Here it's ONLY the highest peak
if H[i][d] >=0.003*tot_face_pix and H[i][b] >=0.003*tot_face_pix \
and (H[i][m] <=0.8*H[i][d] and H[i][m] <=0.8*H[i][b]):
bimodal_Fs[i] = True
elif len(maximas_Fs[i]) >2 or len(minimas_Fs[i]) >1:
print '?? more than two maximas, or more than one minima, see the plot'
plt.plot(H[i]); plt.xlim([1,256]); plt.show()
bimodal_Fs[i] = None
else:
None
return bimodal_Fs, D, M, B
def frange(start, stop, step):
it = start
while(it < stop):
yield it
it += step
def mask_skin(img, mask):
img_cp = img.copy()
img_cp[ ~mask ] = 0 # non-skin area set to 0
return img_cp
def mag(img, dtype='int'):
"""
magnify from [0, 1] to [0, 255]
"""
if dtype == 'int':
return safe_convert(np.rint(img*255), np.uint8)
elif dtype == 'float':
return (img*255)
elif dtype == 'trim':
return safe_convert(np.rint(img), np.uint8)
else:
raise ValueError('no such data type')
def display(img, name='', mode='bgr'):
"""
display image using matplotlib
ARGS:
img: bgr mode
name: string, displayed as title
"""
if mode == 'bgr':
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
elif mode == 'rgb':
plt.imshow(img)
elif mode == 'gray':
plt.imshow(img, 'gray')
elif mode == 'rainbow': # for 0-1 img
plt.imshow(img, cmap='rainbow')
else:
raise ValueError('CAPE display: unknown mode')
plt.title(name)
plt.show()
| shiruilu/CAPE | cape_util/cape_util.py | Python | mit | 4,184 | [ "Gaussian" ] | d153138412baa3ef7b9588502c5b8bc50483d5f16c81dbb48017d1d2f49bf839 |
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.