| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| string (12 – 1.05M chars) | string (5 – 86 chars) | string (4 – 191 chars) | string (1 class) | string (15 classes) | int32 (12 – 1.05M) | list (1 – 23 items) | string (64 chars) |
# -*- coding: utf-8 -*-
"""This file contains a parser for the Mozilla Firefox history."""
import sqlite3
from plaso.events import time_events
from plaso.lib import event
from plaso.lib import eventdata
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
# Check SQLite version, bail out early if too old.
if sqlite3.sqlite_version_info < (3, 7, 8):
raise ImportWarning(
u'FirefoxHistoryParser requires at least SQLite version 3.7.8.')
class FirefoxPlacesBookmarkAnnotation(time_events.TimestampEvent):
"""Convenience class for a Firefox bookmark annotation event."""
DATA_TYPE = u'firefox:places:bookmark_annotation'
def __init__(self, timestamp, usage, row_id, title, url, content):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
usage: Timestamp description string.
row_id: The identifier of the corresponding row.
title: The title of the bookmark folder.
url: The bookmarked URL.
content: The content of the annotation.
"""
super(FirefoxPlacesBookmarkAnnotation, self).__init__(
timestamp, usage)
self.offset = row_id
self.title = title
self.url = url
self.content = content
class FirefoxPlacesBookmarkFolder(time_events.TimestampEvent):
"""Convenience class for a Firefox bookmark folder event."""
DATA_TYPE = u'firefox:places:bookmark_folder'
def __init__(self, timestamp, usage, row_id, title):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
usage: Timestamp description string.
row_id: The identifier of the corresponding row.
title: The title of the bookmark folder.
"""
super(FirefoxPlacesBookmarkFolder, self).__init__(
timestamp, usage)
self.offset = row_id
self.title = title
class FirefoxPlacesBookmark(time_events.TimestampEvent):
"""Convenience class for a Firefox bookmark event."""
DATA_TYPE = u'firefox:places:bookmark'
# TODO: move to formatter.
_TYPES = {
1: u'URL',
2: u'Folder',
3: u'Separator',
}
def __init__(
self, timestamp, usage, row_id, bookmark_type, title, url, places_title,
hostname, visit_count):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
usage: Timestamp description string.
row_id: The identifier of the corresponding row.
bookmark_type: Integer value containing the bookmark type.
title: The title of the bookmark folder.
url: The bookmarked URL.
places_title: The places title.
hostname: The hostname.
visit_count: The visit count.
"""
super(FirefoxPlacesBookmark, self).__init__(timestamp, usage)
self.offset = row_id
self.type = self._TYPES.get(bookmark_type, u'N/A')
self.title = title
self.url = url
self.places_title = places_title
self.host = hostname
self.visit_count = visit_count
class FirefoxPlacesPageVisitedEvent(event.EventObject):
"""Convenience class for a Firefox page visited event."""
DATA_TYPE = u'firefox:places:page_visited'
def __init__(self, timestamp, row_id, url, title, hostname, visit_count,
visit_type, extra):
"""Initializes the event object.
Args:
timestamp: The timestamp time value. The timestamp contains the
number of microseconds since Jan 1, 1970 00:00:00 UTC.
row_id: The identifier of the corresponding row.
url: The URL of the visited page.
title: The title of the visited page.
hostname: The visited hostname.
visit_count: The visit count.
visit_type: The transition type for the event.
extra: A list containing extra event data (TODO refactor).
"""
super(FirefoxPlacesPageVisitedEvent, self).__init__()
self.timestamp = timestamp
self.timestamp_desc = eventdata.EventTimestamp.PAGE_VISITED
self.offset = row_id
self.url = url
self.title = title
self.host = hostname
self.visit_count = visit_count
self.visit_type = visit_type
if extra:
self.extra = extra
class FirefoxDownload(time_events.TimestampEvent):
"""Convenience class for a Firefox download event."""
DATA_TYPE = u'firefox:downloads:download'
def __init__(self, timestamp, usage, row_id, name, url, referrer, full_path,
temporary_location, received_bytes, total_bytes, mime_type):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
usage: Timestamp description string.
row_id: The identifier of the corresponding row.
name: The name of the download.
url: The source URL of the download.
referrer: The referrer URL of the download.
full_path: The full path of the target of the download.
temporary_location: The temporary location of the download.
received_bytes: The number of bytes received.
total_bytes: The total number of bytes of the download.
mime_type: The mime type of the download.
"""
super(FirefoxDownload, self).__init__(timestamp, usage)
self.offset = row_id
self.name = name
self.url = url
self.referrer = referrer
self.full_path = full_path
self.temporary_location = temporary_location
self.received_bytes = received_bytes
self.total_bytes = total_bytes
self.mime_type = mime_type
class FirefoxHistoryPlugin(interface.SQLitePlugin):
"""Parses a Firefox history file.
The Firefox history is stored in a SQLite database file named
places.sqlite.
"""
NAME = u'firefox_history'
DESCRIPTION = u'Parser for Firefox history SQLite database files.'
# Define the needed queries.
QUERIES = [
((u'SELECT moz_historyvisits.id, moz_places.url, moz_places.title, '
u'moz_places.visit_count, moz_historyvisits.visit_date, '
u'moz_historyvisits.from_visit, moz_places.rev_host, '
u'moz_places.hidden, moz_places.typed, moz_historyvisits.visit_type '
u'FROM moz_places, moz_historyvisits '
u'WHERE moz_places.id = moz_historyvisits.place_id'),
u'ParsePageVisitedRow'),
((u'SELECT moz_bookmarks.type, moz_bookmarks.title AS bookmark_title, '
u'moz_bookmarks.dateAdded, moz_bookmarks.lastModified, '
u'moz_places.url, moz_places.title AS places_title, '
u'moz_places.rev_host, moz_places.visit_count, moz_bookmarks.id '
u'FROM moz_places, moz_bookmarks '
u'WHERE moz_bookmarks.fk = moz_places.id AND moz_bookmarks.type <> 3'),
u'ParseBookmarkRow'),
((u'SELECT moz_items_annos.content, moz_items_annos.dateAdded, '
u'moz_items_annos.lastModified, moz_bookmarks.title, '
u'moz_places.url, moz_places.rev_host, moz_items_annos.id '
u'FROM moz_items_annos, moz_bookmarks, moz_places '
u'WHERE moz_items_annos.item_id = moz_bookmarks.id '
u'AND moz_bookmarks.fk = moz_places.id'),
u'ParseBookmarkAnnotationRow'),
((u'SELECT moz_bookmarks.id, moz_bookmarks.title,'
u'moz_bookmarks.dateAdded, moz_bookmarks.lastModified '
u'FROM moz_bookmarks WHERE moz_bookmarks.type = 2'),
u'ParseBookmarkFolderRow')]
# The required tables.
REQUIRED_TABLES = frozenset([
u'moz_places', u'moz_historyvisits', u'moz_bookmarks',
u'moz_items_annos'])
# Cache queries.
URL_CACHE_QUERY = (
u'SELECT h.id AS id, p.url, p.rev_host FROM moz_places p, '
u'moz_historyvisits h WHERE p.id = h.place_id')
def ParseBookmarkAnnotationRow(
self, parser_mediator, row, query=None, **unused_kwargs):
"""Parses a bookmark annotation row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
if row['dateAdded']:
event_object = FirefoxPlacesBookmarkAnnotation(
row['dateAdded'], eventdata.EventTimestamp.ADDED_TIME,
row['id'], row['title'], row['url'], row['content'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['lastModified']:
event_object = FirefoxPlacesBookmarkAnnotation(
row['lastModified'], eventdata.EventTimestamp.MODIFICATION_TIME,
row['id'], row['title'], row['url'], row['content'])
parser_mediator.ProduceEvent(event_object, query=query)
def ParseBookmarkFolderRow(
self, parser_mediator, row, query=None, **unused_kwargs):
"""Parses a bookmark folder row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
if not row['title']:
title = u'N/A'
else:
title = row['title']
if row['dateAdded']:
event_object = FirefoxPlacesBookmarkFolder(
row['dateAdded'], eventdata.EventTimestamp.ADDED_TIME,
row['id'], title)
parser_mediator.ProduceEvent(event_object, query=query)
if row['lastModified']:
event_object = FirefoxPlacesBookmarkFolder(
row['lastModified'], eventdata.EventTimestamp.MODIFICATION_TIME,
row['id'], title)
parser_mediator.ProduceEvent(event_object, query=query)
def ParseBookmarkRow(
self, parser_mediator, row, query=None, **unused_kwargs):
"""Parses a bookmark row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
if row['dateAdded']:
event_object = FirefoxPlacesBookmark(
row['dateAdded'], eventdata.EventTimestamp.ADDED_TIME,
row['id'], row['type'], row['bookmark_title'], row['url'],
row['places_title'], row['rev_host'],
row['visit_count'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['lastModified']:
event_object = FirefoxPlacesBookmark(
row['lastModified'], eventdata.EventTimestamp.MODIFICATION_TIME,
row['id'], row['type'], row['bookmark_title'], row['url'],
row['places_title'], row['rev_host'],
row['visit_count'])
parser_mediator.ProduceEvent(event_object, query=query)
def ParsePageVisitedRow(
self, parser_mediator, row, query=None, cache=None, database=None,
**unused_kwargs):
"""Parses a page visited row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
cache: A cache object (instance of SQLiteCache).
database: A database object (instance of SQLiteDatabase).
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
# TODO: make extra conditional formatting.
extras = []
if row['from_visit']:
extras.append(u'visited from: {0}'.format(
self._GetUrl(row['from_visit'], cache, database)))
if row['hidden'] == 1:
extras.append(u'(url hidden)')
if row['typed'] == 1:
extras.append(u'(directly typed)')
else:
extras.append(u'(URL not typed directly)')
if row['visit_date']:
event_object = FirefoxPlacesPageVisitedEvent(
row['visit_date'], row['id'], row['url'], row['title'],
self._ReverseHostname(row['rev_host']), row['visit_count'],
row['visit_type'], extras)
parser_mediator.ProduceEvent(event_object, query=query)
def _ReverseHostname(self, hostname):
"""Reverses the hostname and strips the leading dot.
The hostname entry is reversed:
moc.elgoog.www.
Should be:
www.google.com
Args:
hostname: The reversed hostname.
Returns:
Reversed string without a leading dot.
"""
if not hostname:
return u''
if len(hostname) > 1:
if hostname[-1] == '.':
return hostname[::-1][1:]
else:
return hostname[::-1][0:]
return hostname
def _GetUrl(self, url_id, cache, database):
"""Return an URL from a reference to an entry in the from_visit table."""
url_cache_results = cache.GetResults(u'url')
if not url_cache_results:
cursor = database.cursor
result_set = cursor.execute(self.URL_CACHE_QUERY)
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
cache.CacheQueryResults(
result_set, 'url', 'id', ('url', 'rev_host'))
url_cache_results = cache.GetResults(u'url')
url, reverse_host = url_cache_results.get(url_id, [u'', u''])
if not url:
return u''
hostname = self._ReverseHostname(reverse_host)
return u'{:s} ({:s})'.format(url, hostname)
class FirefoxDownloadsPlugin(interface.SQLitePlugin):
"""Parses a Firefox downloads file.
The Firefox downloads history is stored in a SQLite database file named
downloads.sqlite.
"""
NAME = u'firefox_downloads'
DESCRIPTION = u'Parser for Firefox downloads SQLite database files.'
# Define the needed queries.
QUERIES = [
((u'SELECT moz_downloads.id, moz_downloads.name, moz_downloads.source, '
u'moz_downloads.target, moz_downloads.tempPath, '
u'moz_downloads.startTime, moz_downloads.endTime, moz_downloads.state, '
u'moz_downloads.referrer, moz_downloads.currBytes, '
u'moz_downloads.maxBytes, moz_downloads.mimeType '
u'FROM moz_downloads'),
u'ParseDownloadsRow')]
# The required tables.
REQUIRED_TABLES = frozenset([u'moz_downloads'])
def ParseDownloadsRow(
self, parser_mediator, row, query=None, **unused_kwargs):
"""Parses a downloads row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
if row['startTime']:
event_object = FirefoxDownload(
row['startTime'], eventdata.EventTimestamp.START_TIME, row['id'],
row['name'], row['source'], row['referrer'], row['target'],
row['tempPath'], row['currBytes'], row['maxBytes'],
row['mimeType'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['endTime']:
event_object = FirefoxDownload(
row['endTime'], eventdata.EventTimestamp.END_TIME, row['id'],
row['name'], row['source'], row['referrer'], row['target'],
row['tempPath'], row['currBytes'], row['maxBytes'],
row['mimeType'])
parser_mediator.ProduceEvent(event_object, query=query)
sqlite.SQLiteParser.RegisterPlugins([
FirefoxHistoryPlugin, FirefoxDownloadsPlugin])
| jorik041/plaso | plaso/parsers/sqlite_plugins/firefox.py | Python | apache-2.0 | 15,322 | ["VisIt"] | ca9a9a55d281cfc86b4f22c18e2ce9bd125499d30588f20236c008f1f7bf1a42 |
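The FirefoxHistoryPlugin above turns rows from places.sqlite join queries into event objects. A minimal standalone sketch (the database path is a placeholder; column names come from the first query above) of running the same page-visit query with Python's built-in sqlite3 module:

```
import sqlite3

# Placeholder path to a copied Firefox profile database.
DB_PATH = 'places.sqlite'

PAGE_VISITED_QUERY = (
    'SELECT moz_historyvisits.id, moz_places.url, moz_places.title, '
    'moz_places.visit_count, moz_historyvisits.visit_date '
    'FROM moz_places, moz_historyvisits '
    'WHERE moz_places.id = moz_historyvisits.place_id')

connection = sqlite3.connect(DB_PATH)
connection.row_factory = sqlite3.Row  # allows row['url'] style access
for row in connection.execute(PAGE_VISITED_QUERY):
    # visit_date holds microseconds since January 1, 1970 (see the
    # FirefoxPlacesPageVisitedEvent docstring above).
    print(row['url'], row['visit_date'], row['visit_count'])
connection.close()
```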
#
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
import bpy
from bpy.props import (BoolProperty,
EnumProperty,
FloatProperty,
IntProperty,
PointerProperty)
# enums
import _cycles
enum_devices = (
('CPU', "CPU", "Use CPU for rendering"),
('GPU', "GPU Compute", "Use GPU compute device for rendering, configured in user preferences"),
)
if _cycles.with_network:
enum_devices += (('NETWORK', "Networked Device", "Use networked device for rendering"),)
enum_feature_set = (
('SUPPORTED', "Supported", "Only use finished and supported features"),
('EXPERIMENTAL', "Experimental", "Use experimental and incomplete features that might be broken or change in the future", 'ERROR', 1),
)
enum_displacement_methods = (
('BUMP', "Bump", "Bump mapping to simulate the appearance of displacement"),
('TRUE', "True", "Use true displacement only, requires fine subdivision"),
('BOTH', "Both", "Combination of displacement and bump mapping"),
)
enum_bvh_types = (
('DYNAMIC_BVH', "Dynamic BVH", "Objects can be individually updated, at the cost of slower render time"),
('STATIC_BVH', "Static BVH", "Any object modification requires a complete BVH rebuild, but renders faster"),
)
enum_filter_types = (
('BOX', "Box", "Box filter"),
('GAUSSIAN', "Gaussian", "Gaussian filter"),
('BLACKMAN_HARRIS', "Blackman-Harris", "Blackman-Harris filter"),
)
enum_aperture_types = (
('RADIUS', "Radius", "Directly change the size of the aperture"),
('FSTOP', "F-stop", "Change the size of the aperture by f-stop"),
)
enum_panorama_types = (
('EQUIRECTANGULAR', "Equirectangular", "Render the scene with a spherical camera, also known as Lat Long panorama"),
('FISHEYE_EQUIDISTANT', "Fisheye Equidistant", "Ideal for fulldomes, ignore the sensor dimensions"),
('FISHEYE_EQUISOLID', "Fisheye Equisolid",
"Similar to most fisheye modern lens, takes sensor dimensions into consideration"),
('MIRRORBALL', "Mirror Ball", "Uses the mirror ball mapping"),
)
enum_curve_primitives = (
('TRIANGLES', "Triangles", "Create triangle geometry around strands"),
('LINE_SEGMENTS', "Line Segments", "Use line segment primitives"),
('CURVE_SEGMENTS', "Curve Segments", "Use segmented cardinal curve primitives"),
)
enum_triangle_curves = (
('CAMERA_TRIANGLES', "Planes", "Create individual triangles forming planes that face camera"),
('TESSELLATED_TRIANGLES', "Tessellated", "Create mesh surrounding each strand"),
)
enum_curve_shape = (
('RIBBONS', "Ribbons", "Ignore thickness of each strand"),
('THICK', "Thick", "Use thickness of strand when rendering"),
)
enum_tile_order = (
('CENTER', "Center", "Render from center to the edges"),
('RIGHT_TO_LEFT', "Right to Left", "Render from right to left"),
('LEFT_TO_RIGHT', "Left to Right", "Render from left to right"),
('TOP_TO_BOTTOM', "Top to Bottom", "Render from top to bottom"),
('BOTTOM_TO_TOP', "Bottom to Top", "Render from bottom to top"),
)
enum_use_layer_samples = (
('USE', "Use", "Per render layer number of samples override scene samples"),
('BOUNDED', "Bounded", "Bound per render layer number of samples by global samples"),
('IGNORE', "Ignore", "Ignore per render layer number of samples"),
)
enum_sampling_pattern = (
('SOBOL', "Sobol", "Use Sobol random sampling pattern"),
('CORRELATED_MUTI_JITTER', "Correlated Multi-Jitter", "Use Correlated Multi-Jitter random sampling pattern"),
)
enum_integrator = (
('BRANCHED_PATH', "Branched Path Tracing", "Path tracing integrator that branches on the first bounce, giving more control over the number of light and material samples"),
('PATH', "Path Tracing", "Pure path tracing integrator"),
)
enum_volume_sampling = (
('DISTANCE', "Distance", "Use distance sampling, best for dense volumes with lights far away"),
('EQUIANGULAR', "Equiangular", "Use equiangular sampling, best for volumes with low density with light inside or near the volume"),
('MULTIPLE_IMPORTANCE', "Multiple Importance", "Combine distance and equi-angular sampling for volumes where neither method is ideal"),
)
enum_volume_interpolation = (
('LINEAR', "Linear", "Good smoothness and speed"),
('CUBIC', "Cubic", "Smoothed high quality interpolation, but slower")
)
class CyclesRenderSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.Scene.cycles = PointerProperty(
name="Cycles Render Settings",
description="Cycles render settings",
type=cls,
)
cls.device = EnumProperty(
name="Device",
description="Device to use for rendering",
items=enum_devices,
default='CPU',
)
cls.feature_set = EnumProperty(
name="Feature Set",
description="Feature set to use for rendering",
items=enum_feature_set,
default='SUPPORTED',
)
cls.shading_system = BoolProperty(
name="Open Shading Language",
description="Use Open Shading Language (CPU rendering only)",
)
cls.progressive = EnumProperty(
name="Integrator",
description="Method to sample lights and materials",
items=enum_integrator,
default='PATH',
)
cls.use_square_samples = BoolProperty(
name="Square Samples",
description="Square sampling values for easier artist control",
default=False,
)
cls.samples = IntProperty(
name="Samples",
description="Number of samples to render for each pixel",
min=1, max=2147483647,
default=10,
)
cls.preview_samples = IntProperty(
name="Preview Samples",
description="Number of samples to render in the viewport, unlimited if 0",
min=0, max=2147483647,
default=10,
)
cls.preview_pause = BoolProperty(
name="Pause Preview",
description="Pause all viewport preview renders",
default=False,
)
cls.preview_active_layer = BoolProperty(
name="Preview Active Layer",
description="Preview active render layer in viewport",
default=False,
)
cls.aa_samples = IntProperty(
name="AA Samples",
description="Number of antialiasing samples to render for each pixel",
min=1, max=10000,
default=4,
)
cls.preview_aa_samples = IntProperty(
name="AA Samples",
description="Number of antialiasing samples to render in the viewport, unlimited if 0",
min=0, max=10000,
default=4,
)
cls.diffuse_samples = IntProperty(
name="Diffuse Samples",
description="Number of diffuse bounce samples to render for each AA sample",
min=1, max=10000,
default=1,
)
cls.glossy_samples = IntProperty(
name="Glossy Samples",
description="Number of glossy bounce samples to render for each AA sample",
min=1, max=10000,
default=1,
)
cls.transmission_samples = IntProperty(
name="Transmission Samples",
description="Number of transmission bounce samples to render for each AA sample",
min=1, max=10000,
default=1,
)
cls.ao_samples = IntProperty(
name="Ambient Occlusion Samples",
description="Number of ambient occlusion samples to render for each AA sample",
min=1, max=10000,
default=1,
)
cls.mesh_light_samples = IntProperty(
name="Mesh Light Samples",
description="Number of mesh emission light samples to render for each AA sample",
min=1, max=10000,
default=1,
)
cls.subsurface_samples = IntProperty(
name="Subsurface Samples",
description="Number of subsurface scattering samples to render for each AA sample",
min=1, max=10000,
default=1,
)
cls.volume_samples = IntProperty(
name="Volume Samples",
description="Number of volume scattering samples to render for each AA sample",
min=1, max=10000,
default=0,
)
cls.sampling_pattern = EnumProperty(
name="Sampling Pattern",
description="Random sampling pattern used by the integrator",
items=enum_sampling_pattern,
default='SOBOL',
)
cls.use_layer_samples = EnumProperty(
name="Layer Samples",
description="How to use per render layer sample settings",
items=enum_use_layer_samples,
default='USE',
)
cls.sample_all_lights_direct = BoolProperty(
name="Sample All Direct Lights",
description="Sample all lights (for direct samples), rather than randomly picking one",
default=True,
)
cls.sample_all_lights_indirect = BoolProperty(
name="Sample All Indirect Lights",
description="Sample all lights (for indirect samples), rather than randomly picking one",
default=True,
)
cls.caustics_reflective = BoolProperty(
name="Reflective Caustics",
description="Use reflective caustics, resulting in a brighter image (more noise but added realism)",
default=True,
)
cls.caustics_refractive = BoolProperty(
name="Refractive Caustics",
description="Use refractive caustics, resulting in a brighter image (more noise but added realism)",
default=True,
)
cls.blur_glossy = FloatProperty(
name="Filter Glossy",
description="Adaptively blur glossy shaders after blurry bounces, "
"to reduce noise at the cost of accuracy",
min=0.0, max=10.0,
default=0.0,
)
cls.min_bounces = IntProperty(
name="Min Bounces",
description="Minimum number of bounces, setting this lower "
"than the maximum enables probabilistic path "
"termination (faster but noisier)",
min=0, max=1024,
default=3,
)
cls.max_bounces = IntProperty(
name="Max Bounces",
description="Total maximum number of bounces",
min=0, max=1024,
default=12,
)
cls.diffuse_bounces = IntProperty(
name="Diffuse Bounces",
description="Maximum number of diffuse reflection bounces, bounded by total maximum",
min=0, max=1024,
default=4,
)
cls.glossy_bounces = IntProperty(
name="Glossy Bounces",
description="Maximum number of glossy reflection bounces, bounded by total maximum",
min=0, max=1024,
default=4,
)
cls.transmission_bounces = IntProperty(
name="Transmission Bounces",
description="Maximum number of transmission bounces, bounded by total maximum",
min=0, max=1024,
default=12,
)
cls.volume_bounces = IntProperty(
name="Volume Bounces",
description="Maximum number of volumetric scattering events",
min=0, max=1024,
default=0,
)
cls.transparent_min_bounces = IntProperty(
name="Transparent Min Bounces",
description="Minimum number of transparent bounces, setting "
"this lower than the maximum enables "
"probabilistic path termination (faster but "
"noisier)",
min=0, max=1024,
default=8,
)
cls.transparent_max_bounces = IntProperty(
name="Transparent Max Bounces",
description="Maximum number of transparent bounces",
min=0, max=1024,
default=8,
)
cls.use_transparent_shadows = BoolProperty(
name="Transparent Shadows",
description="Use transparency of surfaces for rendering shadows",
default=True,
)
cls.volume_step_size = FloatProperty(
name="Step Size",
description="Distance between volume shader samples when rendering the volume "
"(lower values give more accurate and detailed results, but also increased render time)",
default=0.1,
min=0.0000001, max=100000.0, soft_min=0.01, soft_max=1.0
)
cls.volume_max_steps = IntProperty(
name="Max Steps",
description="Maximum number of steps through the volume before giving up, "
"to avoid extremely long render times with big objects or small step sizes",
default=1024,
min=2, max=65536
)
cls.film_exposure = FloatProperty(
name="Exposure",
description="Image brightness scale",
min=0.0, max=10.0,
default=1.0,
)
cls.film_transparent = BoolProperty(
name="Transparent",
description="World background is transparent with premultiplied alpha",
default=False,
)
cls.filter_type = EnumProperty(
name="Filter Type",
description="Pixel filter type",
items=enum_filter_types,
default='GAUSSIAN',
)
cls.filter_width = FloatProperty(
name="Filter Width",
description="Pixel filter width",
min=0.01, max=10.0,
default=1.5,
)
cls.seed = IntProperty(
name="Seed",
description="Seed value for integrator to get different noise patterns",
min=0, max=2147483647,
default=0,
)
cls.use_animated_seed = BoolProperty(
name="Use Animated Seed",
description="Use different seed values (and hence noise patterns) at different frames",
default=False,
)
cls.sample_clamp_direct = FloatProperty(
name="Clamp Direct",
description="If non-zero, the maximum value for a direct sample, "
"higher values will be scaled down to avoid too "
"much noise and slow convergence at the cost of accuracy",
min=0.0, max=1e8,
default=0.0,
)
cls.sample_clamp_indirect = FloatProperty(
name="Clamp Indirect",
description="If non-zero, the maximum value for an indirect sample, "
"higher values will be scaled down to avoid too "
"much noise and slow convergence at the cost of accuracy",
min=0.0, max=1e8,
default=0.0,
)
cls.debug_tile_size = IntProperty(
name="Tile Size",
description="",
min=1, max=4096,
default=1024,
)
cls.preview_start_resolution = IntProperty(
name="Start Resolution",
description="Resolution to start rendering preview at, "
"progressively increasing it to the full viewport size",
min=8, max=16384,
default=64,
)
cls.debug_reset_timeout = FloatProperty(
name="Reset timeout",
description="",
min=0.01, max=10.0,
default=0.1,
)
cls.debug_cancel_timeout = FloatProperty(
name="Cancel timeout",
description="",
min=0.01, max=10.0,
default=0.1,
)
cls.debug_text_timeout = FloatProperty(
name="Text timeout",
description="",
min=0.01, max=10.0,
default=1.0,
)
cls.debug_bvh_type = EnumProperty(
name="Viewport BVH Type",
description="Choose between faster updates, or faster render",
items=enum_bvh_types,
default='DYNAMIC_BVH',
)
cls.debug_use_spatial_splits = BoolProperty(
name="Use Spatial Splits",
description="Use BVH spatial splits: longer builder time, faster render",
default=False,
)
cls.tile_order = EnumProperty(
name="Tile Order",
description="Tile order for rendering",
items=enum_tile_order,
default='CENTER',
options=set(), # Not animatable!
)
cls.use_progressive_refine = BoolProperty(
name="Progressive Refine",
description="Instead of rendering each tile until it is finished, "
"refine the whole image progressively "
"(this renders somewhat slower, "
"but time can be saved by manually stopping the render when the noise is low enough)",
default=False,
)
cls.bake_type = EnumProperty(
name="Bake Type",
default='COMBINED',
description="Type of pass to bake",
items=(
('COMBINED', "Combined", ""),
('AO', "Ambient Occlusion", ""),
('SHADOW', "Shadow", ""),
('NORMAL', "Normal", ""),
('UV', "UV", ""),
('EMIT', "Emit", ""),
('ENVIRONMENT', "Environment", ""),
('DIFFUSE_DIRECT', "Diffuse Direct", ""),
('DIFFUSE_INDIRECT', "Diffuse Indirect", ""),
('DIFFUSE_COLOR', "Diffuse Color", ""),
('GLOSSY_DIRECT', "Glossy Direct", ""),
('GLOSSY_INDIRECT', "Glossy Indirect", ""),
('GLOSSY_COLOR', "Glossy Color", ""),
('TRANSMISSION_DIRECT', "Transmission Direct", ""),
('TRANSMISSION_INDIRECT', "Transmission Indirect", ""),
('TRANSMISSION_COLOR', "Transmission Color", ""),
('SUBSURFACE_DIRECT', "Subsurface Direct", ""),
('SUBSURFACE_INDIRECT', "Subsurface Indirect", ""),
('SUBSURFACE_COLOR', "Subsurface Color", ""),
),
)
cls.use_camera_cull = BoolProperty(
name="Use Camera Cull",
description="Allow objects to be culled based on the camera frustum",
default=False,
)
cls.camera_cull_margin = FloatProperty(
name="Camera Cull Margin",
description="Margin for the camera space culling",
default=0.1,
min=0.0, max=5.0
)
cls.motion_blur_position = EnumProperty(
name="Motion Blur Position",
default='CENTER',
description="Offset for the shutter's time interval, allows to change the motion blur trails",
items=(
('START', "Start on Frame", "The shutter opens at the current frame"),
('CENTER', "Center on Frame", "The shutter is open during the current frame"),
('END', "End on Frame", "The shutter closes at the current frame"),
),
)
@classmethod
def unregister(cls):
del bpy.types.Scene.cycles
class CyclesCameraSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
import math
bpy.types.Camera.cycles = PointerProperty(
name="Cycles Camera Settings",
description="Cycles camera settings",
type=cls,
)
cls.aperture_type = EnumProperty(
name="Aperture Type",
description="Use f-stop number or aperture radius",
items=enum_aperture_types,
default='RADIUS',
)
cls.aperture_fstop = FloatProperty(
name="Aperture f-stop",
description="F-stop ratio (lower numbers give more defocus, higher numbers give a sharper image)",
min=0.0, soft_min=0.1, soft_max=64.0,
default=5.6,
step=10,
precision=1,
)
cls.aperture_size = FloatProperty(
name="Aperture Size",
description="Radius of the aperture for depth of field (higher values give more defocus)",
min=0.0, soft_max=10.0,
default=0.0,
step=1,
precision=4,
subtype='DISTANCE',
)
cls.aperture_blades = IntProperty(
name="Aperture Blades",
description="Number of blades in aperture for polygonal bokeh (at least 3)",
min=0, max=100,
default=0,
)
cls.aperture_rotation = FloatProperty(
name="Aperture Rotation",
description="Rotation of blades in aperture",
soft_min=-math.pi, soft_max=math.pi,
subtype='ANGLE',
default=0,
)
cls.aperture_ratio = FloatProperty(
name="Aperture Ratio",
description="Distortion to simulate anamorphic lens bokeh",
min=0.01, soft_min=1.0, soft_max=2.0,
default=1.0,
precision=4,
)
cls.panorama_type = EnumProperty(
name="Panorama Type",
description="Distortion to use for the calculation",
items=enum_panorama_types,
default='FISHEYE_EQUISOLID',
)
cls.fisheye_fov = FloatProperty(
name="Field of View",
description="Field of view for the fisheye lens",
min=0.1745, soft_max=2.0 * math.pi, max=10.0 * math.pi,
subtype='ANGLE',
default=math.pi,
)
cls.fisheye_lens = FloatProperty(
name="Fisheye Lens",
description="Lens focal length (mm)",
min=0.01, soft_max=15.0, max=100.0,
default=10.5,
)
cls.latitude_min = FloatProperty(
name="Min Latitude",
description="Minimum latitude (vertical angle) for the equirectangular lens",
min=-0.5 * math.pi, max=0.5 * math.pi,
subtype='ANGLE',
default=-0.5 * math.pi,
)
cls.latitude_max = FloatProperty(
name="Max Latitude",
description="Maximum latitude (vertical angle) for the equirectangular lens",
min=-0.5 * math.pi, max=0.5 * math.pi,
subtype='ANGLE',
default=0.5 * math.pi,
)
cls.longitude_min = FloatProperty(
name="Min Longitude",
description="Minimum longitude (horizontal angle) for the equirectangular lens",
min=-math.pi, max=math.pi,
subtype='ANGLE',
default=-math.pi,
)
cls.longitude_max = FloatProperty(
name="Max Longitude",
description="Maximum longitude (horizontal angle) for the equirectangular lens",
min=-math.pi, max=math.pi,
subtype='ANGLE',
default=math.pi,
)
@classmethod
def unregister(cls):
del bpy.types.Camera.cycles
class CyclesMaterialSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.Material.cycles = PointerProperty(
name="Cycles Material Settings",
description="Cycles material settings",
type=cls,
)
cls.sample_as_light = BoolProperty(
name="Multiple Importance Sample",
description="Use multiple importance sampling for this material, "
"disabling may reduce overall noise for large "
"objects that emit little light compared to other light sources",
default=True,
)
cls.use_transparent_shadow = BoolProperty(
name="Transparent Shadows",
description="Use transparent shadows for this material if it contains a Transparent BSDF, "
"disabling will render faster but not give accurate shadows",
default=True,
)
cls.homogeneous_volume = BoolProperty(
name="Homogeneous Volume",
description="When using volume rendering, assume volume has the same density everywhere "
"(not using any textures), for faster rendering",
default=False,
)
cls.volume_sampling = EnumProperty(
name="Volume Sampling",
description="Sampling method to use for volumes",
items=enum_volume_sampling,
default='DISTANCE',
)
cls.volume_interpolation = EnumProperty(
name="Volume Interpolation",
description="Interpolation method to use for smoke/fire volumes",
items=enum_volume_interpolation,
default='LINEAR',
)
@classmethod
def unregister(cls):
del bpy.types.Material.cycles
class CyclesLampSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.Lamp.cycles = PointerProperty(
name="Cycles Lamp Settings",
description="Cycles lamp settings",
type=cls,
)
cls.cast_shadow = BoolProperty(
name="Cast Shadow",
description="Lamp casts shadows",
default=True,
)
cls.samples = IntProperty(
name="Samples",
description="Number of light samples to render for each AA sample",
min=1, max=10000,
default=1,
)
cls.max_bounces = IntProperty(
name="Max Bounces",
description="Maximum number of bounces the light will contribute to the render",
min=0, max=1024,
default=1024,
)
cls.use_multiple_importance_sampling = BoolProperty(
name="Multiple Importance Sample",
description="Use multiple importance sampling for the lamp, "
"reduces noise for area lamps and sharp glossy materials",
default=False,
)
cls.is_portal = BoolProperty(
name="Is Portal",
description="Use this area lamp to guide sampling of the background, "
"note that this will make the lamp invisible",
default=False,
)
@classmethod
def unregister(cls):
del bpy.types.Lamp.cycles
class CyclesWorldSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.World.cycles = PointerProperty(
name="Cycles World Settings",
description="Cycles world settings",
type=cls,
)
cls.sample_as_light = BoolProperty(
name="Multiple Importance Sample",
description="Use multiple importance sampling for the environment, "
"enabling for non-solid colors is recommended",
default=False,
)
cls.sample_map_resolution = IntProperty(
name="Map Resolution",
description="Importance map size is resolution x resolution; "
"higher values potentially produce less noise, at the cost of memory and speed",
min=4, max=8192,
default=256,
)
cls.samples = IntProperty(
name="Samples",
description="Number of light samples to render for each AA sample",
min=1, max=10000,
default=4,
)
cls.max_bounces = IntProperty(
name="Max Bounces",
description="Maximum number of bounces the background light will contribute to the render",
min=0, max=1024,
default=1024,
)
cls.homogeneous_volume = BoolProperty(
name="Homogeneous Volume",
description="When using volume rendering, assume volume has the same density everywhere"
"(not using any textures), for faster rendering",
default=False,
)
cls.volume_sampling = EnumProperty(
name="Volume Sampling",
description="Sampling method to use for volumes",
items=enum_volume_sampling,
default='EQUIANGULAR',
)
cls.volume_interpolation = EnumProperty(
name="Volume Interpolation",
description="Interpolation method to use for volumes",
items=enum_volume_interpolation,
default='LINEAR',
)
@classmethod
def unregister(cls):
del bpy.types.World.cycles
class CyclesVisibilitySettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.Object.cycles_visibility = PointerProperty(
name="Cycles Visibility Settings",
description="Cycles visibility settings",
type=cls,
)
bpy.types.World.cycles_visibility = PointerProperty(
name="Cycles Visibility Settings",
description="Cycles visibility settings",
type=cls,
)
cls.camera = BoolProperty(
name="Camera",
description="Object visibility for camera rays",
default=True,
)
cls.diffuse = BoolProperty(
name="Diffuse",
description="Object visibility for diffuse reflection rays",
default=True,
)
cls.glossy = BoolProperty(
name="Glossy",
description="Object visibility for glossy reflection rays",
default=True,
)
cls.transmission = BoolProperty(
name="Transmission",
description="Object visibility for transmission rays",
default=True,
)
cls.shadow = BoolProperty(
name="Shadow",
description="Object visibility for shadow rays",
default=True,
)
cls.scatter = BoolProperty(
name="Volume Scatter",
description="Object visibility for volume scatter rays",
default=True,
)
@classmethod
def unregister(cls):
del bpy.types.Object.cycles_visibility
del bpy.types.World.cycles_visibility
class CyclesMeshSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.Mesh.cycles = PointerProperty(
name="Cycles Mesh Settings",
description="Cycles mesh settings",
type=cls,
)
bpy.types.Curve.cycles = PointerProperty(
name="Cycles Mesh Settings",
description="Cycles mesh settings",
type=cls,
)
bpy.types.MetaBall.cycles = PointerProperty(
name="Cycles Mesh Settings",
description="Cycles mesh settings",
type=cls,
)
cls.displacement_method = EnumProperty(
name="Displacement Method",
description="Method to use for the displacement",
items=enum_displacement_methods,
default='BUMP',
)
cls.use_subdivision = BoolProperty(
name="Use Subdivision",
description="Subdivide mesh for rendering",
default=False,
)
cls.dicing_rate = FloatProperty(
name="Dicing Rate",
description="",
min=0.001, max=1000.0,
default=1.0,
)
@classmethod
def unregister(cls):
del bpy.types.Mesh.cycles
del bpy.types.Curve.cycles
del bpy.types.MetaBall.cycles
class CyclesObjectBlurSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.Object.cycles = PointerProperty(
name="Cycles Object Settings",
description="Cycles object settings",
type=cls,
)
cls.use_motion_blur = BoolProperty(
name="Use Motion Blur",
description="Use motion blur for this object",
default=True,
)
cls.use_deform_motion = BoolProperty(
name="Use Deformation Motion",
description="Use deformation motion blur for this object",
default=True,
)
cls.motion_steps = IntProperty(
name="Motion Steps",
description="Control accuracy of deformation motion blur, more steps gives more memory usage (actual number of steps is 2^(steps - 1))",
min=1, soft_max=8,
default=1,
)
cls.use_camera_cull = BoolProperty(
name="Use Camera Cull",
description="Allow this object and its duplicators to be culled by camera space culling",
default=False,
)
@classmethod
def unregister(cls):
del bpy.types.Object.cycles
class CyclesCurveRenderSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.Scene.cycles_curves = PointerProperty(
name="Cycles Hair Rendering Settings",
description="Cycles hair rendering settings",
type=cls,
)
cls.primitive = EnumProperty(
name="Primitive",
description="Type of primitive used for hair rendering",
items=enum_curve_primitives,
default='LINE_SEGMENTS',
)
cls.shape = EnumProperty(
name="Shape",
description="Form of hair",
items=enum_curve_shape,
default='THICK',
)
cls.cull_backfacing = BoolProperty(
name="Cull back-faces",
description="Do not test the back-face of each strand",
default=True,
)
cls.use_curves = BoolProperty(
name="Use Cycles Hair Rendering",
description="Activate Cycles hair rendering for particle system",
default=True,
)
cls.resolution = IntProperty(
name="Resolution",
description="Resolution of generated mesh",
min=3, max=64,
default=3,
)
cls.minimum_width = FloatProperty(
name="Minimal width",
description="Minimal pixel width for strands (0 - deactivated)",
min=0.0, max=100.0,
default=0.0,
)
cls.maximum_width = FloatProperty(
name="Maximal width",
description="Maximum extension that strand radius can be increased by",
min=0.0, max=100.0,
default=0.1,
)
cls.subdivisions = IntProperty(
name="Subdivisions",
description="Number of subdivisions used in Cardinal curve intersection (power of 2)",
min=0, max=24,
default=4,
)
@classmethod
def unregister(cls):
del bpy.types.Scene.cycles_curves
class CyclesCurveSettings(bpy.types.PropertyGroup):
@classmethod
def register(cls):
bpy.types.ParticleSettings.cycles = PointerProperty(
name="Cycles Hair Settings",
description="Cycles hair settings",
type=cls,
)
cls.radius_scale = FloatProperty(
name="Radius Scaling",
description="Multiplier of width properties",
min=0.0, max=1000.0,
default=0.01,
)
cls.root_width = FloatProperty(
name="Root Size",
description="Strand's width at root",
min=0.0, max=1000.0,
default=1.0,
)
cls.tip_width = FloatProperty(
name="Tip Multiplier",
description="Strand's width at tip",
min=0.0, max=1000.0,
default=0.0,
)
cls.shape = FloatProperty(
name="Strand Shape",
description="Strand shape parameter",
min=-1.0, max=1.0,
default=0.0,
)
cls.use_closetip = BoolProperty(
name="Close tip",
description="Set tip radius to zero",
default=True,
)
@classmethod
def unregister(cls):
del bpy.types.ParticleSettings.cycles
def register():
bpy.utils.register_class(CyclesRenderSettings)
bpy.utils.register_class(CyclesCameraSettings)
bpy.utils.register_class(CyclesMaterialSettings)
bpy.utils.register_class(CyclesLampSettings)
bpy.utils.register_class(CyclesWorldSettings)
bpy.utils.register_class(CyclesVisibilitySettings)
bpy.utils.register_class(CyclesMeshSettings)
bpy.utils.register_class(CyclesCurveRenderSettings)
bpy.utils.register_class(CyclesCurveSettings)
def unregister():
bpy.utils.unregister_class(CyclesRenderSettings)
bpy.utils.unregister_class(CyclesCameraSettings)
bpy.utils.unregister_class(CyclesMaterialSettings)
bpy.utils.unregister_class(CyclesLampSettings)
bpy.utils.unregister_class(CyclesWorldSettings)
bpy.utils.unregister_class(CyclesMeshSettings)
bpy.utils.unregister_class(CyclesVisibilitySettings)
bpy.utils.unregister_class(CyclesCurveRenderSettings)
bpy.utils.unregister_class(CyclesCurveSettings)
| Passtechsoft/TPEAlpGen | blender/intern/cycles/blender/addon/properties.py | Python | gpl-3.0 | 41,098 | ["Gaussian"] | 7dc65d27c9e51d41fcf8ed51f19d5212d046adef84898342eaa9296a4acffa4f |
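The PropertyGroup classes above attach Cycles settings to Blender data via PointerProperty. A minimal sketch, assuming it runs inside Blender's Python console after register() has been called, of reading and changing a few of the scene-level properties defined above:

```
import bpy

scene = bpy.context.scene

# These attributes exist because CyclesRenderSettings.register() above
# added bpy.types.Scene.cycles and its properties.
scene.cycles.device = 'CPU'
scene.cycles.samples = 128
scene.cycles.filter_type = 'GAUSSIAN'

print(scene.cycles.max_bounces, scene.cycles.tile_order)
```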
#!/usr/bin/env python
from pymatgen.core.periodic_table import all_symbols, Element
import urllib
def get_nist_data(element):
url = "http://physics.nist.gov/cgi-bin/Elements/elInfo.pl?element=%d&context=text"
lines = urllib.urlopen(url % element.Z).read().split('\n')
data = {}
for line in lines:
if "Atomic Weight" in line:
print line.split()[2]
for symbol in all_symbols():
element = Element(symbol)
print element.Z
print element.symbol
print element.name
print element.atomic_mass
print element.data['Electronic structure']
| ldamewood/figures | scripts/periodic_table.py | Python | mit | 591 | ["pymatgen"] | 6583a2c309b7def7926557e83ed6aa209a98de8d8e904b0e71f42d4e195e188e |
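The script above mixes a Python 2 NIST scrape with local pymatgen lookups. A minimal Python 3 sketch of the offline part alone, using only the Element attributes referenced above (the three symbols are arbitrary examples):

```
from pymatgen.core.periodic_table import Element

for symbol in ('H', 'Fe', 'U'):
    element = Element(symbol)
    print(element.Z, element.symbol, element.name, element.atomic_mass)
```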
import os
import unittest
import numpy as np
import tensorflow as tf
from pymatgen.core import Structure
from megnet.data.crystal import (CrystalGraph, CrystalGraphDisordered,
CrystalGraphWithBondTypes,
get_elemental_embeddings)
from megnet.data.graph import GaussianDistance
from megnet.utils.general import to_list
module_dir = os.path.dirname(os.path.abspath(__file__))
class TestGraph(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.structures = [
Structure.from_file(os.path.join(module_dir, "cifs", "LiFePO4_mp-19017_computed.cif")),
Structure.from_file(os.path.join(module_dir, "cifs", "BaTiO3_mp-2998_computed.cif")),
]
def test_crystalgraph(self):
cg = CrystalGraph(cutoff=4)
graph = cg.convert(self.structures[0])
self.assertEqual(cg.cutoff, 4)
keys = set(graph.keys())
self.assertSetEqual({"bond", "atom", "index1", "index2", "state"}, keys)
cg2 = CrystalGraph(cutoff=6)
self.assertEqual(cg2.cutoff, 6)
graph2 = cg2.convert(self.structures[0])
self.assertListEqual(to_list(graph2["state"][0]), [0, 0])
graph3 = cg(self.structures[0])
np.testing.assert_almost_equal(graph["atom"], graph3["atom"])
def test_crystalgraph_disordered(self):
cg = CrystalGraphDisordered(cutoff=4.0)
graph = cg.convert(self.structures[0])
self.assertEqual(cg.atom_converter.convert(graph["atom"]).shape[1], 16)
def test_crystal_graph_with_bond_types(self):
graph = {
"atom": [11, 8, 8],
"index1": [0, 0, 1, 1, 2, 2],
"index2": [0, 1, 2, 2, 1, 1],
"bond": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
"state": [[0, 0]],
}
cgbt = CrystalGraphWithBondTypes(nn_strategy="VoronoiNN")
new_graph = cgbt._get_bond_type(graph)
self.assertListEqual(to_list(new_graph["bond"]), [2, 1, 0, 0, 0, 0])
def test_convert(self):
cg = CrystalGraph(cutoff=4)
graph = cg.convert(self.structures[0])
self.assertListEqual(to_list(graph["atom"]), [i.specie.Z for i in self.structures[0]])
def test_get_input(self):
cg = CrystalGraph(cutoff=4, bond_converter=GaussianDistance(np.linspace(0, 5, 100), 0.5))
inp = cg.get_input(self.structures[0])
self.assertEqual(len(inp), 7)
shapes = [i.shape for i in inp]
true_shapes = [(1, 28), (1, 704, 100), (1, 1, 2), (1, 704), (1, 704), (1, 28), (1, 704)]
for i, j in zip(shapes, true_shapes):
self.assertListEqual(list(i), list(j))
def test_get_flat_data(self):
cg = CrystalGraph(cutoff=4)
graphs = [cg.convert(i) for i in self.structures]
targets = [0.1, 0.2]
inp = cg.get_flat_data(graphs, targets)
self.assertListEqual([len(i) for i in inp], [2] * 6)
def test_get_elemental_embeddings(self):
data = get_elemental_embeddings()
for k, v in data.items():
self.assertTrue(len(v) == 16)
if __name__ == "__main__":
unittest.main()
| materialsvirtuallab/megnet | megnet/data/tests/test_crystal.py | Python | bsd-3-clause | 3,152 | ["CRYSTAL", "pymatgen"] | 6159716b7b6dbaea7ba8b40381f3aa29a2a064b025d8ac79e438b774b52b1bb6 |
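test_get_input above passes GaussianDistance(np.linspace(0, 5, 100), 0.5) as the bond converter, which expands each bond length over a grid of Gaussian basis centers. A minimal numpy sketch of that expansion (the centers and width mirror the test; megnet's exact normalization may differ):

```
import numpy as np

def gaussian_expand(distances, centers, width):
    """Expand each distance d into features exp(-((d - c) / width) ** 2)."""
    d = np.asarray(distances, dtype=float)[:, None]
    return np.exp(-((d - centers[None, :]) / width) ** 2)

centers = np.linspace(0, 5, 100)
features = gaussian_expand([1.2, 2.3, 3.1], centers, width=0.5)
print(features.shape)  # (3, 100): one 100-dimensional feature vector per bond
```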
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class NetworkComparisonResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, networks=None, network_comparisons=None):
"""
NetworkComparisonResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'networks': 'list[Network]',
'network_comparisons': 'list[NetworkComparison]'
}
self.attribute_map = {
'networks': 'networks',
'network_comparisons': 'network_comparisons'
}
self._networks = networks
self._network_comparisons = network_comparisons
@property
def networks(self):
"""
Gets the networks of this NetworkComparisonResponse.
Networks
:return: The networks of this NetworkComparisonResponse.
:rtype: list[Network]
"""
return self._networks
@networks.setter
def networks(self, networks):
"""
Sets the networks of this NetworkComparisonResponse.
Networks
:param networks: The networks of this NetworkComparisonResponse.
:type: list[Network]
"""
self._networks = networks
@property
def network_comparisons(self):
"""
Gets the network_comparisons of this NetworkComparisonResponse.
NetworkComparisons
:return: The network_comparisons of this NetworkComparisonResponse.
:rtype: list[NetworkComparison]
"""
return self._network_comparisons
@network_comparisons.setter
def network_comparisons(self, network_comparisons):
"""
Sets the network_comparisons of this NetworkComparisonResponse.
NetworkComparisons
:param network_comparisons: The network_comparisons of this NetworkComparisonResponse.
:type: list[NetworkComparison]
"""
self._network_comparisons = network_comparisons
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
# Guard against comparison with unrelated types before touching __dict__.
if not isinstance(other, NetworkComparisonResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
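# Minimal usage sketch (illustrative only; Network and NetworkComparison are
# sibling models generated by the same codegen run):
#
#   resp = NetworkComparisonResponse(networks=[], network_comparisons=[])
#   resp.to_dict()   # -> {'networks': [], 'network_comparisons': []}
#   print(resp)      # __repr__ delegates to to_str()/pformat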
|
vericred/vericred-python
|
vericred_client/models/network_comparison_response.py
|
Python
|
apache-2.0
| 13,134
|
[
"VisIt"
] |
b043509fde9fb0751a3951ed5f557ea9482b9abe11a420930a12369615b6a7b3
|
from paraview.simple import *
import os
import sys
import numpy as np
path = os.getcwd() + "/"
file_name = sys.argv[1] + ".e"    # Exodus II input file (name given without extension)
csv_name = sys.argv[2] + ".csv"   # CSV output file (name given without extension)
offset = int(sys.argv[3])         # timestep index; negative values count from the end
reader = ExodusIIReader(FileName=path + file_name)
tsteps = reader.TimestepValues
writer = CreateWriter(path + csv_name, reader)
writer.FieldAssociation = "Cells" # or "Points"
if len(tsteps) == 0:
writer.UpdatePipeline()
else:
if offset >= 0:
writer.UpdatePipeline(time=tsteps[offset])
else:
writer.UpdatePipeline(time=tsteps[len(tsteps) + offset])
del writer
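# Usage sketch (file names are hypothetical); run with ParaView's Python
# interpreter, e.g.:
#   pvpython toCSV2.py my_simulation my_output -1
# which converts my_simulation.e to my_output.csv at the last timestep.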
|
jhaase1/zapdos
|
problems/Schottky_emission/transient/no_ballast/parametric/extra/toCSV2.py
|
Python
|
lgpl-2.1
| 556
|
[
"ParaView"
] |
181f3e22acac6b89e9e63d9e5b62cad009420b54ffaf6cfb1f37e8d3b27a090c
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Python wrapper for librfn.
Copyright © 2015 Thomas Unterthiner
Licensed under GPL, version 2 or a later (see LICENSE.txt)
'''
import os
import time
import ctypes as ct
import numpy as np
import matplotlib.pyplot as plt
import warnings
import sys
if sys.version_info < (3,):
range = xrange
_curdir = os.path.dirname(os.path.realpath(__file__))
_librfn = ct.cdll.LoadLibrary(os.path.join(_curdir, 'librfn.so'))
_default_gpu_id = -1
_librfn.calculate_W_cpu.argtypes = [
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
ct.c_int, ct.c_int, ct.c_int,
ct.c_int, ct.c_int, ct.c_float]
_librfn.train_cpu.restype = ct.c_int
_librfn.train_cpu.argtypes = [
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int,
ct.c_float, ct.c_float, ct.c_float, ct.c_float,
ct.c_float, ct.c_float, ct.c_float, ct.c_float, ct.c_float,
ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int
]
try:
_librfn.calculate_W_gpu.argtypes = [
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
ct.c_int, ct.c_int, ct.c_int,
ct.c_int, ct.c_int, ct.c_float,
ct.c_int]
_librfn.train_gpu.restype = ct.c_int
_librfn.train_gpu.argtypes = [
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
np.ctypeslib.ndpointer(np.float32),
ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int,
ct.c_float, ct.c_float, ct.c_float, ct.c_float,
ct.c_float, ct.c_float, ct.c_float, ct.c_float, ct.c_float,
ct.c_int, ct.c_int, ct.c_int, ct.c_int, ct.c_int,
ct.c_int
]
except AttributeError as err:
warnings.warn("GPU mode is not available")
_input_noise_types = {"dropout": 1, "saltpepper": 2, "gaussian": 3}
_activation_types = {"linear": 0, "relu": 1, "leaky": 2, "sigmoid": 3, "tanh": 4}
def train_rfn(X, n_hidden, n_iter, etaW, etaP, minP, dropout_rate,
input_noise_rate=0.0, startP=0.1, startW=None,
l2_weightdecay=0.0, l1_weightdecay=0.0,
input_noise_type="saltpepper", activation="relu",
h_threshold=0.0, momentum=0.0, applyNewtonUpdate=True,
batch_size=-1, seed=None, gpu_id="default"):
'''Trains a Rectified Factor Network (RFN).
Trains an RFN as explained in
"Rectified Factor Networks", Clevert et al., NIPS 2015
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Input samples
n_hidden : int
Number of latent variables to estimate
n_iter : int
Number of iterations to run the algorithm
etaW : float
Learning rate of the W parameter
etaP : float
Learning rate of the Psi parameter
(It's probably safe to set this to the same value as etaW)
minP : float
Minimal value for Psi. Should be in the range 1e-8 to 1e-1
dropout_rate : float in [0, 1)
Dropout rate for the latent variables
input_noise_rate : float
Noise/dropout rate for input variables
startP : float or array-like
Initial value for Psi (a scalar, or one value per input feature)
startW : array-like, shape = (n_hidden, n_features)
Optional pre-initialized weights parameters. Useful if one wants to
continue training of an old result.
l2_weightdecay : float
L2 penalty for weight decay
l1_weightdecay : float
L1 penalty for weight decay
input_noise_type : one of 'dropout', 'saltpepper' or 'gaussian'
Type of input noise
activation : one of ('linear', 'relu', 'leaky', 'sigmoid', 'tanh')
Activation function for hidden/latent variables.
h_threshold : float
Threshold for rectifying/leaky activations
momentum : float
Momentum term for learning
applyNewtonUpdate : boolean
Whether to use a Newton update (default) or a Gradient Descent step.
batch_size : int
If > 2, this will activate mini-batch learning instead of full
batch learning.
seed : int
Seed for the random number generator
gpu_id : int or "cpu"
ID of the gpu device to use. If set to "cpu", the calculations will
be performed on the CPU instead.
Returns
-------
A tuple of three elements:
W : array-like, shape = (n_hidden, n_features)
The weight matrix W used in the paper, used to transform the
hidden/latent variables back to visibles.
Psi : array-like, shape = (n_features, )
Variance of each input feature dimension (Psi in the paper's formulas)
Wout : array-like, shape = (n_hidden, n_features)
Weight matrix needed to transform the visible variables back into
hidden variables. Normally this is done via
`H = np.maximum(0, np.dot(Wout, X.T))`
'''
if seed is None:
seed = np.uint32(time.time()*100)
if gpu_id == "default":
gpu_id = _default_gpu_id
rng = np.random.RandomState(seed)
if startW is None:
W = rng.normal(scale=0.01, size=(n_hidden, X.shape[1])).astype(np.float32)
W = abs(W)
else:
W = startW.astype(np.float32)
if isinstance(startP, np.ndarray):
P = startP
else:
P = np.array([startP] * X.shape[1], dtype=np.float32)
X = X.astype(np.float32, order="C")
Wout = np.empty((W.shape[0], W.shape[1]), np.float32)
if gpu_id == "cpu":
_librfn.train_cpu(X, W, P, X.shape[0], X.shape[1], n_hidden, n_iter,
batch_size, etaW, etaP, minP, h_threshold, dropout_rate, input_noise_rate,
l2_weightdecay, l1_weightdecay, momentum, _input_noise_types[input_noise_type],
_activation_types[activation], 1, applyNewtonUpdate, seed)
_librfn.calculate_W_cpu(X, W, P, Wout,
X.shape[0], X.shape[1], W.shape[0],
_activation_types[activation], 1, h_threshold)
else:
_librfn.train_gpu(X, W, P, X.shape[0], X.shape[1], n_hidden, n_iter,
batch_size, etaW, etaP, minP, h_threshold, dropout_rate, input_noise_rate,
l2_weightdecay, l1_weightdecay, momentum, _input_noise_types[input_noise_type],
_activation_types[activation], 1, applyNewtonUpdate, seed, gpu_id)
_librfn.calculate_W_gpu(X, W, P, Wout,
X.shape[0], X.shape[1], W.shape[0],
_activation_types[activation], 1, h_threshold,
gpu_id)
return W, P, Wout
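# Minimal usage sketch (assumes librfn.so was loaded above; the data and
# hyper-parameters below are illustrative, not recommendations):
#
#   X = np.random.rand(1000, 50).astype(np.float32)
#   W, Psi, Wout = train_rfn(X, n_hidden=16, n_iter=50,
#                            etaW=0.1, etaP=0.1, minP=1e-2,
#                            dropout_rate=0.0, gpu_id="cpu")
#   H = np.maximum(0, np.dot(Wout, X.T))   # recover the hidden representation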
|
Tamme/mutationalsignaturesNCSUT
|
RFN/rfn.py
|
Python
|
gpl-3.0
| 6,855
|
[
"Gaussian"
] |
5718f824aff26a5d224e12a45e2c03e3a1ecf28b9395cb04770c5cdd2a2c5e1e
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from commoncode.testcase import FileBasedTesting
from textcode import strings
class TestStrings(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_to_filter(self):
assert strings.filter_string('aw w we ww ')
assert not strings.filter_string('w as wew wee wew ')
assert strings.filter_string('as we we we ', 2)
assert not strings.filter_string('as we we we ', 1)
assert not strings.filter_string('asw wew wee wew ')
assert strings.filter_strict('asw wew wee wew ')
assert strings.filter_string('aaaa')
assert not strings.filter_strict('aaaaqa')
def test_is_good(self):
assert not strings.is_good('aw w we ww ')
assert strings.is_good('ww asww wew wee wew ')
self.assertFalse(strings.is_good('asw wew wee wew ',
strings.filter_strict))
assert strings.is_good('gnu as')
assert strings.is_good('gnu as', strings.filter_strict)
assert not strings.is_good('aaqa', strings.filter_strict)
assert strings.is_good('aaqa')
def test_strings_in_file(self):
expected = [
u'__text',
u'__TEXT',
u'__cstring',
u'__TEXT',
u'__jump_table',
u'__IMPORT',
u'__textcoal_nt',
u'__TEXT',
u'_main',
u'___i686.get_pc_thunk.bx',
u'_setlocale',
u'_yyparse',
u'/sw/src/fink.build/bison-2.3-1002/bison-2.3/lib/',
u'main.c',
u'gcc2_compiled.',
u'main:F(0,2)',
u'int:t(0,2)=r(0,2);-2147483648;2147483647;'
]
test_file = self.get_test_loc('strings/basic/main.o')
result = list(strings.strings_in_file(test_file))
assert expected == result
def test_strings_in_file_does_fail_if_contains_ERROR_string(self):
test_file = self.get_test_loc('strings/bin/file_stripped')
list(strings.strings_in_file(test_file))
def test_file_strings_is_good(self):
expected = [
u'__text',
u'__TEXT',
u'__cstring',
u'__TEXT',
u'__jump_table',
u'__IMPORT',
u'__textcoal_nt',
u'__TEXT',
u'_main',
u'___i686.get_pc_thunk.bx',
u'_setlocale',
u'_yyparse',
u'/sw/src/fink.build/bison-2.3-1002/bison-2.3/lib/',
u'main.c',
u'gcc2_compiled.',
u'main:F(0,2)',
u'int:t(0,2)=r(0,2);-2147483648;2147483647;'
]
test_file = self.get_test_loc('strings/basic/main.o')
result = [s for s in strings.file_strings(test_file)
if strings.is_good(s)]
assert expected == result
def test_strings_in_fonts(self):
expected = self.get_test_loc('strings/font/DarkGardenMK.ttf.results')
expected = open(expected, 'rb').read().splitlines()
test_file = self.get_test_loc('strings/font/DarkGardenMK.ttf')
result = [s for s in strings.file_strings(test_file)
if strings.is_good(s)]
assert sorted(expected) == sorted(result)
def test_strings_in_elf(self):
test_file = self.get_test_loc('strings/elf/shash.i686')
result = [s for s in strings.file_strings(test_file)
if strings.is_good(s)]
expected = self.get_test_loc('strings/elf/shash.i686.results')
# with open(expected, 'wb') as o:
# o.write('\n'.join(result))
expected = open(expected, 'rb').read().splitlines()
assert sorted(expected) == sorted(result)
def test_strings_in_obj(self):
test_file = self.get_test_loc('strings/obj/test.o')
result = [s for s in strings.file_strings(test_file)
if strings.is_good(s)]
expected = self.get_test_loc('strings/obj/test.o.results')
# with open(expected, 'wb') as o:
# o.write('\n'.join(result))
expected = open(expected, 'rb').read().splitlines()
assert sorted(expected) == sorted(result)
def test_strings_in_windows_pdb(self):
test_file = self.get_test_loc('strings/pdb/QTMovieWin.pdb')
result = list(strings.file_strings(test_file))
expected = self.get_test_loc('strings/pdb/QTMovieWin.pdb.results')
# with open(expected, 'wb') as o:
# o.write('\n'.join(result))
expected = open(expected, 'rb').read().splitlines()
assert sorted(expected) == sorted(result)
def test_strings_in_all_bin(self):
test_dir = self.get_test_loc('strings/bin', copy=True)
expec_dir = self.get_test_loc('strings/bin-expected')
for tf in os.listdir(test_dir):
result = list(strings.file_strings(os.path.join(test_dir, tf)))
expected = os.path.join(expec_dir, tf + '.strings')
# with open(expected, 'wb') as o:
# o.write('\n'.join(result))
expected = open(expected, 'rb').read().splitlines()
assert sorted(expected) == sorted(result)
|
vinodpanicker/scancode-toolkit
|
tests/textcode/test_strings.py
|
Python
|
apache-2.0
| 6,592
|
[
"VisIt"
] |
6e28dde27faaf71a7f92ecaa63fffa9857479d9f75433bec1f3a6652f870974f
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
'''
Return the red, green and blue components for a
color as doubles.
'''
rgb = [0.0, 0.0, 0.0] # black
vtk.vtkNamedColors().GetColorRGB(colorName, rgb)
return rgb
# define a single pyramid cell
Scalars = vtk.vtkFloatArray()
Scalars.InsertNextValue(1.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(0.0)
Scalars.InsertNextValue(0.0)
Points = vtk.vtkPoints()
Points.InsertNextPoint(0, 0, 0)
Points.InsertNextPoint(1, 0, 0)
Points.InsertNextPoint(1, 1, 0)
Points.InsertNextPoint(0, 1, 0)
Points.InsertNextPoint(.5, .5, 1)
Ids = vtk.vtkIdList()
Ids.InsertNextId(0)
Ids.InsertNextId(1)
Ids.InsertNextId(2)
Ids.InsertNextId(3)
Ids.InsertNextId(4)
Grid = vtk.vtkUnstructuredGrid()
Grid.Allocate(10, 10)
Grid.InsertNextCell(14, Ids)  # cell type 14 == VTK_PYRAMID
Grid.SetPoints(Points)
Grid.GetPointData().SetScalars(Scalars)
# Clip the pyramid
clipper = vtk.vtkClipDataSet()
clipper.SetInputData(Grid)
clipper.SetValue(0.5)
# build tubes for the triangle edges
#
pyrEdges = vtk.vtkExtractEdges()
pyrEdges.SetInputConnection(clipper.GetOutputPort())
pyrEdgeTubes = vtk.vtkTubeFilter()
pyrEdgeTubes.SetInputConnection(pyrEdges.GetOutputPort())
pyrEdgeTubes.SetRadius(.005)
pyrEdgeTubes.SetNumberOfSides(6)
pyrEdgeMapper = vtk.vtkPolyDataMapper()
pyrEdgeMapper.SetInputConnection(pyrEdgeTubes.GetOutputPort())
pyrEdgeMapper.ScalarVisibilityOff()
pyrEdgeActor = vtk.vtkActor()
pyrEdgeActor.SetMapper(pyrEdgeMapper)
pyrEdgeActor.GetProperty().SetDiffuseColor(GetRGBColor('lamp_black'))
pyrEdgeActor.GetProperty().SetSpecular(.4)
pyrEdgeActor.GetProperty().SetSpecularPower(10)
# shrink the triangles so we can see each one
aShrinker = vtk.vtkShrinkFilter()
aShrinker.SetShrinkFactor(1)
aShrinker.SetInputConnection(clipper.GetOutputPort())
aMapper = vtk.vtkDataSetMapper()
aMapper.ScalarVisibilityOff()
aMapper.SetInputConnection(aShrinker.GetOutputPort())
Pyrs = vtk.vtkActor()
Pyrs.SetMapper(aMapper)
Pyrs.GetProperty().SetDiffuseColor(GetRGBColor('banana'))
# build a model of the pyramid
Edges = vtk.vtkExtractEdges()
Edges.SetInputData(Grid)
Tubes = vtk.vtkTubeFilter()
Tubes.SetInputConnection(Edges.GetOutputPort())
Tubes.SetRadius(.01)
Tubes.SetNumberOfSides(6)
TubeMapper = vtk.vtkPolyDataMapper()
TubeMapper.SetInputConnection(Tubes.GetOutputPort())
TubeMapper.ScalarVisibilityOff()
CubeEdges = vtk.vtkActor()
CubeEdges.SetMapper(TubeMapper)
CubeEdges.GetProperty().SetDiffuseColor(GetRGBColor('khaki'))
CubeEdges.GetProperty().SetSpecular(.4)
CubeEdges.GetProperty().SetSpecularPower(10)
# build the vertices of the pyramid
#
Sphere = vtk.vtkSphereSource()
Sphere.SetRadius(0.04)
Sphere.SetPhiResolution(20)
Sphere.SetThetaResolution(20)
ThresholdIn = vtk.vtkThresholdPoints()
ThresholdIn.SetInputData(Grid)
ThresholdIn.ThresholdByUpper(.5)
Vertices = vtk.vtkGlyph3D()
Vertices.SetInputConnection(ThresholdIn.GetOutputPort())
Vertices.SetSourceConnection(Sphere.GetOutputPort())
SphereMapper = vtk.vtkPolyDataMapper()
SphereMapper.SetInputConnection(Vertices.GetOutputPort())
SphereMapper.ScalarVisibilityOff()
CubeVertices = vtk.vtkActor()
CubeVertices.SetMapper(SphereMapper)
CubeVertices.GetProperty().SetDiffuseColor(GetRGBColor('tomato'))
# define the text for the labels
caseLabel = vtk.vtkVectorText()
caseLabel.SetText("Case 1")
aLabelTransform = vtk.vtkTransform()
aLabelTransform.Identity()
aLabelTransform.Translate(-.2, 0, 1.25)
aLabelTransform.Scale(.05, .05, .05)
labelTransform = vtk.vtkTransformPolyDataFilter()
labelTransform.SetTransform(aLabelTransform)
labelTransform.SetInputConnection(caseLabel.GetOutputPort())
labelMapper = vtk.vtkPolyDataMapper()
labelMapper.SetInputConnection(labelTransform.GetOutputPort())
labelActor = vtk.vtkActor()
labelActor.SetMapper(labelMapper)
# define the base
baseModel = vtk.vtkCubeSource()
baseModel.SetXLength(1.5)
baseModel.SetYLength(.01)
baseModel.SetZLength(1.5)
baseMapper = vtk.vtkPolyDataMapper()
baseMapper.SetInputConnection(baseModel.GetOutputPort())
base = vtk.vtkActor()
base.SetMapper(baseMapper)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# position the base
base.SetPosition(.5, -.09, .5)
ren1.AddActor(pyrEdgeActor)
ren1.AddActor(base)
ren1.AddActor(labelActor)
ren1.AddActor(CubeEdges)
ren1.AddActor(CubeVertices)
ren1.AddActor(Pyrs)
ren1.SetBackground(GetRGBColor('slate_grey'))
renWin.SetSize(400, 400)
ren1.ResetCamera()
ren1.GetActiveCamera().Dolly(1.3)
ren1.GetActiveCamera().Elevation(15)
ren1.ResetCameraClippingRange()
renWin.Render()
iren.Initialize()
def cases (id, mask):
# Turn each of the pyramid's five point scalars on or off according to the
# bits of 'id', then update the case label and re-render.
i = 0
while i < 5:
m = mask[i]
if m & id == 0:
Scalars.SetValue(i, 0)
else:
Scalars.SetValue(i, 1)
caseLabel.SetText("Case " + str(id))
i += 1
Grid.Modified()
renWin.Render()
mask = [1, 2, 4, 8, 16, 32]
cases(20, mask)
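# For example, id == 20 (== 4 + 16) switches on vertices 2 and 4 of the
# pyramid; other ids select different clipping cases.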
# iren.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/General/Testing/Python/clipPyramid.py
|
Python
|
bsd-3-clause
| 5,443
|
[
"VTK"
] |
401f1ab7fb4ee13253300c517da40edc2fd332e689dea186fa7a2a146f9202ba
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010- Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
FamilyPages - Family index page and individual Family pages
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from collections import defaultdict
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (EventType, Family)
from gramps.gen.plug.report import Bibliography
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (get_first_letters, _KEYPERSON,
alphabet_navigation, sort_people,
primary_difference, first_letter,
FULLCLEAR, get_index_letter)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
#################################################
#
# creates the Family List Page and Family Pages
#
#################################################
class FamilyPages(BasePage):
"""
This class is responsible for displaying information about the 'Family'
database objects. It displays this information under the 'Families'
tab. It is told by the 'add_instances' call which 'Family's to display,
and remembers the list of Family. A single call to 'display_pages'
displays both the Family List (Index) page and all the Family
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
self.family_dict = defaultdict(set)
self.familymappages = None
def display_pages(self, title):
"""
Generate and output the pages under the Family tab, namely the family
index and the individual family pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Family]")
for item in self.report.obj_dict[Family].items():
LOG.debug(" %s", str(item))
message = _("Creating family pages...")
index = 1
with self.r_user.progress(_("Narrated Web Site Report"), message,
len(self.report.obj_dict[Family]) + 1
) as step:
for family_handle in self.report.obj_dict[Family]:
step()
index += 1
self.familypage(self.report, title, family_handle)
step()
self.familylistpage(self.report, title,
self.report.obj_dict[Family].keys())
def familylistpage(self, report, title, fam_list):
"""
Create a family index
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: fam_list -- The list of family handles to be displayed
"""
BasePage.__init__(self, report, title)
output_file, sio = self.report.create_file("families")
result = self.write_header(self._("Families"))
familieslistpage, dummy_head, dummy_body, outerwrapper = result
ldatec = 0
prev_letter = " "
# begin Family Division
with Html("div", class_="content", id="Relationships") as relationlist:
outerwrapper += relationlist
# Families list page message
msg = self._("This page contains an index of all the "
"families/ relationships in the "
"database, sorted by their family name/ surname. "
"Clicking on a person’s "
"name will take you to their "
"family/ relationship’s page.")
relationlist += Html("p", msg, id="description")
# go through all the families, and construct a dictionary of all the
# people and the families they are involved in. Note that the people
# in the list may be involved in OTHER families, that are not listed
# because they are not in the original family list.
pers_fam_dict = defaultdict(list)
for family_handle in fam_list:
family = self.r_db.get_family_from_handle(family_handle)
if family:
if family.get_change_time() > ldatec:
ldatec = family.get_change_time()
husband_handle = family.get_father_handle()
spouse_handle = family.get_mother_handle()
if husband_handle:
pers_fam_dict[husband_handle].append(family)
if spouse_handle:
pers_fam_dict[spouse_handle].append(family)
# add alphabet navigation
index_list = get_first_letters(self.r_db, pers_fam_dict.keys(),
_KEYPERSON, rlocale=self.rlocale)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav:
relationlist += alpha_nav
# begin families table and table head
with Html("table", class_="infolist relationships") as table:
relationlist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
# set up page columns
trow.extend(
Html("th", trans, class_=colclass, inline=True)
for trans, colclass in [(self._("Letter"),
"ColumnRowLabel"),
(self._("Person"), "ColumnPartner"),
(self._("Family"), "ColumnPartner"),
(self._("Marriage"), "ColumnDate"),
(self._("Divorce"), "ColumnDate")]
)
tbody = Html("tbody")
table += tbody
# begin displaying index list
ppl_handle_list = sort_people(self.r_db, pers_fam_dict.keys(),
self.rlocale)
first = True
for (surname, handle_list) in ppl_handle_list:
if surname and not surname.isspace():
letter = get_index_letter(first_letter(surname),
index_list,
self.rlocale)
else:
letter = ' '
# get person from sorted database list
for person_handle in sorted(
handle_list, key=self.sort_on_name_and_grampsid):
person = self.r_db.get_person_from_handle(person_handle)
if person:
family_list = person.get_family_handle_list()
first_family = True
for family_handle in family_list:
get_family = self.r_db.get_family_from_handle
family = get_family(family_handle)
trow = Html("tr")
tbody += trow
tcell = Html("td", class_="ColumnRowLabel")
trow += tcell
if first or primary_difference(letter,
prev_letter,
self.rlocale):
first = False
prev_letter = letter
trow.attr = 'class="BeginLetter"'
ttle = self._("Families beginning with "
"letter ")
tcell += Html("a", letter, name=letter,
title=ttle + letter,
inline=True)
else:
tcell += ' '
tcell = Html("td", class_="ColumnPartner")
trow += tcell
if first_family:
trow.attr = 'class ="BeginFamily"'
tcell += self.new_person_link(
person_handle, uplink=self.uplink)
first_family = False
else:
tcell += ' '
tcell = Html("td", class_="ColumnPartner")
trow += tcell
tcell += self.family_link(
family.get_handle(),
self.report.get_family_name(family),
family.get_gramps_id(), self.uplink)
# family events; such as marriage and divorce
# events
fam_evt_ref_list = family.get_event_ref_list()
tcell1 = Html("td", class_="ColumnDate",
inline=True)
tcell2 = Html("td", class_="ColumnDate",
inline=True)
trow += (tcell1, tcell2)
if fam_evt_ref_list:
fam_evt_srt_ref_list = sorted(
fam_evt_ref_list,
key=self.sort_on_grampsid)
for evt_ref in fam_evt_srt_ref_list:
evt = self.r_db.get_event_from_handle(
evt_ref.ref)
if evt:
evt_type = evt.get_type()
if evt_type in [EventType.MARRIAGE,
EventType.DIVORCE]:
cell = self.rlocale.get_date(
evt.get_date_object())
if (evt_type ==
EventType.MARRIAGE):
tcell1 += cell
else:
tcell1 += ' '
if (evt_type ==
EventType.DIVORCE):
tcell2 += cell
else:
tcell2 += ' '
else:
tcell1 += ' '
tcell2 += ' '
first_family = False
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
outerwrapper += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(familieslistpage, output_file, sio, ldatec)
def familypage(self, report, title, family_handle):
"""
Create a family page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: family_handle -- The handle for the family to add
"""
family = report.database.get_family_from_handle(family_handle)
if not family:
return
BasePage.__init__(self, report, title, family.get_gramps_id())
ldatec = family.get_change_time()
self.bibli = Bibliography()
self.uplink = True
family_name = self.report.get_family_name(family)
self.page_title = family_name
self.familymappages = report.options["familymappages"]
output_file, sio = self.report.create_file(family.get_handle(), "fam")
result = self.write_header(family_name)
familydetailpage, dummy_head, dummy_body, outerwrapper = result
# begin FamilyDetail division
with Html("div", class_="content",
id="RelationshipDetail") as relationshipdetail:
outerwrapper += relationshipdetail
# family media list for initial thumbnail
if self.create_media:
media_list = family.get_media_list()
# If Event pages are not being created, then we need to display
# the family event media here
if not self.inc_events:
for evt_ref in family.get_event_ref_list():
event = self.r_db.get_event_from_handle(evt_ref.ref)
media_list += event.get_media_list()
relationshipdetail += Html(
"h2", self.page_title, inline=True) + (
Html('sup') + (Html('small') +
self.get_citation_links(
family.get_citation_list())))
# display relationships
families = self.display_family_relationships(family, None)
if families is not None:
relationshipdetail += families
# display additional images as gallery
if self.create_media and media_list:
addgallery = self.disp_add_img_as_gallery(media_list, family)
if addgallery:
relationshipdetail += addgallery
# Narrative subsection
notelist = family.get_note_list()
if notelist:
relationshipdetail += self.display_note_list(notelist, Family)
# display family LDS ordinance...
family_lds_ordinance_list = family.get_lds_ord_list()
if family_lds_ordinance_list:
relationshipdetail += self.display_lds_ordinance(family)
# get attribute list
attrlist = family.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
relationshipdetail += attrsection
# source references
srcrefs = self.display_ind_sources(family)
if srcrefs:
relationshipdetail += srcrefs
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
outerwrapper += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(familydetailpage, output_file, sio, ldatec)
|
sam-m888/gramps
|
gramps/plugins/webreport/family.py
|
Python
|
gpl-2.0
| 17,542
|
[
"Brian"
] |
a9f9e878907d0975cb517b64e9581e5ad86719ce6f257aec17c9529236bb030b
|
import sys
sys.path.append('../../../vmdgadgets')
import vmdutil
from vmdutil import vmddef
def avoid_collision1(vmd):
center = vmdutil.str_to_b('センター')
right_arm = vmdutil.str_to_b('右腕')
right_elb = vmdutil.str_to_b('右ひじ')
# frame 1032: add new keyframes
c_1032 = vmddef.BONE_SAMPLE._replace(
frame=1032, name=center,
position=(2.141899824142456, -0.4837593138217926, 3.3104584217071533))
ra_1032 = vmddef.BONE_SAMPLE._replace(
frame=1032, name=right_arm,
rotation=(0.17264382541179657, 0.13622286915779114, 0.361983984708786, 0.905872106552124))
re_1032 = vmddef.BONE_SAMPLE._replace(
frame=1032, name=right_elb,
rotation=(0.635169506072998, -0.47617700695991516, -0.3107016980648041, 0.5227601528167725))
# frame 1036: remove the center ('センター') keyframe
# frame 1036: replace the arm/elbow rotations
ra_1036_rot = (0.17034076154232025, 0.14684468507766724, 0.3597466051578522, 0.905540406703949)
re_1036_rot = (0.6970929503440857, -0.4250965118408203, -0.27151989936828613, 0.5095357894897461)
bone_frames = vmd.get_frames('bones')
new_frames = []
for frame in bone_frames:
if frame.frame != 1036:
new_frames.append(frame)
else:
bone_name = vmdutil.b_to_str(frame.name)
if bone_name == 'センター':
continue
elif bone_name == '右腕':
new_frames.append(frame._replace(
rotation=ra_1036_rot))
elif bone_name == '右ひじ':
new_frames.append(frame._replace(
rotation=re_1036_rot))
else:
new_frames.append(frame)
new_frames.append(c_1032)
new_frames.append(ra_1032)
new_frames.append(re_1032)
vmd.set_frames('bones', new_frames)
return
def avoid_collision2(vmd):
modify = {
1317: {
'右ひじ':((0.0, 0.0, 0.0), (0.5303139090538025, -0.5535628199577332, 0.12653225660324097, 0.6295421719551086))},
1322: {
'右腕': ((0.0, 0.0, 0.0), (0.25283217430114746, 0.24778856337070465, -0.011708247475326061, 0.9351678490638733)),
'右ひじ': ((0.0, 0.0, 0.0), (0.5284314155578613, -0.5674738883972168, 0.11908495426177979, 0.6201204061508179))},
1330: {
'センター': ((1.5169070959091187, -0.11587631702423096, 3.358367443084717), (0.0, -0.0, -0.0, 1.0)),
'右腕': ((0.0, 0.0, 0.0), (0.20906804502010345, 0.13286317884922028, 0.27482205629348755, 0.9290382266044617)),
'右ひじ': ((0.0, 0.0, 0.0), (0.6034250259399414, -0.5745618939399719, -0.04226955026388168, 0.5513404607772827))},
1333: {
'右ひじ': ((0.0, 0.0, 0.0), (0.6193941831588745, -0.5532808899879456, -0.14503660798072815, 0.5377723574638367))},
1339: {
'センター': ((2.3029496669769287, 0.0, 1.9691747426986694), (0.0, -0.0, -0.0, 1.0)),
'右腕': ((0.0, 0.0, 0.0), (0.18227244913578033, 0.19123366475105286, 0.2656165361404419, 0.9271727800369263)),
'右ひじ': ((0.0, 0.0, 0.0), (0.574394702911377, -0.5680624842643738, -0.36492064595222473, 0.4628258943557739))},
1342: {
'右ひじ': ((0.0, 0.0, 0.0), (0.574394702911377, -0.5680624842643738, -0.36492064595222473, 0.4628258943557739))},
1346: {
'センター': ((3.0592546463012695, 0.0, 0.2976529598236084), (0.0, -0.0, -0.0, 1.0)),
'右腕': ((0.0, 0.0, 0.0), (0.1709841787815094, 0.2831650674343109, 0.12234087288379669, 0.9357418417930603)),
'右ひじ': ((0.0, 0.0, 0.0), (0.6452091336250305, -0.5575323700904846, -0.011836055666208267, 0.5222232341766357))}
}
bone_frames = vmd.get_frames('bones')
for i, frame in enumerate(bone_frames):
bone_name = vmdutil.b_to_str(frame.name)
if frame.frame in modify:
if bone_name in modify[frame.frame]:
pos, rot = modify[frame.frame][bone_name]
bone_frames[i] = frame._replace(
position=pos, rotation=rot)
return
if __name__ == '__main__':
vmd_name = sys.argv[1]
vmd = vmdutil.Vmdio()
vmd.load(vmd_name)
avoid_collision1(vmd)
avoid_collision2(vmd)
vmd.store(vmd_name)
|
Hashi4/vmdgadgets
|
sample/lookat/sm31942771/minor_adjust.py
|
Python
|
apache-2.0
| 4,178
|
[
"VMD"
] |
e6f427f2f96cfb783dca861a4911f4d7dac803c44ecc7b8f74cdc9d64a6d7a8c
|
# Bioclimatic Chart
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Abraham Yezioro <ayez@ar.technion.ac.il>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
This is the Bioclimatic Chart. It is based on the chart originally proposed by V. Olgyay and then on the chart presented in the book "Sun, Climate and Architecture" by Brown.
Use this component to draw a Bioclimatic chart in the Rhino scene and evaluate a set of temperatures and humidity ratios in terms of indoor comfort. Connected data can include either outdoor temperature and humidity ratios from imported EPW weather data, indoor temperature and humidity ratios from an energy simulation, or individual numerical inputs of temperature and humidity. The input data will be plotted alongside polygons on the chart representing comfort as well as polygons representing the effects of passive building strategies on comfort.
References:
1. Olgyay, V., 1963. Design with Climate. Bioclimatic Approach to Architectural Regionalism. Van Nostrand reinhold, New York.
2. Givoni B., 1976. Man, Climate and Architecture. Applied Science Publishers, Ltd., London.
3. Murray M. and Givoni B., 1979. Architectural Design Based on Climate in Watson D. (ed), 1979. Energy COnservation Through Building Design. McGraw Hill Book Company.
4. Yezioro, A. & E. Shaviv. 1996. A Knowledge Based CAD System for Determining Thermal Comfort Design Strategies. Renewable Energy, 8: (1-4), (pp. 133-138).
5. Brown G.Z. and DeKay M., 2001. Sun, WInd & Light. Architectural Design Strategies (2nd edition). John WIley & Sons, Inc.
-
Provided by Ladybug 0.0.60
Args:
_dryBulbTemperature: A number representing the dry bulb temperature of the air in degrees Celsius. This input can also accept a list of temperatures representing conditions at different times or the direct output of dryBulbTemperature from the Import EPW component. Indoor temperatures from Honeybee energy simulations are also possible inputs.
_relativeHumidity: A number between 0 and 100 representing the relative humidity of the air in percentage. This input can also accept a list of relative humidity values representing conditions at different times or the direct output of relativeHumidity from of the Import EPW component.
------------------------------: ...
metabolicRate_: A number representing the metabolic rate of the human subject in met. This input can also accept text inputs for different activities. Acceptable text inputs include Sleeping, Reclining, Sitting, Typing, Standing, Driving, Cooking, House Cleaning, Walking, Walking 2mph, Walking 3mph, Walking 4mph, Running 9mph, Lifting 10lbs, Lifting 100lbs, Shoveling, Dancing, and Basketball. If no value is input here, the component will assume a metabolic rate of 1 met, which is the metabolic rate of a seated human being.
clothingLevel_: A number representing the clothing level of the human subject in clo. If no value is input here, the component will assume a clothing level of 1 clo, which is roughly the insulation provided by a 3-piece suit. A person dressed in shorts and a T-shirt has a clothing level of roughly 0.5 clo and a person in a thick winter jacket can have a clothing level as high as 2 to 4 clo.
passiveStrategy_: An optional text input of passive strategies to be laid over the Bioclimatic chart as polygons. Text inputs include "Passive Solar Heating", "Evaporative Cooling", "Thermal Mass + Night Vent" and "Natural Ventilation". NOT WORKING RIGHT NOW!!
------------------------------: ...
cullMesh_: Set to "True" to cull the colored mesh to the cells that have climatic data on them. See chartMesh output. Default "False"
calculateCharts_: Set to "True" to calculate and show a column type graph showing the percentage of time each strategy is capable of providing comfort conditions. See resultsChart output. Default "False"
------------------------------: ...
analysisPeriodWinter_: An optional analysis period from the Analysis Period component. If no Analysis period is given and epw data from the ImportEPW component has been connected, the analysis will be run for the entire year. ONLY WORKS FOR THE WHOLE YEAR RIGHT NOW!!
analysisPeriodSummer_: An optional analysis period from the Analysis Period component. If no Analysis period is given and epw data from the ImportEPW component has been connected, the analysis will be run for the entire year. ONLY WORKS FOR THE WHOLE YEAR RIGHT NOW!!
basePoint_: An optional base point that will be used to place the Bioclimatic Chart in the Rhino scene. If no base point is provided, the base point will be the Rhino model origin.
scale_: An optional number to change the scale of the Bioclimatic chart in the Rhino scene. By default, this value is set to 1.
legendPar_: Optional legend parameters from the Ladybug Legend Parameters component.
_runIt: Set to "True" to run the component and calculate the adaptive comfort metrics.
Returns:
readMe!: ...
------------------------------: ...
comfortResults: The number of hours and percent of the input data that are inside all comfort and passive strategy polygons.
totalComfortOrNot: A list of 0's and 1's indicating, for each hour of the input data, if the hour is inside a comfort and/or strategy polygon (1) or not(0).
strategyOrNot: A list of 0's and 1's indicating, for each hour of the input temperature and humidity ratio, if the hour is inside (1) or not(0), for each passive strategy and comfort polygons. If there are multiple comfort polyogns or passive strategies connected to the passiveStrategy_ input, this output will be a grafted list for each polygon.
------------------------------: ...
chartGridAndTxt: The grid and text labels of the Bioclimatic chart.
chartMesh: A colored mesh showing the number of input hours happen in each part of the Bioclimatic chart.
chartHourPoints: Points representing each of the hours of input temperature and humidity ratio. By default, this output is hidden and, to see it, you should connect it to a Grasshopper preview component.
hourPointColorsByComfort: Colors for the chartHourPoints above according to comfort results. They can be hooked up to the "Swatch" input of a Grasshopper Preview component that has the hour points above connected as geometry. By default, points are colored red if they lie inside comfort or strategy polygons and are colored blue if they do not meet such comfort criteria.
hourPointColorsByMonth: Colors for the chartHourPoints above according to each month. They can be hooked up to the "Swatch" input of a Grasshopper Preview component that has the hour points above connected as geometry. Points are given one color per month of the year.
min_maxPoints: Points marking each month's minimal/maximal values for temperature and relative humidity. By default, this output is hidden and, to see it, you should connect it to a Grasshopper preview component.
comfort_strategyPolygons: A tree of polygons representing the comfort and passive strategies areas of the chart made comfortable.
legend: A colored legend showing the number of hours that correspond to each color for the chartMesh output.
legendBasePt: The legend base point, which can be used to move the legend in relation to the chart with the grasshopper "move" component.
------------------------------: ...
resultsChart: A column type graph showing the percentage of time each strategy is capable of providing comfort conditions. These results summarize the whole year and each month. Each column shows three areas:
Comfort Zone (black),
Passive Solar Heating (yellow), as the only heating strategy for winter time
Evaporative Cooling or High Thermal Mass with Night Ventilation or Natural Ventilation (green, red, blue) as the possible cooling strategies for summer time.
"""
ghenv.Component.Name = "Ladybug_Bioclimatic Chart"
ghenv.Component.NickName = 'Bioclimatic Chart'
ghenv.Component.Message = 'VER 0.0.60\nJUL_21_2015'
ghenv.Component.Category = "Ladybug"
#ghenv.Component.SubCategory = "2 | VisualizeWeatherData"
ghenv.Component.SubCategory = "6 | WIP"
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import Grasshopper.Kernel as gh
import math
import scriptcontext as sc
import Rhino as rc
import rhinoscriptsyntax as rs
import System
from System import Object
from clr import AddReference as addr
addr("Grasshopper")
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
###################################
import Rhino.Geometry as rg
#Define Model Tolerance
tol = sc.doc.ModelAbsoluteTolerance
meshingP = rc.Geometry.MeshingParameters.Coarse
meshingP.SimplePlanes = True
lb_visualization = sc.sticky["ladybug_ResultVisualization"]()
lb_preparation = sc.sticky["ladybug_Preparation"]()
MonthNames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
MonthDays = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365]
#MonthDays = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365]
StNames = ['ComfortZone', 'PassiveSolarHeating', 'EvaporativeCooling', 'HighTermalMass+NightVent', 'NaturalVentilation']
def checkInputs():
#Define a value that will indicate whether someone has hooked up epw data.
epwData = False
epwStr = []
checkData1 = False
if _dryBulbTemperature and _relativeHumidity:
try:
#if str(_dryBulbTemperature[2]) == 'Dry Bulb Temperature' and \
# str(_relativeHumidity[2]) == 'Relative Humidity':
if "Temperature" in _dryBulbTemperature[2] and "Humidity" in _relativeHumidity[2]:
epwData = True
epwStr = _dryBulbTemperature[0:7]
checkData1 = True
else: pass
except: pass
else:
print 'Connect a temperature in degrees Celsius for _dryBulbTemperature and relative humidity to the proper input items'
#Check the metabolic rate.
##checkData2 = False
#if len(metabolicRate_) > 0:
#if metabolicRate_ > 9.5 or metabolicRate_ < 0.5:
#print 'metabolicRate_ ', metabolicRate_
#print 'You entered a probably invalid metabolic rate (%.1f met). Changed to Standing rate (1.2 met) ' (float(metabolicRate_))
#metabolicRate_ = 1.2
##checkData2 = True
#else:
#print 'metabolicRate_ ', metabolicRate_
##checkData2 = True
#Check the passive strategy inputs to be sure that they are correct.
checkData3 = True
if len(passiveStrategy_) > 0:
for item in passiveStrategy_:
if item == "Passive Solar Heating" or item == "Evaporative Cooling" or item == "Thermal Mass + Night Vent" or item == "Natural Ventilation": pass
else: checkData3 = False
if checkData3 == False:
warning = 'Input for passiveStrategy_ is not valid.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
#If all of the checkDatas have been good to go, let's give a final go ahead.
##if checkData1 == True and checkData2 and checkData3 == True :
if checkData1 == True and checkData3 == True :
checkData = True
else:
checkData = False
#print 'checkData ', checkData
return checkData, epwData, epwStr
#Define colors for the following output situations (points and legends): Month colored (12 colors), comfortNOcomfort (2 colors).
def colors():
customColors = [System.Drawing.Color.FromArgb(75, 107, 169), System.Drawing.Color.FromArgb(115, 147, 202),
System.Drawing.Color.FromArgb(170, 200, 247), System.Drawing.Color.FromArgb(193, 213, 208),
System.Drawing.Color.FromArgb(245, 239, 103), System.Drawing.Color.FromArgb(252, 230, 74),
System.Drawing.Color.FromArgb(239, 156, 21), System.Drawing.Color.FromArgb(234, 123, 0),
System.Drawing.Color.FromArgb(234, 74, 0), System.Drawing.Color.FromArgb(234, 38, 0)]
monthColors = [System.Drawing.Color.FromArgb(0, 0, 0), System.Drawing.Color.FromArgb(0, 125, 0),
System.Drawing.Color.FromArgb(0, 255, 0), System.Drawing.Color.FromArgb(0, 125, 125),
System.Drawing.Color.FromArgb(0, 125, 255), System.Drawing.Color.FromArgb(0, 255, 255),
System.Drawing.Color.FromArgb(0, 0, 125), System.Drawing.Color.FromArgb(0, 0, 255),
System.Drawing.Color.FromArgb(125, 0, 0), System.Drawing.Color.FromArgb(255, 0, 0),
System.Drawing.Color.FromArgb(125, 125, 0), System.Drawing.Color.FromArgb(255, 125, 0)]
comfortNOcomfortColors = [System.Drawing.Color.FromArgb(75, 107, 169), # Comfort
System.Drawing.Color.FromArgb(234, 38, 0)] # NoComfort
strategiesColors = [System.Drawing.Color.FromArgb(50, 50, 50), # CZ
System.Drawing.Color.FromArgb(200, 200, 0), # PSH or (220, 220, 50)
System.Drawing.Color.FromArgb(50, 220, 50), # EC
System.Drawing.Color.FromArgb(220, 50, 50), # HTM
System.Drawing.Color.FromArgb(50, 50, 220)] # NV
return monthColors, comfortNOcomfortColors, strategiesColors
#Define a function to offset curves and return things that will stand out on the Bioclimatic chart.
def outlineCurve(curve):
try:
offsetCrv = curve.Offset(rc.Geometry.Plane.WorldXY, 0.25, sc.doc.ModelAbsoluteTolerance, rc.Geometry.CurveOffsetCornerStyle.Sharp)[0]
finalBrep = (rc.Geometry.Brep.CreatePlanarBreps([curve, offsetCrv])[0])
except:
finalBrep = rc.Geometry.Brep.CreatePlanarBreps([curve])[0]
warning = "Creating an outline of one of the comfort or strategy curves failed. Component will return a solid brep."
print warning
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return finalBrep
def createResultsLegend(orgX, orgY, orgZ, gridStep, monthStep, xLineValue, yLineValue, location, strategyNames, strategyPercent, strategyHours, lb_preparation, legendFontSize, legendFont, strategiesColors):
if legendFontSize == None: legendFontSize = 2
#Make axis labels for the chart.
xAxisLabels = []
xAxisTxt = ["Comfort Strategies"]
xAxisPt = [rc.Geometry.Point3d(orgX + 0., orgY - 5.0, 0)]
xAxisLabels.extend(text2srf(xAxisTxt, xAxisPt, legendFont, legendFontSize*1.25)[0])
# Make the percentage text for the chart Y axis.
percentText = []
percentLabels = []
percentLabelBasePts = []
percentNum = range(0, 110, 10)
for count, percent in enumerate(percentNum):
percentLabelBasePts.append(rc.Geometry.Point3d(orgX - 8, orgY + (percent)-0.75, 0))
percentText.append(str(percent)+"%")
for count, text in enumerate(percentText):
percentLabels.extend(text2srf([text], [percentLabelBasePts[count]], legendFont, legendFontSize*.75)[0])
# Make the Title at top.
titleLabels = []
titleTxt = "Year Results"
titlePt = [rc.Geometry.Point3d(orgX + 0., (100 + orgY + 1), 0)]
titleLabels.extend(text2srf([titleTxt], [titlePt[0]], legendFont, legendFontSize*1.5)[0])
#Months titles loop
step = monthStep
for m in range(0, len(MonthNames)):
#titleLabels = []
titleTxt = MonthNames[m]
titlePt = [rc.Geometry.Point3d(orgX + step, (100 + orgY + 1), 0)]
step += monthStep
titleLabels.extend(text2srf([titleTxt], [titlePt[0]], legendFont, legendFontSize*1.5)[0])
# Create legend with Strategies Names, at bottom right.
legLabels = []
legLabels1 = []
legResultstText = []
legPolygon = []
legLabelBasePts = []
legLabelBasePts1 = []
legColorLabelBasePts = []
legColorLabelBasePts1 = []
legNum = range(int(orgY), 50, 8)
radius = 2.0
segments = 4
for count, leg in enumerate(legNum):
legLabelBasePts.append(rc.Geometry.Point3d(xLineValue[12] + 50, leg + 1.50, 0))
legColorLabelBasePts.append(rc.Geometry.Point3d(xLineValue[12] + 46, leg + 1.25, 0))
legColorLabelBasePts1.append(rc.Geometry.Point3d(xLineValue[12] + 50, leg - 1.0, 0)) ####
for count, text in enumerate(strategyNames):
combString = '%.1f%% - %d hours' % (strategyPercent[count], strategyHours[count])
legResultstText.append(combString) #strategyPercent, strategyHours
for count, text in enumerate(legResultstText):
legLabels1.extend(text2srf([text], [legColorLabelBasePts1[count]], legendFont, legendFontSize*.75)[0])
for count, text in enumerate(strategyNames):
if count == 0: colorP = strategiesColors[0] # CZ
elif count == 1: colorP = strategiesColors[1] # PSH or (220, 220, 50)
elif count == 2: colorP = strategiesColors[2] # EC
elif count == 3: colorP = strategiesColors[3] # HTM
elif count == 4: colorP = strategiesColors[4] # NV
else : colorP = System.Drawing.Color.FromArgb(255, 255, 255, 255)
legLabels.extend(text2srf([text], [legLabelBasePts[count]], legendFont, legendFontSize*0.75)[0])
legPol = drawPolygon(legColorLabelBasePts[count], radius, segments, colorP)
legPolygon.append(legPol)
#Bring all legend and text together in one list.
restText = []
for item in percentLabels:
restText.append(item)
#for item in relHumidLabels:
# restText.append(item)
for item in xAxisLabels:
restText.append(item)
#for item in yAxisLabels:
# restText.append(item)
for item in titleLabels:
restText.append(item)
for item in legLabels:
restText.append(item)
for item in legLabels1:
restText.append(item)
for item in legPolygon:
restText.append(item)
return restText
def createChartLayout(orgX, orgY, orgZ, location, legendFont, legendFontSize):
if legendFontSize == None: legendFontSize = 2
#Make axis labels for the chart.
xAxisLabels = []
xAxisTxt = ["Humidity Ratio"]
#xAxisPt = [rc.Geometry.Point3d(orgX + 35., orgY - 10.0, 0)]
xAxisPt = [rc.Geometry.Point3d(orgX, orgY - 10.0, 0)]
xAxisLabels.extend(text2srf(xAxisTxt, xAxisPt, legendFont, legendFontSize*1.25)[0])
yAxisLabels = []
yAxisTxt = ["Dry Bulb Temperature"]
#yAxisPt = [rc.Geometry.Point3d(orgX - 10.0, orgY + 35., 0)]
yAxisPt = [rc.Geometry.Point3d(orgX - 10.0, orgY, 0)]
yAxisLabels.extend(text2srf(yAxisTxt, yAxisPt, legendFont, legendFontSize*1.25)[0])
#rotateTransf = rc.Geometry.Transform.Rotation(1.57079633, rc.Geometry.Point3d(orgX - 10.0, orgY + 35., 0))
rotateTransf = rc.Geometry.Transform.Rotation(1.57079633, rc.Geometry.Point3d(orgX - 10.0, orgY, 0))
for geo in yAxisLabels:
geo.Transform(rotateTransf)
#tempNum = range(orgY + 5, 55, 5)
tempNum = range(int(orgY/2), 55, 5)
relHumidNum = range(0, 110, 10)
# Make the relative humidity text for the chart.
relHumidBasePts = []
relHumidTxt = []
relHumidLabels = []
for count, humid in enumerate(relHumidNum):
#print 'count ', count, humid
relHumidBasePts.append(rc.Geometry.Point3d(humid - 2, orgY - 3, 0))
relHumidTxt.append(str(humid)+"%")
for count, text in enumerate(relHumidTxt):
relHumidLabels.extend(text2srf([text], [relHumidBasePts[count]], legendFont, legendFontSize*.75)[0])
# Make the temperature text for the chart.
tempText = []
tempLabels = []
tempLabelBasePts = []
for count, temp in enumerate(tempNum):
#print 'count ', count, temp
tempLabelBasePts.append(rc.Geometry.Point3d(-5, (temp * 2)-0.75, 0))
tempText.append(str(temp))
for count, text in enumerate(tempText):
tempLabels.extend(text2srf([text], [tempLabelBasePts[count]], legendFont, legendFontSize*.75)[0])
titleLabels = []
titleTxt = ["Bio Climatic Chart", location]
titlePt = [rc.Geometry.Point3d(orgX, 108, 0),
rc.Geometry.Point3d(orgX, 103, 0)]
for count, text in enumerate(titleTxt):
titleLabels.extend(text2srf([text], [titlePt[count]], legendFont, legendFontSize*1.5)[0])
#Bring all text and curves together in one list.
chartLayout = []
for item in tempLabels:
chartLayout.append(item)
for item in relHumidLabels:
chartLayout.append(item)
for item in xAxisLabels:
chartLayout.append(item)
for item in yAxisLabels:
chartLayout.append(item)
for item in titleLabels:
chartLayout.append(item)
return chartLayout
def createChartLegend(orgX, orgY, orgZ, strategyNames, lb_preparation, legendScale, legendFont, legendFontSize, lb_visualization, strategiesColors, monthColors, comfortNOcomfortColors, customColors, totalComfortOrNot):
if legendFontSize == None: legendFontSize = 2
# ************** Generate a legend for strategies ***********
legStrategyLabels = []
legLabelBasePts = []
finalLegPolyline = []
legMonthLabels = []
legMonthLabelBasePts = []
legComfLabels = []
legComfLabelBasePts = []
shiftY = -20 #-15
leg = int(orgY) + shiftY
#legNum = range(int(orgY) + shiftY, -23 + shiftY, -3) # [-25, -28, -31, -34, -37] Need 5 values for 5 strategies
legStrategyPolygons = []
for count, text in enumerate(strategyNames):
if count == 0: color = strategiesColors[0] # CZ
elif count == 1: color = strategiesColors[1] # PSH or (220, 220, 50)
elif count == 2: color = strategiesColors[2] # EC
elif count == 3: color = strategiesColors[3] # HTM
elif count == 4: color = strategiesColors[4] # NV
else : color = System.Drawing.Color.FromArgb(255, 0, 255, 255)
legLinePt = []
legLabelBasePts.append(rc.Geometry.Point3d(orgX + 10, leg + 0.0, 0)) # Base point for each strategy name
points = [orgX, leg + 0.75, 0.0],[orgX + 9.0, leg + 0.75, 0.0], [orgX + 9.0, leg + 0.751, 0.0], [orgX, leg + 0.751, 0.0], \
[orgX, leg + 0.75, 0.0] # Coords for the line in the strategies legend
for cc in range(0, len(points)):
legLinePt.append(rc.Geometry.Point3d(points[cc][0], points[cc][1], points[cc][2]))
legPolyline = rc.Geometry.PolylineCurve(legLinePt)
leg += -3
finalLegPolyline.append(outlineCurve(legPolyline)) # This is the element to use for coloring the legend
legMesh = rc.Geometry.Mesh()
legMesh.Append(rc.Geometry.Mesh.CreateFromBrep(finalLegPolyline[count])[0])
legMesh.VertexColors.CreateMonotoneMesh(color)
legStrategyPolygons.append(legMesh)
# Draw the strategies names
legStrategyLabels.extend(text2srf([text], [legLabelBasePts[count]], legendFont, legendFontSize*0.75)[0])
# ************** End Legend of strategies ***********
"""
# ************** Generate a legend for comfortOrNot Using Chris's way from Psychrometric chart ***********
leg = int(orgY) + shiftY - 11.0
##pointColors = []
legComfBasePts = rc.Geometry.Point3d(orgX + 45, leg, 0) # Base point comfortOrNot legend
if str(totalComfortOrNot[0]) == "key:location/dataType/units/frequency/startsAt/endsAt":
totalComfortOrNot = totalComfortOrNot[7:]
##pointColors.append(lb_visualization.gradientColor(totalComfortOrNot, 0, 1, customColors))
legend = []
#legendSrfs, legendText, legendTextCrv, textPt, textSize = lb_visualization.createLegend(totalComfortOrNot, 0, 1, 2, "Comfort", lb_visualization.BoundingBoxPar, legComfBasePts, legendScale, legendFont, legendFontSize)
legendSrfs, legendText, legendTextCrv, textPt, textSize = lb_visualization.createLegend(totalComfortOrNot, 0, 1, 2, "Comfort or Not", lb_visualization.BoundingBoxPar, legComfBasePts, .45, legendFont, 1.5)
legendColors = lb_visualization.gradientColor(legendText[:-1], 0, 1, customColors)
legendSrfs = lb_visualization.colorMesh(legendColors, legendSrfs)
legend.append(legendSrfs)
for list in legendTextCrv:
for item in list:
legend.append(item)
# ************** End Legend of comfortOrNot ***********
"""
radius = 0.7
segments = 6
# ************** Generate a legend for comfortOrNot ***********
leg = int(orgY) + shiftY
legComfCircle = []
circCenter = []
circPolyline = []
for m in range(0, 2): # 0 to 2 for Comfort or NoComfort possibilities
if m == 0: text = "No Comfort"
else : text = "Comfort"
circCenter.append(rc.Geometry.Point3d(orgX + 55, leg + radius/2, 0)) # Base point for each month circle
legComfLabelBasePts.append(rc.Geometry.Point3d(orgX + 57, leg + 0.0, 0)) # Base point for each month name
leg += -3
# Draw the comfortOrNot circles and value
cP, dataPolyline = circPoints(circCenter[m], radius, segments)
circPolyline.append(outlineCurve(dataPolyline)) # This is the element to use for coloring the legend
legMesh = rc.Geometry.Mesh()
legMesh.Append(rc.Geometry.Mesh.CreateFromBrep(circPolyline[m])[0])
legMesh.VertexColors.CreateMonotoneMesh(comfortNOcomfortColors[m])
legComfCircle.append(legMesh)
legComfLabels.extend(text2srf([text], [legComfLabelBasePts[m]], legendFont, legendFontSize*0.75)[0])
# ************** End Legend of comfortOrNot ***********
# ************** Generate a legend for Month colored ***********
leg = int(orgY) + shiftY
legMonthsCircle = []
circCenter = []
circPolyline = []
for count, text in enumerate(MonthNames):
circCenter.append(rc.Geometry.Point3d(orgX + 85, leg + radius/2, 0)) # Base point for each month circle
legMonthLabelBasePts.append(rc.Geometry.Point3d(orgX + 87, leg + 0.0, 0)) # Base point for each month name
leg += -3
# Draw the Months circles and names
cP, dataPolyline = circPoints(circCenter[count], radius, segments)
circPolyline.append(outlineCurve(dataPolyline)) # This is the element to use for coloring the legend
legMesh = rc.Geometry.Mesh()
legMesh.Append(rc.Geometry.Mesh.CreateFromBrep(circPolyline[count])[0])
legMesh.VertexColors.CreateMonotoneMesh(monthColors[count])
legMonthsCircle.append(legMesh)
legMonthLabels.extend(text2srf([text], [legMonthLabelBasePts[count]], legendFont, legendFontSize*0.75)[0])
# ************** End Legend of Month colors ***********
#Bring all text and curves together in one list.
chartLegend = []
for item in finalLegPolyline:
chartLegend.append(item)
for item in legStrategyPolygons:
chartLegend.append(item)
for item in legStrategyLabels:
chartLegend.append(item)
#for item in legend: # This is for Chris's way - Keep commented/uncommented together with the block above
# chartLegend.append(item)
for item in legComfCircle:
chartLegend.append(item)
for item in legComfLabels:
chartLegend.append(item)
for item in legMonthsCircle:
chartLegend.append(item)
for item in legMonthLabels:
chartLegend.append(item)
return chartLegend
def text2srf(text, textPt, font, textHeight):
# Thanks to Giulio Piacentino for his version of text to curve
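# Converts each string into planar surface Breps: a text object is temporarily added
# to the active Rhino document, exploded to curves, joined and surfaced, and the
# temporary document object is deleted before returning.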
textSrfs = []
for n in range(len(text)):
plane = rc.Geometry.Plane(textPt[n], rc.Geometry.Vector3d(0,0,1))
if type(text[n]) is not str:
preText = rc.RhinoDoc.ActiveDoc.Objects.AddText(str(text[n]), plane, textHeight, font, True, False)
else:
preText = rc.RhinoDoc.ActiveDoc.Objects.AddText( text[n], plane, textHeight, font, True, False)
postText = rc.RhinoDoc.ActiveDoc.Objects.Find(preText)
TG = postText.Geometry
crvs = TG.Explode()
# join the curves
joindCrvs = rc.Geometry.Curve.JoinCurves(crvs)
# create the surface
srfs = rc.Geometry.Brep.CreatePlanarBreps(joindCrvs)
extraSrfCount = 0
# the '=' and ':' glyphs each produce 2 surfaces, so expect one fewer than the character count
if "=" in text[n]: extraSrfCount += -1
if ":" in text[n]: extraSrfCount += -1
if len(text[n].strip()) != len(srfs) + extraSrfCount:
# project the curves to the place in case number of surfaces
# doesn't match the text
projectedCrvs = []
for crv in joindCrvs:
projectedCrvs.append(rc.Geometry.Curve.ProjectToPlane(crv, plane))
srfs = rc.Geometry.Brep.CreatePlanarBreps(projectedCrvs)
textSrfs.append(srfs)
rc.RhinoDoc.ActiveDoc.Objects.Delete(postText, True) # find and delete the text
return textSrfs
#def strategyDraw_Calc(name, shiftFactor, *points): # The x is for a list of points passed one by one
def strategyDraw_Calc(name, shiftFactor, points, hourPoints, tol, dryBulbTemperature, totalHrs, cR, cG, cB):
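# Shifts the strategy polygon vertically by shiftFactor (the metabolic rate / clothing
# correction), tests every hour point for containment in the closed polyline and returns
# the number of hours inside, their percentage, a per-hour 0/1 list, the planar Brep of
# the polygon and the outlined curve used for display.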
strategyID = [0 for x in range(totalHrs)]
strategyPt = []
for x in range(0, len(points)):
strategyPt.append(rc.Geometry.Point3d(points[x][0], points[x][1] + shiftFactor, points[x][2] + 0.05))
#Draw Polygon
strategyPolyline = rc.Geometry.PolylineCurve(strategyPt)
#meshedC = rc.Geometry.Mesh.CreateFromPlanarBoundary(strategyPolyline.ToNurbsCurve(), meshingP)
#Turn the comfort curve into a brep that will show up well on the chart.
#finalStrategyPolyline = []
finalStrategyPolyline = outlineCurve(strategyPolyline)
strategyPolygon = rc.Geometry.Brep.CreatePlanarBreps(strategyPolyline)[0]
### try: strategyPolygon = rc.Geometry.Brep.CreatePlanarBreps(strategyPolyline)[0]
### except: strategyPolygon = None
#Start STRATEGY statistics ********************************
#Find the hours in the STRATEGY.
strategyList = []
n = 0
for point in hourPoints:
containment = strategyPolyline.Contains(point, rc.Geometry.Plane.WorldXY, tol)
if str(containment) == 'Inside':
strategyList.append(1)
strategyID[n] = 1
else: strategyList.append(0)
n += 1
#Find the STRATEGY Percentage.
numStrHrs = sum(strategyList)
totalHrs = len(dryBulbTemperature)
strPercent = (float(numStrHrs) / totalHrs) * 100
#End STRATEGY statistics ********************************
#return numStrHrs, strPercent, strategyID, strategyPolygon
return numStrHrs, strPercent, strategyID, strategyPolygon, finalStrategyPolyline
def drawColorResults(czPt, pshPt, stPt, colorCZ, colorPSH, colorST):
alpha = 255 # 0 = Transparent, 255 = Opaque ... supposed to be so
colResMesh = []
for count in range(3):
if count == 0:
color = colorCZ
#Draw each Result
dataPolyline = rc.Geometry.PolylineCurve(czPt)
meshedC = rc.Geometry.Mesh.CreateFromPlanarBoundary(dataPolyline.ToNurbsCurve(), meshingP)
elif count == 1:
color = colorPSH
dataPolyline = rc.Geometry.PolylineCurve(pshPt)
meshedC = rc.Geometry.Mesh.CreateFromPlanarBoundary(dataPolyline.ToNurbsCurve(), meshingP)
elif count == 2:
color = colorST
dataPolyline = rc.Geometry.PolylineCurve(stPt)
meshedC = rc.Geometry.Mesh.CreateFromPlanarBoundary(dataPolyline.ToNurbsCurve(), meshingP)
# generate the color list for all the vertices
repeatedColors = []
for face in range(meshedC.Faces.Count):
repeatedColors.append(color)
# use ladybug functions to color the circle
##colMesh = lb_visualization.colorMesh(repeatedColors, meshedC)
if count == 0 : colMesh1 = lb_visualization.colorMesh(repeatedColors, meshedC)
elif count == 1 : colMesh2 = lb_visualization.colorMesh(repeatedColors, meshedC)
elif count == 2 : colMesh3 = lb_visualization.colorMesh(repeatedColors, meshedC)
####colResMesh.append(colMesh)
#colResMesh[count] = colMesh
#print count, colMesh
#Bring all legend and text together in one list.
##colMeshGGraph = []
#for item in colResMesh:
#for item in colMesh:
#colMeshGGraph.append(item)
##return colMesh
####return colResMesh
return colMesh1, colMesh2, colMesh3
##return colMeshGGraph
def graphResults(orgX, orgY, orgZ, gridStep, xLineValue, yLineValue, location, strategyNames, strategyPercent, strategyHours, lb_preparation, legendFontSize, legendFont, strategiesColors):
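# Yearly results chart: one stacked bar per remaining strategy (EC, HTM, NV) with the
# Comfort Zone percentage at the bottom, Passive Solar Heating stacked above it and the
# strategy's own percentage on top; bars are spaced 10 units apart along X.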
czBot = orgY
czTop = orgY + strategyPercent[0]
pshBot = czTop
pshTop = czTop + strategyPercent[1]
firstX = orgX
colorCZ = strategiesColors[0] # CZ
colorPSH = strategiesColors[1] # PSH
stResGraph = []
seeChartResults = []
for count in range(2, len(strategyNames)): # Start from third strategy. First 2 are: CZ and PSH which are fixed
if count == 0: colorST = strategiesColors[0] # CZ
elif count == 1: colorST = strategiesColors[1] # PSH
elif count == 2: colorST = strategiesColors[2] # EC
elif count == 3: colorST = strategiesColors[3] # HTM
elif count == 4: colorST = strategiesColors[4] # NV
else : colorST = [255, 255, 255]
czPt = []
pshPt = []
stPt = []
czPt.append(rc.Geometry.Point3d(firstX + 2.0, czBot, 0.0))
czPt.append(rc.Geometry.Point3d(firstX + 8.0, czBot, 0.0))
czPt.append(rc.Geometry.Point3d(firstX + 8.0, czTop, 0.0))
czPt.append(rc.Geometry.Point3d(firstX + 2.0, czTop, 0.0))
czPt.append(rc.Geometry.Point3d(firstX + 2.0, czBot, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 2.0, pshBot, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 8.0, pshBot, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 8.0, pshTop, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 2.0, pshTop, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 2.0, pshBot, 0.0))
stTop = pshTop + strategyPercent[count]
stPt.append(rc.Geometry.Point3d(firstX + 2.0, pshTop, 0.0))
stPt.append(rc.Geometry.Point3d(firstX + 8.0, pshTop, 0.0))
stPt.append(rc.Geometry.Point3d(firstX + 8.0, stTop, 0.0))
stPt.append(rc.Geometry.Point3d(firstX + 2.0, stTop, 0.0))
stPt.append(rc.Geometry.Point3d(firstX + 2.0, pshTop, 0.0))
firstX = firstX + 10.0
#Draw Polygon
strategyPolyline = rc.Geometry.PolylineCurve(stPt)
stResGraph.append(strategyPolyline)
strategyPolyline = rc.Geometry.PolylineCurve(pshPt)
stResGraph.append(strategyPolyline)
strategyPolyline = rc.Geometry.PolylineCurve(czPt)
stResGraph.append(strategyPolyline)
#print colorCZ, colorPSH, colorST, colorKK
##colorResultsMesh = drawColorResults(czPt, pshPt, stPt, colorCZ, colorPSH, colorST) # Calling routine for drawing the results in color
##stResGraph.append(colorResultsMesh)
colorResultsMesh1, colorResultsMesh2, colorResultsMesh3 = drawColorResults(czPt, pshPt, stPt, colorCZ, colorPSH, colorST) # Calling routine for drawing the results in color
stResGraph.append(colorResultsMesh1)
stResGraph.append(colorResultsMesh2)
stResGraph.append(colorResultsMesh3)
#Bring all legend and text together in one list.
##seeChartResults = []
for item in stResGraph:
seeChartResults.append(item)
##return seeChartResults
return stResGraph
def graphResultsMonth(orgX, orgY, orgZ, gridStep, monthStep, xLineValue, yLineValue, location, strategyNames, strategyPercent, strategyHours, strategyMonth, lb_preparation, legendFontSize, legendFont, strategiesColors):
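# Monthly results chart: same stacked bars as graphResults but one group per month,
# using the per-month percentages stored in strategyMonth; a minimum bar height of
# 0.01 keeps months with zero hours visible.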
colorCZ = strategiesColors[0] # CZ
colorPSH = strategiesColors[1] # PSH
y = float()
#Months titles loop
step = 0
seeChartResults = []
stResGraph = []
for month in range(0, len(MonthNames)):
step += monthStep
czBot = orgY
for line in strategyMonth[0][month][0:1]:
if line == 0:
line = 0.01
czTop = orgY + line # CZ & Percentage in the list
pshBot = czTop
for line in strategyMonth[1][month][0:1]:
if line == 0:
line = 0.01
pshTop = czTop + line # PSH & Percentage in the list
firstX = orgX + step
##for count in range(0, len(StNames)):
for count in range(2, len(strategyNames)):
#for count in range(2, len(strategyNames)): # Start from third strategy. First 2 are: CZ and PSH which are fixed
if count == 0: colorST = strategiesColors[0] # CZ
elif count == 1: colorST = strategiesColors[1] # PSH
elif count == 2: colorST = strategiesColors[2] # EC
elif count == 3: colorST = strategiesColors[3] # HTM
elif count == 4: colorST = strategiesColors[4] # NV
else : colorST = [255, 255, 255]
czPt = []
pshPt = []
stPt = []
czPt.append(rc.Geometry.Point3d(firstX + 2.0, czBot, 0.0))
czPt.append(rc.Geometry.Point3d(firstX + 8.0, czBot, 0.0))
czPt.append(rc.Geometry.Point3d(firstX + 8.0, czTop, 0.0))
czPt.append(rc.Geometry.Point3d(firstX + 2.0, czTop, 0.0))
czPt.append(rc.Geometry.Point3d(firstX + 2.0, czBot, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 2.0, pshBot, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 8.0, pshBot, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 8.0, pshTop, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 2.0, pshTop, 0.0))
pshPt.append(rc.Geometry.Point3d(firstX + 2.0, pshBot, 0.0))
for line in strategyMonth[count][month][0:1]:
if line == 0:
line = 0.01
stTop = pshTop + float(line) # Strategy & Percentage in the list
#stTop = pshTop + strategyPercent[count]
stPt.append(rc.Geometry.Point3d(firstX + 2.0, pshTop, 0.0))
stPt.append(rc.Geometry.Point3d(firstX + 8.0, pshTop, 0.0))
stPt.append(rc.Geometry.Point3d(firstX + 8.0, stTop, 0.0))
stPt.append(rc.Geometry.Point3d(firstX + 2.0, stTop, 0.0))
stPt.append(rc.Geometry.Point3d(firstX + 2.0, pshTop, 0.0))
firstX = firstX + 10.0
#Draw Polygon
strategyPolyline = rc.Geometry.PolylineCurve(stPt)
stResGraph.append(strategyPolyline)
strategyPolyline = rc.Geometry.PolylineCurve(pshPt)
stResGraph.append(strategyPolyline)
strategyPolyline = rc.Geometry.PolylineCurve(czPt)
stResGraph.append(strategyPolyline)
##colorResultsMesh = drawColorResults(czPt, pshPt, stPt, colorCZ, colorPSH, colorST) # Calling routine for drawing the results in color
##stResGraph.append(colorResultsMesh)
colorResultsMesh1, colorResultsMesh2, colorResultsMesh3 = drawColorResults(czPt, pshPt, stPt, colorCZ, colorPSH, colorST) # Calling routine for drawing the results in color
stResGraph.append(colorResultsMesh1)
stResGraph.append(colorResultsMesh2)
stResGraph.append(colorResultsMesh3)
#Bring all legend and text together in one list.
for item in stResGraph:
seeChartResults.append(item)
#return stResGraph
return seeChartResults
def showResults(basePoint_, mainLegHeight, strategyNames, strategyPercent, strategyHours, strategyMonth, \
lowB, highB, numSeg, customColors, legendBasePoint, legendScale, legendFont, legendFontSize, lb_preparation, lb_visualization, strategiesColors):
gridLines = []
resultsChart = []
shiftRes = 45
orgX = int(shiftRes + legendBasePoint[0])
orgY = int(legendBasePoint[1])
orgZ = int(legendBasePoint[2])
monthStep = 40
resLimitX = monthStep * 13
boundBoxX = 40
boundBoxY = abs(orgY) + 100
scaleLegBox = mainLegHeight / 100
#print 'BB = ', boundBoxX, boundBoxY, orgX, orgY, mainLegHeight, scaleLegBox
gridStep = 10
# Grid Lines *******************************
#xLineValue = range(orgX, orgX + 50, 40)
xLineValue = range(orgX, orgX + resLimitX + monthStep, monthStep)
yLineValue = range(orgY, orgY + 110, gridStep)
#for m in range(0, len(MonthNames)+1):
for value in xLineValue: #Bottom to Top lines
gridLines.append(rc.Geometry.Line(value, orgY, 0, value, (100 + orgY), 0))
#print value-orgX
for value in yLineValue: # Left to right lines
#gridLines.append(rc.Geometry.Line(orgX, value, 0, orgX + 40, value, 0))
gridLines.append(rc.Geometry.Line(orgX, value, 0, orgX + resLimitX, value, 0))
# End Grid Lines ****************************
# Print Title and legends in grid: X/Y axis ****************************
location = 'Results'
resText = createResultsLegend(orgX, orgY, orgZ, gridStep, monthStep, xLineValue, yLineValue, location, strategyNames, strategyPercent, strategyHours, lb_preparation, legendFontSize, legendFont, strategiesColors)
# This is for Year results --- in the future this and the monthly results should be unified in one single routine
seeChartResults = graphResults(orgX, orgY, orgZ, gridStep, xLineValue, yLineValue, location, strategyNames, strategyPercent, strategyHours, lb_preparation, legendFontSize, legendFont, strategiesColors)
# This is for monthly results
seeMonthChartResults = graphResultsMonth(orgX, orgY, orgZ, gridStep, monthStep, xLineValue, yLineValue, location, strategyNames, strategyPercent, strategyHours, strategyMonth, lb_preparation, legendFontSize, legendFont, strategiesColors)
# Collect all Results, Legend and Gridlines in one list.
for item in gridLines:
resultsChart.append(item)
for item in resText:
resultsChart.append(item)
for item in seeChartResults:
resultsChart.append(item)
for item in seeMonthChartResults:
resultsChart.append(item)
bPt = rc.Geometry.Point3d(20., -2., 0.)
#bPtSc = rc.Geometry.Point3d(0., 0., 0.)
scale = rc.Geometry.Transform.Scale(bPt, scaleLegBox)
move = rc.Geometry.Transform.Translation(bPt.X, bPt.Y, bPt.Z)
transformMtx = scale * move
for geo in resultsChart: geo.Transform(transformMtx)
return resultsChart
def circPoints(centerPt, radius, secs):
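# Returns the vertices and polyline of a regular polygon with 'secs' sides that
# approximates a circle of the given radius; points are lifted 0.1 above the chart
# plane so they draw on top of the coloured mesh.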
div = 2.0 * math.pi / secs # angular step between polygon vertices
ang = 0
cP = []
for n in range(0, secs+1): # The +1 repeats the first vertex so the polygon is closed
if n == secs:
ang = 0 # reset the angle so the last point exactly matches the first
ptX = (centerPt.X + radius * math.cos(ang))
ptY = (centerPt.Y + radius * math.sin(ang))
ptZ = (centerPt.Z)+ 0.1
cP.append(rc.Geometry.Point3d(ptX, ptY, ptZ))
ang += div
#Draw each climatic data
dataPolyline = rc.Geometry.PolylineCurve(cP)
return cP, dataPolyline
def drawPolygon(centerPt, radius, secs, color):
div = 2.0 * math.pi / secs # angular step between polygon vertices
ang = 0
cP = []
for n in range(0, secs+1): # The +1 repeats the first vertex so the polygon is closed
if n == secs:
ang = 0 # reset the angle so the last point exactly matches the first
ptX = (centerPt.X + radius * math.cos(ang))
ptY = (centerPt.Y + radius * math.sin(ang))
ptZ = (centerPt.Z)+ 0.1
cP.append(rc.Geometry.Point3d(ptX, ptY, ptZ))
ang += div
#Draw each climatic data
dataPolyline = rc.Geometry.PolylineCurve(cP)
meshedC = rc.Geometry.Mesh.CreateFromPlanarBoundary(dataPolyline.ToNurbsCurve(), meshingP)
# generate the color list for all the vertices
repeatedColors = []
for face in range(meshedC.Faces.Count):
repeatedColors.append(color)
# use ladybug functions to color the circle
colMesh = lb_visualization.colorMesh(repeatedColors, meshedC)
return colMesh
# Routine that gets monthly statistics and Min/Max values
def monthCalcs(totalHrs, comfID, pshID, ecID, htmID, nvID, dryBulbTemperature, relativeHumidity, monthPoints, monthColors, comfortNOcomfortColors):
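# For every month: find the min/max dry-bulb temperature and relative humidity, count
# the hours captured by each strategy, and record per-hour colours (comfort/no-comfort
# and by month). Returns the min/max marker geometry, the per-month strategy statistics
# and the two colour lists. Temperatures are doubled because the chart's Y axis is 2 * degC.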
stM = 0
pointMinMax = []
geoMinMax = []
allMinMax = []
comfID_m = []
pshID_m = []
ecID_m = []
htmID_m = []
nvID_m = []
colByComfort = []
colByMonth = []
for m in range(0, len(monthPoints)):
geoMinMax.append([])
comfID_m.append([])
pshID_m.append([])
ecID_m.append([])
htmID_m.append([])
nvID_m.append([])
cfZ = pshZ = ecZ = htmZ = nvZ = 0
colByComfort.append([])
colByMonth.append([])
minTemp = 50.0
maxTemp = -50.0
minHum = 100.0
maxHum = 0.0
monthHours = len(monthPoints[m])
for n in range(stM, stM + len(monthPoints[m])): #Start the counter from the previous month's last hour
#for temp in dryBulbTemperature:
if dryBulbTemperature[n] < minTemp:
minTemp = dryBulbTemperature[n]
if dryBulbTemperature[n] > maxTemp:
maxTemp = dryBulbTemperature[n]
if relativeHumidity[n] < minHum:
minHum = relativeHumidity[n]
if relativeHumidity[n] > maxHum:
maxHum = relativeHumidity[n]
# Summing up the points for each strategy
if comfID[n] == 1: cfZ += 1
if pshID[n] == 1: pshZ += 1
if ecID[n] == 1: ecZ += 1
if htmID[n] == 1: htmZ += 1
if nvID[n] == 1: nvZ += 1
# See if the point is in any comfort strategy and the use a color for Comfort/NotComfort
if comfID[n] == 1 or pshID[n] == 1 or ecID[n] == 1 or htmID[n] == 1 or nvID[n] == 1:
colByComfort[m].append(comfortNOcomfortColors[1])
else:
colByComfort[m].append(comfortNOcomfortColors[0])
#colByMonth[m].append(colorP)
colByMonth[m].append(monthColors[m])
# Calculate the effective percentage per month and the number of comfort hours for each strategy
czM_Perc = cfZ * 100.0 / len(monthPoints[m])
comfID_m[m] = [czM_Perc, cfZ, monthHours]
pshM_Perc = pshZ * 100.0 / len(monthPoints[m])
pshID_m[m] = [pshM_Perc, pshZ, monthHours]
ecM_Perc = ecZ * 100.0 / len(monthPoints[m])
ecID_m[m] = [ecM_Perc, ecZ, monthHours]
htmM_Perc = htmZ * 100.0 / len(monthPoints[m])
htmID_m[m] = [htmM_Perc, htmZ, monthHours]
nvM_Perc = nvZ * 100.0 / len(monthPoints[m])
nvID_m[m] = [nvM_Perc, nvZ, monthHours]
#print m, comfID_m[m], pshID_m[m], ecID_m[m], htmID_m[m], nvID_m[m]
pointMinMax.append([minTemp * 2, maxTemp * 2, minHum, maxHum])
stM = stM + len(monthPoints[m])
geoMinMax[m].append(rc.Geometry.Line(pointMinMax[m][3], pointMinMax[m][0], 0, pointMinMax[m][2], pointMinMax[m][1], 0)) #Line connecting min-max
colorP = System.Drawing.Color.FromArgb(0, 0, 0)
minPt = rc.Geometry.Point3d(pointMinMax[m][3], pointMinMax[m][0], 0)
maxPt = rc.Geometry.Point3d(pointMinMax[m][2], pointMinMax[m][1], 0)
dataPolygon = drawPolygon (minPt, .8, 4, colorP)
geoMinMax[m].append(dataPolygon)
dataPolygon = drawPolygon (maxPt, .8, 4, colorP)
geoMinMax[m].append(dataPolygon)
#geoMinMax[m].append(rc.Geometry.Circle(minPt, 0.5))
#geoMinMax[m].append(rc.Geometry.Circle(maxPt, 0.5))
# Collect all monthly results for all strategies under one variable - in the future all calculations should be done under this variable
strategyMonth = []
for count in range(0, len(StNames)):
strategyMonth.append([])
for month in range(0, len(MonthNames)):
strategyMonth[count].append([])
if StNames[count] == 'ComfortZone':
strategyMonth[count][month] = comfID_m[month]
elif StNames[count] == 'PassiveSolarHeating':
strategyMonth[count][month] = pshID_m[month]
elif StNames[count] == 'EvaporativeCooling':
strategyMonth[count][month] = ecID_m[month]
elif StNames[count] == 'HighTermalMass+NightVent':
strategyMonth[count][month] = htmID_m[month]
elif StNames[count] == 'NaturalVentilation':
strategyMonth[count][month] = nvID_m[month]
#Bring all set of lines and circles per month together in one list.
for item in geoMinMax:
allMinMax.append(item)
#return allMinMax, strategyMonth, comfID_m, pshID_m, ecID_m, htmID_m, nvID_m
return allMinMax, strategyMonth, colByComfort, colByMonth
def monthForHour(hour):
for m in range(0, len(MonthNames)+1):
if int(hour) < ((MonthDays[m] * 24)):
hrCol = m
break
return hrCol
def seasonHours(period, totalHrs):
x = period.split(' ')
season = [[],[]]
season[0].append(int(x[0]) - 1) # Month
season[0].append(int(x[1]) - 1) # Day
season[0].append(int(x[2]) - 1) # Hour
season[1].append(int(x[3]) - 1) # Month
season[1].append(int(x[4]) - 1) # Day
season[1].append(int(x[5]) - 1) # Hour
return season
def calcHourYear(season, i):
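# Assumption: MonthDays (defined earlier in this component) holds the cumulative day
# count at the start of each month, so JulDay is the zero-based day of year and
# HourYear the zero-based hour of year.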
# print 'in calcHourYear ----- ----- ----- season is ', season[i]
JulDay = MonthDays[season[i][0]] + season[i][1]
HourYear = (JulDay) * 24 + season[i][2]
# print '1. For month %d day %d hour %d Julian Day is %d HourYear is %d \n' % (season[i][0], season[i][1], season[i][2], JulDay, HourYear)
return JulDay, HourYear
def assignHour2Season(start, end, totalHrs, iSeason, seasonID, dbtValue, rhValue):
# Build the list of hour-of-year indices covered by this season,
# wrapping past the end of the year when the period spans Dec-Jan.
if start > end:
SeasHrs = range(start, totalHrs) + range(0, end + 1)
else:
SeasHrs = range(start, end + 1)
##for id in range(start, end): #to iterate between start to end
for id, m in enumerate(SeasHrs):
# print 'Serial id - Choosen Hour m ', id, m
seasonID[m] = iSeason
# return seasonHrs, seasonID
return seasonID
def setSeasonID(dryBulbTemperature, relativeHumidity, totalHrs):
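# Tags every hour of the year with a season ID (0 = unassigned, 1 = summer, 3 = winter)
# from the optional analysisPeriodSummer_/analysisPeriodWinter_ inputs; the IDs are
# later used to colour the hour points by season.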
#Define Model Tolerance
tol = sc.doc.ModelAbsoluteTolerance
#Plot Point Values.
#******************
dbtValue = []
seasonID = [0 for x in range(totalHrs)]
for temp in dryBulbTemperature:
dbtValue.append(temp)
rhValue = []
relativeHumidity = _relativeHumidity[7:]
for temp in relativeHumidity:
rhValue.append(temp)
if analysisPeriodWinter_:
stMonth, stDay, stHour, endMonth, endDay, endHour = lb_preparation.readRunPeriod(analysisPeriodWinter_, False)
stringWinter = str(stMonth) + ' ' + str(stDay) + ' ' + str(stHour) + ' ' + str(endMonth) + ' ' + str(endDay) + ' ' + str(endHour)
periodWinter = stringWinter
winterSeason = seasonHours(periodWinter, totalHrs)
# # Calling function to calculate Day of Year and Hour of Year for START-END period
JulDay1, hourWinterStart = calcHourYear(winterSeason, 0)
JulDay2, hourWinterEnd = calcHourYear(winterSeason, 1)
# seasonHrsWinter, seasonID = assignHour2Season(hourWinterStart, hourWinterEnd, totalHrs, 3, seasonID, dbtValue, rhValue)
seasonID = assignHour2Season(hourWinterStart, hourWinterEnd, totalHrs, 3, seasonID, dbtValue, rhValue)
## print 'Winter: Start %d End %d Total Hours %d \n' % (hourWinterStart, hourWinterEnd, hourWinterEnd - hourWinterStart + 1)
# print 'seasonHrsWinter ', seasonHrsWinter
if analysisPeriodSummer_:
stMonth, stDay, stHour, endMonth, endDay, endHour = lb_preparation.readRunPeriod(analysisPeriodSummer_, False)
stringSummer = str(stMonth) + ' ' + str(stDay) + ' ' + str(stHour) + ' ' + str(endMonth) + ' ' + str(endDay) + ' ' + str(endHour)
periodSummer = stringSummer
summerSeason = seasonHours(periodSummer, totalHrs)
# Calling function to calculate Day of Year and Hour of Year for START-END period
JulDay1, hourSummerStart = calcHourYear(summerSeason, 0)
JulDay2, hourSummerEnd = calcHourYear(summerSeason, 1)
# seasonHrsSummer, seasonID = assignHour2Season(hourSummerStart, hourSummerEnd, totalHrs, 1, seasonID, dbtValue, rhValue)
seasonID = assignHour2Season(hourSummerStart, hourSummerEnd, totalHrs, 1, seasonID, dbtValue, rhValue)
## print 'Summer: Start %d End %d Total Hours %d \n' % (hourSummerStart, hourSummerEnd, hourSummerEnd - hourSummerStart + 1)
# print 'seasonHrsSummer ', seasonHrsSummer
# f = open("C:\WorkingFolder\Courses\Rhino-Grasshopper-Diva\PythonStuff\BioclimaticChart\dataTMP.csv","wr")
# f.write('SeasonID,Temperature,RelativeHumidity\n')
# for m in range(0, totalHrs):
# string = str(seasonID[m])+', '+str(dbtValue[m])+', '+str(rhValue[m])+','
# f.write(string+'\n')
# f.close()
# print 'winterSeason ', winterSeason
# print 'summerSeason ', summerSeason
## return seasonID, winterSeason, summerSeason
return seasonID
def createFrequencyMesh(orgY, dryBulbTemperature, relativeHumidity, cullMesh, lb_preparation, lb_visualization):
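# Builds a grid of 5 x 5 quad faces spanning the chart (relative humidity on X,
# doubled temperature on Y), counts how many hours land in each cell, colours the
# faces with the Ladybug gradient and optionally culls faces that contain no hours.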
#Read Legend Parameters
if legendPar_ == None:
legendPar = [None, None, None, None, None, None, None, None]
else: legendPar = legendPar_
lowB, highB, numSeg, customColors, legendBasePoint, legendScale,\
legendFont, legendFontSize, legendBold = lb_preparation.readLegendParameters(legendPar, False)
hourPts = []
for count, ratio in enumerate(relativeHumidity):
hourPts.append(rc.Geometry.Point3d(dryBulbTemperature[count], relativeHumidity[count], 0))
#Make a mesh.
gridSize = 5
joinedMesh = rc.Geometry.Mesh()
meshPoints = []
for yVal in range(orgY, 100, gridSize):
#####for yVal in range(0, 100, gridSize):
for xVal in range(0, 100, gridSize):
point1 = rc.Geometry.Point3d(xVal, yVal, 0)
point2 = rc.Geometry.Point3d(xVal+gridSize, yVal, 0)
point3 = rc.Geometry.Point3d(xVal+gridSize, yVal+gridSize, 0)
point4 = rc.Geometry.Point3d(xVal, yVal+gridSize, 0)
meshPoints.append([point1, point2, point3, point4])
for list in meshPoints:
mesh = rc.Geometry.Mesh()
for point in list:
mesh.Vertices.Add(point)
mesh.Faces.AddFace(0, 1, 2, 3)
joinedMesh.Append(mesh)
polyCurveList = []
for list in meshPoints:
pointList = [list[0], list[1], list[2], list[3],list[0]]
polyLine = rc.Geometry.PolylineCurve(pointList)
polyCurveList.append(polyLine)
#Make a list to hold values for all of the mesh faces.
meshFrequency = []
for count, value in enumerate(range(orgY, 100, 5)):
####for count, value in enumerate(range(0, 100, 5)):
meshFrequency.append([])
for face in range(0, 100, 5):
meshFrequency[count].append([])
def getHumidityIndex(hour):
# Map relative humidity (0-100%) onto twenty 5%-wide bins (index 0-19).
return min(int(relativeHumidity[hour] // 5), 19)
addGridToIndex = abs(orgY/2.0) # use float division so the temperature-to-row mapping stays exact
module = gridSize/2.0
for hour, temp in enumerate(dryBulbTemperature):
tempIndex = int((float(temp)+addGridToIndex) / module)
humIndex = getHumidityIndex(hour)
if tempIndex < 28:
meshFrequency[tempIndex][humIndex].append(1)
#Sum all of the lists together to get the frequency.
finalMeshFrequency = []
for templist in meshFrequency:
for humidlist in templist:
finalMeshFrequency.append(sum(humidlist))
#Get a list of colors
colors = lb_visualization.gradientColor(finalMeshFrequency, lowB, highB, customColors)
# color the mesh faces.
joinedMesh.VertexColors.CreateMonotoneMesh(System.Drawing.Color.Gray)
for srfNum in range (joinedMesh.Faces.Count):
joinedMesh.VertexColors[4 * srfNum + 0] = colors[srfNum]
joinedMesh.VertexColors[4 * srfNum + 1] = colors[srfNum]
joinedMesh.VertexColors[4 * srfNum + 3] = colors[srfNum]
joinedMesh.VertexColors[4 * srfNum + 2] = colors[srfNum]
# Remove the mesh faces that do not have any hour associated with them.
if cullMesh == True:
cullFaceIndices = []
for count, freq in enumerate(finalMeshFrequency):
if freq == 0:
cullFaceIndices.append(count)
joinedMesh.Faces.DeleteFaces(cullFaceIndices)
#Return everything that's useful.
return hourPts, joinedMesh, finalMeshFrequency
def raggedListToDataTree(rl):
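# Converts a nested (ragged) Python list into a Grasshopper DataTree with one branch
# per sub-list, so the per-month lists can be output as separate tree branches.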
result = DataTree[System.Object]()
for i, leg in enumerate(rl):
#for i in range(len(rl)):
path = GH_Path(i)
temp = []
for j, leg in enumerate(rl[i]):
temp.append(rl[i][j])
#print 'J ', j, rl[i][j]
result.AddRange(temp, path)
return result
def checkComfortOrNot(strategyNames, totalHrs, comfID, pshID, ecID, htmID, nvID, epwData, epwStr):
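# Marks each hour 1 when it falls inside any strategy polygon and 0 otherwise, then
# prefixes the list with Ladybug-style header rows taken from epwStr; the same is
# repeated per strategy for the strategyOrNot output.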
totalComfortOrNot = []
comfHours = 0
for m in range(0, totalHrs):
if comfID[m] == 1 or pshID[m] == 1 or ecID[m] == 1 or htmID[m] == 1 or nvID[m] == 1:
#if comfID[m] == 1:
#if pshID[m] == 1:
#if ecID[m] == 1:
#if htmID[m] == 1:
#if nvID[m] == 1:
totalComfortOrNot.append(1)
comfHours += 1
else: totalComfortOrNot.append(0)
percComfTotal = float(comfHours) / totalHrs * 100
##print 'Comfortable Hours %d, which are %.2f%%' % (comfHours, percComfTotal)
#$#$comfortResults.append('CMF: ' + str(hrsCMF) + ' hours, ' + str(cmfPercent) + '%')
totalComfortOrNot.insert(0, epwStr[6])
totalComfortOrNot.insert(0, epwStr[5])
totalComfortOrNot.insert(0, epwStr[4])
totalComfortOrNot.insert(0, "Boolean Value")
combString = '%.1f%% (%d hrs) Comfortable Hours in All Strategies, ' % (percComfTotal, comfHours)
totalComfortOrNot.insert(0, combString)
totalComfortOrNot.insert(0, epwStr[1])
totalComfortOrNot.insert(0, epwStr[0])
#Calculate whether each strategy is in comfort or not for each hour. For output "strategyOrNot"
#---------------------------------------------
strategyOrNotList = []
comfHoursStrategy = []
percComfStrategy = []
for count, text in enumerate(strategyNames):
if count == 0: stComf = list(comfID) # CZ
elif count == 1: stComf = list(pshID) # PSH
elif count == 2: stComf = list(ecID) # EC
elif count == 3: stComf = list(htmID) # HTM
elif count == 4: stComf = list(nvID) # NV
comfHours = 0
strategyOrNotList.append([])
for m in range(0, totalHrs):
if stComf[m] == 1:
strategyOrNotList[count].append(1)
comfHours += 1
else: strategyOrNotList[count].append(0)
comfHoursStrategy.append(comfHours)
percComfStrategy.append(float(comfHoursStrategy[count]) / totalHrs * 100)
#print 'Comfortable Hours %d, which are %.2f%%' % (comfHoursStrategy[count], percComfStrategy[count])
strategyOrNotList[count].insert(0, epwStr[6])
strategyOrNotList[count].insert(0, epwStr[5])
strategyOrNotList[count].insert(0, epwStr[4])
strategyOrNotList[count].insert(0, "Boolean Value")
combString = '%.1f%% (%d hrs) Comfortable Hours in %s, ' % (percComfStrategy[count], comfHoursStrategy[count], text)
strategyOrNotList[count].insert(0, combString)
strategyOrNotList[count].insert(0, epwStr[1])
strategyOrNotList[count].insert(0, epwStr[0])
#---------------------------------------------
return totalComfortOrNot, strategyOrNotList
# ****** END DEF *********************
# ************************************
def main(epwData, epwStr):
if sc.sticky.has_key('ladybug_release'):
lb_preparation = sc.sticky["ladybug_Preparation"]()
lb_visualization = sc.sticky["ladybug_ResultVisualization"]()
# Read the legend parameters.
lowB, highB, numSeg, customColors, legendBasePoint, legendScale, \
legendFont, legendFontSize, legendBold = lb_preparation.readLegendParameters(legendPar_, False)
if cullMesh_:
cullMesh = cullMesh_
else:
cullMesh = False
# Fix MET and Clo factor for strategies position.
lowActivity = 3
highActivity = 4
defMet = 1.3
winterClo = 2
SummerClo = 5
defClo = 0.8
if basePoint_ != None:
basePoint = basePoint_
xP = 0.0 #basePoint_.X
yP = 0.0 #basePoint_.Y
zP = 0.0 #basePoint_.Z
else:
basePoint = rc.Geometry.Point3d(0,0,0)
xP, yP, zP = 0.0, 0.0, 0.0
if metabolicRate_ :
metRate = metabolicRate_
if metabolicRate_ >= 2.0:
actShift = highActivity
else:
actShift = lowActivity
shiftFactorMet = (defMet - metRate) * actShift # Proof of concept. Need to fix later
else:
metRate = defMet
shiftFactorMet = 0
if clothingLevel_:
cloLevel = clothingLevel_
shiftFactorClo = (defClo - clothingLevel_) * winterClo # Proof of concept. Need to fix later
else:
cloLevel = 0.8
shiftFactorClo = 0
shiftFactor = shiftFactorMet + shiftFactorClo
#print 'shiftFactorMet %.2f, shiftFactorClo %.2f, shiftFactor %.2f' % (shiftFactorMet, shiftFactorClo, shiftFactor)
#Define color palettes for outputs and legends.
monthColors, comfortNOcomfortColors, strategiesColors = colors()
#Make chart curves.
#Plot Point Values.
#******************
strategyNames = []
strategyNames.append('Comfort')
strategyNames.append('Passive Solar Heating')
strategyNames.append('Evaporative Cooling')
strategyNames.append('Thermal Mass + Night Vent')
strategyNames.append('Natural Ventilation')
chartHourPoints = []
hourPointColorsByComfort = []
hourPointColorsByMonth = []
min_maxPoints = []
monthPoints = []
comfort_strategyPolygons = []
yPointValue = []
dbtValue = []
location = _dryBulbTemperature[1:2][0]
dryBulbTemperature = _dryBulbTemperature[7:]
totalHrs = len(dryBulbTemperature)
minTemp = 0.0
for temp in dryBulbTemperature:
dbtValue.append(temp)
if temp < minTemp:
minTemp = temp
#print 'minTemp ', minTemp
yPointValue.append(temp*2 + yP)
gridSize = 5.0
addGrid = (int(math.ceil(abs(minTemp / gridSize))) * (-1) * gridSize) * 2 # This is for negative temperatures
#print 'minTemp %.1f addGrid %d' % (minTemp, addGrid)
# Grid Lines *******************************
gridLines = []
orgX = int(0 + xP)
orgY = int(int(addGrid) + yP)
orgZ = int(0 + zP)
gridStep = 10
#print 'orgY ', orgY
xLineValue = range(orgX, orgX + 110, gridStep)
yLineValue = range(orgY, 110, gridStep)
#starLine = rc.Geometry.Line(0, 0, 0, 0, 100, 0)
for value in xLineValue:
gridLines.append(rc.Geometry.Line(value+xP, int(addGrid)+yP, 0+zP, value+xP, 100+yP, 0+zP))
for value in yLineValue: # Left to right lines
gridLines.append(rc.Geometry.Line(0+xP, value+yP, 0+zP, 100+xP, value+yP, 0+zP))
# End Grid Lines ****************************
# Print Title and legends in grid: X/Y axis ****************************
chartLayout = createChartLayout(orgX, orgY, orgZ, location, legendFont, legendFontSize)
xPointValue = []
rhValue = []
relativeHumidity = _relativeHumidity[7:]
for temp in relativeHumidity:
rhValue.append(temp)
xPointValue.append(temp + xP)
#stringSummer, stringWinter, seasonID = setSeasonID(dryBulbTemperature, relativeHumidity, totalHrs)
seasonID = setSeasonID(dryBulbTemperature, relativeHumidity, totalHrs)
# print 'SeasonID ', seasonID # Winter = 3, Summer = 1
hourPoints = []
hourPointsCirc = []
##hourColor = []
colPnt1 = []
colPnt2 = []
colPnt3 = []
colPnt4 = []
colPnt5 = []
colPnt6 = []
colPnt7 = []
colPnt8 = []
colPnt9 = []
colPnt10 = []
colPnt11 = []
colPnt12 = []
dataRadius = 0.4
nVert = 4
# for count, xVal in enumerate(xPointValue):
# centerPt = rc.Geometry.Point3d(xVal+xP, yPointValue[count]+yP, 0+zP)
for hour in range(0, totalHrs):
##for hour in range(0, totalHrs - 1):
# Colors according to Season
if seasonID[hour] == 3: # Winter
colorP = [50, 50, 255]
elif seasonID[hour] == 1: # Summer
colorP = [255, 255, 50]
centerPt = rc.Geometry.Point3d(xPointValue[hour], yPointValue[hour]+yP, 0+zP)
hourPoints.append(rc.Geometry.Point3d(centerPt))
hourPointsCirc = rc.Geometry.Circle(centerPt, 0.25)
# Colors according to Month
col = monthForHour(hour)
##hourColor.append(col) # Here you get the month that the hour belongs to
if col == 0: colPnt1.append(hourPointsCirc)
elif col == 1: colPnt2.append(hourPointsCirc)
elif col == 2: colPnt3.append(hourPointsCirc)
elif col == 3: colPnt4.append(hourPointsCirc)
elif col == 4: colPnt5.append(hourPointsCirc)
elif col == 5: colPnt6.append(hourPointsCirc)
elif col == 6: colPnt7.append(hourPointsCirc)
elif col == 7: colPnt8.append(hourPointsCirc)
elif col == 8: colPnt9.append(hourPointsCirc)
elif col == 9: colPnt10.append(hourPointsCirc)
elif col == 10: colPnt11.append(hourPointsCirc)
elif col == 11: colPnt12.append(hourPointsCirc)
# Collect points according to MONTH
monthPoints.append(colPnt1)
monthPoints.append(colPnt2)
monthPoints.append(colPnt3)
monthPoints.append(colPnt4)
monthPoints.append(colPnt5)
monthPoints.append(colPnt6)
monthPoints.append(colPnt7)
monthPoints.append(colPnt8)
monthPoints.append(colPnt9)
monthPoints.append(colPnt10)
monthPoints.append(colPnt11)
monthPoints.append(colPnt12)
hourPts, chartMesh, meshFaceValues = createFrequencyMesh(orgY, dryBulbTemperature, relativeHumidity, cullMesh, lb_preparation, lb_visualization)
# Legend for Mesh of Colors from FrequencyMesh routine
if legendBasePoint == None:
####legendBasePoint = lb_visualization.BoundingBoxPar[0]
#legendBasePoint = (rc.Geometry.Point3d(lb_visualization.BoundingBoxPar[0], lb_visualization.BoundingBoxPar[1], 0))
legendBasePoint = rc.Geometry.Point3d(orgX + 101, orgY, 0)
legendChartMesh = []
legendTitle = "Hours"
legendScale = .6
#legendFontSize = 2
#lb_visualization.calculateBB(chartText, True)
lb_visualization.calculateBB(chartLayout[0:90], True)
legendSrfs, legendText, legendTextCrv, textPt, textSize = lb_visualization.createLegend(meshFaceValues, lowB, highB, numSeg, legendTitle, lb_visualization.BoundingBoxPar, legendBasePoint, legendScale, legendFont, legendFontSize)
##legendColors = lb_visualization.gradientColor(legendText[:-1], lowB, highB, customColors)
legendColors = lb_visualization.gradientColor(legendText[:-1], lowB, highB, customColors)
legendSrfs = lb_visualization.colorMesh(legendColors, legendSrfs)
legendChartMesh.append(legendSrfs)
for list in legendTextCrv:
for item in list:
legendChartMesh.append(item)
legendBasePoint = rc.Geometry.Point3d(orgX + 105, orgY, 0)
mainLegHeight = ((lb_visualization.BoundingBoxPar[2] / 10) * legendScale) * numSeg
# ***********
# List of Comfort Coordinates **************************
cf_Ptlist = [27.02+xP, 42.12+yP,0.0+zP],[18.27+xP,52.64+yP,0.0+zP], [64.03+xP,45.77+yP,0.0+zP],[80.00+xP,40.78+yP,0.0+zP],\
[27.02+xP, 42.12+yP,0.0+zP]
hrsCMF, cmfPercent, comfID, comfortPolygon, comfortPolyline = strategyDraw_Calc('COMF', shiftFactor, cf_Ptlist, hourPoints, tol, dryBulbTemperature, totalHrs, 30,30,30)
##print 'Comf hours %d, Comf Percentage %.2f' % (hrsCMF, cmfPercent)
# List of Passive Solar Heating Coordinates **************************
pshPtList = [1.07+xP,11.83+yP,0.00+zP],[1.07+xP,42.82+yP,0.00+zP],[98.97+xP,40.44+yP,0.00+zP],[98.97+xP,9.61+yP,0.00+zP], \
[1.07+xP,11.83+yP,0.00+zP]
hrsPSH, pshPercent, pshID, pshPolygon, pshPolyline = strategyDraw_Calc('PSH', shiftFactor, pshPtList, hourPoints, tol, dryBulbTemperature, totalHrs, 200, 200, 20)
##print 'PSH hours %d, PSH Percentage %.2f' % (hrsPSH, pshPercent)
#List of Evaporative Cooling Coordinates **************************
ec_Ptlist = [16.62+xP,42.42+yP,0.0+zP],[1.07+xP,57.79+yP,0.0+zP], [1.07+xP,82.21+yP,0.0+zP],[11.18+xP,82.21+yP,0.0+zP],\
[80.0+xP,45.79+yP,0.0+zP],[80.0+xP,40.78+yP,0.0+zP],[64.03+xP,45.77+yP,0.0+zP],[18.27+xP,52.64+yP,0.0+zP],\
[27.02+xP,42.12+yP,0.0+zP], [16.62+xP,42.42+yP,0.0+zP]
hrsEC, ecPercent, ecID, ecPolygon, ecPolyline = strategyDraw_Calc('EC', shiftFactor, ec_Ptlist, hourPoints, tol, dryBulbTemperature, totalHrs, 20, 220, 20)
##print 'EC hours %d, EC Percentage %.2f' % (hrsEC, ecPercent)
#List of High Thermal Mass **************************
htmPtlist = [19.80+xP,42.35+yP,0.0+zP],[7.86+xP,71.28+yP,0.0+zP],[30.08+xP,71.28+yP,0.0+zP],[46.95+xP,66.03+yP,0.0+zP],\
[80.0+xP,49.12+yP,0.0+zP],[80.0+xP,40.78+yP,0.0+zP],[64.03+xP,45.77+yP,0.0+zP],[18.27+xP,52.64+yP,0.0+zP],\
[27.02+xP,42.12+yP,0.0+zP],[19.80+xP,42.35+yP,0.0+zP]
hrsHTM, htmPercent, htmID, htmPolygon, htmPolyline = strategyDraw_Calc('HTM', shiftFactor, htmPtlist, hourPoints, tol, dryBulbTemperature, totalHrs, 180, 50, 50)
##print 'HTM hours %d, HTM Percentage %.2f' % (hrsHTM, htmPercent)
#List of Natural Ventilation **************************
nvPtlist = [18.27+xP,52.64+yP,0.0+zP],[18.08+xP,62.56+yP,0.0+zP],[45.56+xP,62.56+yP,0.0+zP],[97.91+xP,53.79+yP,0.0+zP],\
[97.91+xP,40.47+yP,0.+zP], [80.0+xP,40.78+yP,0.0+zP],[64.03+xP,45.77+yP,0.0+zP],[18.27+xP,52.64+yP,0.0+zP]
hrsNV, nvPercent, nvID, nvPolygon, nvPolyline = strategyDraw_Calc('NV', shiftFactor, nvPtlist, hourPoints, tol, dryBulbTemperature, totalHrs, 50, 50, 180)
##print 'NV hours %d, NV Percentage %.2f' % (hrsNV, nvPercent)
# Collect all strategies and results
##comfort_strategyPolygons.append(comfortPolygon)
##comfort_strategyPolygons.append(pshPolygon)
##comfort_strategyPolygons.append(ecPolygon)
##comfort_strategyPolygons.append(htmPolygon)
##comfort_strategyPolygons.append(nvPolygon)
#This block draws the polygons with the same color. The block below this draws the strategies colored
#If wanted, uncomment this block and comment the block below
###comfort_strategyPolygons.append(comfortPolyline)
###comfort_strategyPolygons.append(pshPolyline)
###comfort_strategyPolygons.append(ecPolyline)
###comfort_strategyPolygons.append(htmPolyline)
###comfort_strategyPolygons.append(nvPolyline)
comfortMesh = rc.Geometry.Mesh()
comfortMesh.Append(rc.Geometry.Mesh.CreateFromBrep(comfortPolyline)[0])
comfortMesh.VertexColors.CreateMonotoneMesh(strategiesColors[0])
comfort_strategyPolygons.append(comfortMesh)
pshMesh = rc.Geometry.Mesh()
pshMesh.Append(rc.Geometry.Mesh.CreateFromBrep(pshPolyline)[0])
pshMesh.VertexColors.CreateMonotoneMesh(strategiesColors[1])
comfort_strategyPolygons.append(pshMesh)
ecMesh = rc.Geometry.Mesh()
ecMesh.Append(rc.Geometry.Mesh.CreateFromBrep(ecPolyline)[0])
ecMesh.VertexColors.CreateMonotoneMesh(strategiesColors[2])
comfort_strategyPolygons.append(ecMesh)
htmMesh = rc.Geometry.Mesh()
htmMesh.Append(rc.Geometry.Mesh.CreateFromBrep(htmPolyline)[0])
htmMesh.VertexColors.CreateMonotoneMesh(strategiesColors[3])
comfort_strategyPolygons.append(htmMesh)
nvMesh = rc.Geometry.Mesh()
nvMesh.Append(rc.Geometry.Mesh.CreateFromBrep(nvPolyline)[0])
nvMesh.VertexColors.CreateMonotoneMesh(strategiesColors[4])
comfort_strategyPolygons.append(nvMesh)
#for count, value in enumerat(evapCooling):
# if comfortList[count] == 1:
# value = 0
# else: pass
comfortResults = []
comfortResults.append('CMF: ' + str(hrsCMF) + ' hours, ' + str(cmfPercent) + '%')
comfortResults.append('PSH: ' + str(hrsPSH) + ' hours, ' + str(pshPercent) + '%')
comfortResults.append('EC: ' + str(hrsEC) + ' hours, ' + str(ecPercent) + '%')
comfortResults.append('HTM: ' + str(hrsHTM) + ' hours, ' + str(htmPercent) + '%')
comfortResults.append('NV: ' + str(hrsNV) + ' hours, ' + str(nvPercent) + '%')
strategyPercent = []
strategyPercent.append(cmfPercent)
strategyPercent.append(pshPercent)
strategyPercent.append(ecPercent)
strategyPercent.append(htmPercent)
strategyPercent.append(nvPercent)
strategyHours = []
strategyHours.append(hrsCMF)
strategyHours.append(hrsPSH)
strategyHours.append(hrsEC)
strategyHours.append(hrsHTM)
strategyHours.append(hrsNV)
min_maxPointsList, strategyMonth, colByComfortList, colByMonthList = monthCalcs(totalHrs, comfID, pshID, ecID, htmID, nvID, dryBulbTemperature, relativeHumidity, monthPoints, monthColors, comfortNOcomfortColors)
#f = open("C:\WorkingFolder\Courses\Rhino-Grasshopper-Diva\PythonStuff\BioclimaticChart\dataTMP.csv","wr")
#f.write('SeasonID,Temperature,RelativeHumidity, Comfort, PSH, EC, HTM, NV\n')
#for m in range(0, totalHrs):
# string = str(seasonID[m])+', '+str(dbtValue[m])+', '+str(rhValue[m])+', '+str(comfID[m])+', '+str(pshID[m])+', '+str(ecID[m])+', '+str(htmID[m])+', '+str(nvID[m])+','
# f.write(string+'\n')
#f.close()
totalComfortOrNot, strategyOrNotList = checkComfortOrNot(strategyNames, totalHrs, comfID, pshID, ecID, htmID, nvID, epwData, epwStr)
chartLegend = createChartLegend(orgX, orgY, orgZ, strategyNames, lb_preparation, legendScale, legendFont, legendFontSize, lb_visualization, strategiesColors, monthColors, comfortNOcomfortColors, customColors, totalComfortOrNot)
if calculateCharts_ == True:
resultsChart = showResults(basePoint_, mainLegHeight, strategyNames, strategyPercent, strategyHours, strategyMonth, \
lowB, highB, numSeg, customColors, legendBasePoint, legendScale, legendFont, legendFontSize, lb_preparation, lb_visualization, strategiesColors)
else:
resultsChart = None
#If the user has selected to scale or move the geometry, scale it all and/or move it all.
#if basePoint_ != None:
#basePoint = basePoint_
#else: basePoint = rc.Geometry.Point3d(0,0,0)
if scale_ != None:
scale = scale_
else: scale = 1
chartGridAndTxt = []
for item in gridLines: chartGridAndTxt.append(item)
for item in chartLayout: chartGridAndTxt.append(item)
for item in chartLegend: chartGridAndTxt.append(item)
scaleFinal = rc.Geometry.Transform.Scale(basePoint, scale)
move = rc.Geometry.Transform.Translation(basePoint.X, basePoint.Y, basePoint.Z)
transformMtx = scaleFinal * move
for geo in comfort_strategyPolygons: geo.Transform(transformMtx)
chartMesh.Transform(transformMtx)
for geo in chartLayout: geo.Transform(transformMtx)
for geo in chartLegend: geo.Transform(transformMtx)
for geo in legendChartMesh: geo.Transform(transformMtx)
legendBasePoint.Transform(transformMtx)
for geo in gridLines: geo.Transform(transformMtx)
for geoList in monthPoints:
for geo in geoList:
geo.Transform(transformMtx)
for geoList in min_maxPointsList:
for geo in geoList:
geo.Transform(transformMtx)
if calculateCharts_ == True:
for geo in resultsChart: geo.Transform(transformMtx)
#hourPointColorsByComfort = []
#hourPointColorsByMonth = []
#Unpack the data tree of point colors. THIS WORKS FINE BUT I LEFT THE BELOW's OPTION.
#hourPointColorsByComfort = DataTree[Object]()
#for listCount, list in enumerate(colByComfortList):
# for item in list:
# hourPointColorsByComfort.Add(item, GH_Path(listCount))
#print 'monthPoints ', len(monthPoints[0])
chartHourPoints = raggedListToDataTree(monthPoints)
hourPointColorsByComfort = raggedListToDataTree(colByComfortList)
hourPointColorsByMonth = raggedListToDataTree(colByMonthList)
strategyOrNot = raggedListToDataTree(strategyOrNotList)
##monthPointsTree = raggedListToDataTree(monthPoints)
min_maxPoints = raggedListToDataTree(min_maxPointsList)
return comfortResults, totalComfortOrNot, strategyOrNot, \
chartGridAndTxt, chartMesh, legendChartMesh, chartHourPoints, hourPointColorsByComfort, hourPointColorsByMonth, min_maxPoints, \
comfort_strategyPolygons, legendComfortStrategies, legendBasePt, resultsChart
else:
print "You should first let the Ladybug fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
return -1
#Check the inputs.
checkData = False
if _runIt == True:
checkData, epwData, epwStr = checkInputs()
#If the inputs are good, run the function.
if checkData == True:
comfortResults, totalComfortOrNot, strategyOrNot, \
chartGridAndTxt, chartMesh, legendChartMesh, chartHourPoints, hourPointColorsByComfort, hourPointColorsByMonth, min_maxPoints, \
comfort_strategyPolygons, legendComfortStrategies, legendBasePt, resultsChart = main(epwData, epwStr)
#Hide/Show outputs
ghenv.Component.Params.Output[6].Hidden = False # Grid and Text
ghenv.Component.Params.Output[7].Hidden = True # Chart Mesh
ghenv.Component.Params.Output[8].Hidden = True # legendChartMesh
ghenv.Component.Params.Output[9].Hidden = True # chartHourPoints
#ghenv.Component.Params.Output[10].Hidden = True # hourPointColorsByComfort - This is data
#ghenv.Component.Params.Output[11].Hidden = True # hourPointColorsByMonth - This is data
ghenv.Component.Params.Output[12].Hidden = True # min/max Points
ghenv.Component.Params.Output[13].Hidden = False # StrategyPolygons
ghenv.Component.Params.Output[14].Hidden = True # legend
ghenv.Component.Params.Output[17].Hidden = True # Results Chart
|
samuto/ladybug
|
src/Ladybug_Bioclimatic Chart.py
|
Python
|
gpl-3.0
| 83,572
|
[
"EPW"
] |
d5297a84bdbcd32351192cc05792514e7eb1b4c66aafed5f2374085ad89bd787
|
''' Generate a reference for alignment
This |spi| batch file (`spi-reference`) generates a properly filtered and scaled reference for alignment.
Tips
====
#. The volumes do not have to be in SPIDER format (automatic conversion is attempted). However,
:option:`--data-ext` must be used to set the appropriate SPIDER extension.
#. For MRC volumes, the pixel size is extracted from the header, otherwise you must specify it with
`--curr-apix`.
Examples
========
.. sourcecode :: sh
# Source AutoPart - FrankLab only
$ source /guam.raid.cluster.software/arachnid/arachnid.rc
# Create a reference filtered to 30 A from an MRC map
$ spi-reference emd_1076.map -p params.ter -o reference.ter --data-ext ter -r 30
Critical Options
================
.. program:: spi-reference
.. option:: -i <FILENAME1,FILENAME2>, --input-files <FILENAME1,FILENAME2>, FILENAME1 FILENAME2
List of input filenames containing volumes to convert to references.
If you use the parameters `-i` or `--inputfiles` they must be comma separated
(no spaces). If you do not use a flag, then separate by spaces. For a
very large number of files (>5000) use `-i "filename*"`
.. option:: -o <FILENAME>, --output <FILENAME>
Output filename for references with correct number of digits (e.g. enhanced_0000.spi)
.. option:: -p <FILENAME>, --param-file <FILENAME>
Path to SPIDER params file
.. option:: --bin-factor <FLOAT>
Number of times to decimate params file
.. option:: --resolution <FLOAT>
Resolution to filter the volumes
.. option:: --curr-apix <FLOAT>
Current pixel size of the input volume (only necessary if not MRC)
Low-pass Filter Options
=======================
.. option:: --filter-type <INT>
Type of low-pass filter to use with resolution: [1] Fermi(SP, fermi_temp) [2] Butterworth (SP-bp_pass, SP+bp_stop) [3] Gaussian (SP)
.. option:: --fermi-temp <FLOAT>
Fall off for Fermi filter (both high pass and low pass)
.. option:: --bw-pass <FLOAT>
Offset for pass band of the butterworth lowpass filter (sp-bw_pass)
.. option:: --bw-stop <FLOAT>
Offset for stop band of the butterworth lowpass filter (sp+bw_stop)
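As an illustration, a Butterworth low-pass filter (filter type 2) with slightly widened
pass/stop offsets could be requested as below; the 0.05 offsets are placeholder values
for the example, not recommended defaults.
.. sourcecode :: sh
$ spi-reference emd_1076.map -p params.ter -o reference.ter --data-ext ter -r 30 --filter-type 2 --bw-pass 0.05 --bw-stop 0.05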
Other Options
=============
This is not a complete list of options available to this script, for additional options see:
#. :ref:`Options shared by all scripts ... <shared-options>`
#. :ref:`Options shared by |spi| scripts... <spider-options>`
#. :ref:`Options shared by file processor scripts... <file-proc-options>`
#. :ref:`Options shared by SPIDER params scripts... <param-options>`
.. Created on Jul 15, 2011
.. codeauthor:: Robert Langlois <rl2528@columbia.edu>
'''
from ..core.app import program
from ..core.metadata import spider_params, spider_utility
from ..core.parallel import mpi_utility
from ..core.image import ndimage_file
from ..core.spider import spider, spider_file
import filter_volume
import logging, os, numpy
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
def process(filename, spi, output, resolution, curr_apix=0.0, disable_center=False, disable_even=False, **extra):
''' Create a reference from a given density map
:Parameters:
filename : str
Input volume file
output : str
Output reference file
resolution : float
Target resolution to filter reference
curr_apix : float, optional
Current pixel size of input map
extra : dict
Unused key word arguments
:Returns:
filename : str
Filename for correct location
'''
if not disable_even and (extra['window']%2)==1:
extra['window'] += 1
_logger.info("Window size: %d (Forced Even)"%(extra['window']))
_logger.info("Processing: %s"%os.path.basename(filename))
_logger.info("Finished: %d,%d"%(0,5))
if spider_utility.is_spider_filename(filename):
output = spider_utility.spider_filename(output, filename)
header = ndimage_file.read_header(filename)
if curr_apix == 0:
if header['apix'] == 0: raise ValueError, "Pixel size of input volume is unknown - please use `--curr-apix` to set it"
curr_apix = header['apix']
_logger.info("Pixel size: %f for %s"%(curr_apix, filename))
tempfile = mpi_utility.safe_tempfile(spi.replace_ext('tmp_spi_file'))
try:
filename = spider_file.copy_to_spider(filename, tempfile)
except:
if os.path.dirname(tempfile) == "": raise
tempfile = mpi_utility.safe_tempfile(spi.replace_ext('tmp_spi_file'), False)
filename = spider_file.copy_to_spider(filename, tempfile)
w, h, d = spi.fi_h(filename, ('NSAM', 'NROW', 'NSLICE'))
if w != h: raise ValueError, "Width does not match height - requires box"
if w != d: raise ValueError, "Width does not match depth - requires box"
_logger.info("Finished: %d,%d"%(1,5))
_logger.debug("Filtering volume")
if resolution > 0:
filename = filter_volume.filter_volume_lowpass(filename, spi, extra['apix']/resolution, outputfile=output, **extra)
_logger.info("Finished: %d,%d"%(2,5))
_logger.debug("Resizing volume")
filename = resize_volume(filename, spi, curr_apix, outputfile=output, **extra)
_logger.info("Finished: %d,%d"%(3,5))
if not disable_center:
_logger.debug("Centering volume")
filename = center_volume(filename, spi, output)
_logger.info("Finished: %d,%d"%(4,5))
try:
if os.path.exists(tempfile): os.unlink(tempfile)
except: pass
return filename
def center_volume(filename, spi, output):
''' Center the volume in the box
:Parameters:
filename : str
Input volume file
spi : spider.Session
Current SPIDER session
output : str
Output centered volume file
:Returns:
output : str
Output centered volume file
'''
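# Query the centre-of-gravity offsets of the density and shift the volume by the
# negated offsets so the map ends up centred in its box.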
if filename == output: filename = spi.cp(filename)
coords = spi.cg_ph(filename)
return spi.sh_f(filename, tuple(-numpy.asarray(coords[3:])), outputfile=output)
def resize_volume(filename, spi, curr_apix, apix, window, outputfile=None, **extra):
''' Interpolate the volume and change the box size to match the params file
:Parameters:
filename : str
Input volume file
spi : spider.Session
Current SPIDER session
curr_apix : float
Pixel size of input volume
apix : float
Target pixel size (params file)
window : float
Target window size (params file)
outputfile : str
Output interpolated volume file
:Returns:
output : str
Output interpolated volume file
'''
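# Rescale the voxel size to match the params file (interpolate by curr_apix/apix),
# then pad or window the interpolated volume to the target window size.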
w, h, d = spi.fi_h(filename, ('NSAM', 'NROW', 'NSLICE'))
if w != h: raise ValueError, "Width does not match height - requires box"
if w != d: raise ValueError, "Width does not match depth - requires box"
if not numpy.allclose(curr_apix, apix):
bin_factor = curr_apix / apix
_logger.info("Interpolating Structure: %f * %f = %f | %f/%f | %f"%(w, bin_factor, w*bin_factor, apix, curr_apix, window))
w *= bin_factor
h *= bin_factor
d *= bin_factor
filename = spi.ip(filename, (int(w), int(h), int(d)))
if w < window:
_logger.info("Increasing window size from %d -> %d"%(w, window))
filename = spi.pd(filename, window, outputfile=outputfile)
elif w > window:
_logger.info("Decreasing window size from %d -> %d"%(w, window))
filename = spi.wi(filename, window, outputfile=outputfile)
return outputfile
def initialize(files, param):
# Initialize global parameters for the script
if len(files) == 0: return
if param['param_file'] != "" and os.path.splitext(param['param_file'])[1] != "":
_logger.warn("Using extension from SPIDER params file: %s"%param['param_file'])
files=[param['param_file']]
param['spi'] = spider.open_session(files, **param)
if param['new_window'] > 0:
param['bin_factor'] = float(param['window'])/param['new_window']
_logger.info("Params: %s"%param['param_file'])
_logger.info("Bin-factor: %f"%param['bin_factor'])
spider_params.read(param['spi'].replace_ext(param['param_file']), param)
_logger.info("Pixel size: %f"%param['apix'])
_logger.info("Window: %f"%param['window'])
_logger.info("Thread Count: %d"%param['thread_count'])
if os.path.dirname(param['output']) != "" and not os.path.exists(os.path.dirname(param['output'])):
_logger.info("Creating directory: %s"%os.path.dirname(param['output']))
try: os.makedirs(os.path.dirname(param['output']))
except: pass
def finalize(files, **extra):
# Finalize global parameters for the script
_logger.info("Completed")
def supports(files, raw_reference_file="", **extra):
''' Test if this module is required in the project workflow
:Parameters:
files : list
List of filenames to test
raw_reference_file : str
Input filename for raw reference map
extra : dict
Unused keyword arguments
:Returns:
flag : bool
True if this module should be added to the workflow
'''
return raw_reference_file != ""
def setup_options(parser, pgroup=None, main_option=False):
#Setup options for automatic option parsing
from ..core.app.settings import setup_options_from_doc
if main_option:
pgroup.add_option("-i", "--raw-reference-file", input_files=[], help="List of input filenames containing volumes to convert to references", required_file=True, gui=dict(filetype="file-list"))
pgroup.add_option("-o", "--reference-file", output="", help="Output filename for references with correct number of digits (e.g. enhanced_0000.spi)", gui=dict(filetype="save"), required_file=True)
spider_params.setup_options(parser, pgroup, True)
pgroup.add_option("-r", resolution=30.0, help="Resolution to filter the volumes")
pgroup.add_option("", curr_apix=0.0, help="Current pixel size of the input volume (only necessary if not MRC)")
pgroup.add_option("", new_window=0.0, help="Set bin_factor based on new window size")
pgroup.add_option("", disable_center=False, help="Disable centering")
setup_options_from_doc(parser, spider.open_session, group=pgroup)
parser.change_default(thread_count=4, log_level=3)
def check_options(options, main_option=False):
#Check if the option values are valid
from ..core.app.settings import OptionValueError
path = spider.determine_spider(options.spider_path)
if path == "": raise OptionValueError, "Cannot find SPIDER executable in %s, please use --spider-path to specify"%options.spider_path
if main_option:
spider_params.check_options(options)
#if options.resolution <= 0.0: raise OptionValueError, "--resolution must be a positive value greater than 0"
if not spider_utility.test_valid_spider_input(options.input_files):
raise OptionValueError, "Multiple input files must have numeric suffix, e.g. vol0001.spi"
def flags():
''' Get flags that define the supported features
:Returns:
flags : dict
Supported features
'''
return dict(description = '''Generate a reference for alignment
Example:
$ %prog emd_1076.map -p params.ter -o reference.ter --data-ext ter -r 30
''',
supports_MPI=False,
supports_OMP=False,
use_version=True,
max_filename_len=78)
def main():
#Main entry point for this script
program.run_hybrid_program(__name__)
def dependents(): return [filter_volume]
if __name__ == "__main__": main()
| ezralanglois/arachnid | arachnid/pyspider/reference.py | Python | gpl-2.0 | 12,115 | ["Gaussian"] | 41b50b4f1e6ff1db61380ae79148848a97deea1fccfde85675aa5543a74e82d9 |
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import connection
from collections import defaultdict
import os
from datetime import datetime, timedelta
from common.utils import Date
from schools.models import (
School, Boundary)
from stories.models import (
Question, Questiongroup, QuestionType,
QuestiongroupQuestions, Source, UserType,
Story, Answer)
from optparse import make_option
from collections import OrderedDict
from django.db.models import Q, Count
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, cm
from reportlab.platypus import Paragraph, Table, TableStyle, Image
from reportlab.platypus.flowables import HRFlowable
from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT, TA_CENTER
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from common.utils import send_attachment
class Command(BaseCommand):
help = """
Description:
Generates a report on SMS data. Accepts
either a --from and --to date pair or a --days
parameter. Days will be calculated backwards from
today. Also accepts --emails as a list of email
IDs for the generated report to be sent to.
Usage:
./manage.py generate_sms_report [--from=YYYY-MM-DD] [--to=YYYY-MM-DD] \
[--days=number_of_days] --emails=a@b.com,c@d.com
"""
option_list = BaseCommand.option_list + (
make_option('--from',
help='Start date'),
make_option('--to',
help='End date'),
make_option('--days',
help='Number of days'),
make_option('--emails',
help='Comma separated list of email ids'),
)
def handle(self, *args, **options):
start_date = options.get('from')
end_date = options.get('to')
days = options.get('days')
emails = options.get('emails')
if not emails:
print """
Error:
--emails parameter not specified.
"""
print self.help
return
elif days:
end_date = datetime.today()
start_date = end_date - timedelta(days=int(days))
elif start_date or end_date:
if not (start_date and end_date):
print """
Error:
Please specify both --from and --to parameters.
"""
print self.help
return
date = Date()
if start_date:
sane = date.check_date_sanity(start_date)
if not sane:
print """
Error:
Wrong --from format. Expected YYYY-MM-DD
"""
print self.help
return
else:
start_date = date.get_datetime(start_date)
if end_date:
sane = date.check_date_sanity(end_date)
if not sane:
print """
Error:
Wrong --to format. Expected YYYY-MM-DD
"""
print self.help
return
else:
end_date = date.get_datetime(end_date)
else:
print self.help
return
emails = emails.split(",")
districts = []
gka_district_ids = set(
Story.objects.filter(
group__source__name="sms"
).values_list(
'school__admin3__parent__parent__id',
flat=True
)
)
for district_id in gka_district_ids:
district = Boundary.objects.get(id=district_id)
admin1_json = { 'name': district.name, 'id': district.id}
admin1_json['sms'] = self.get_story_meta(district.id,'district',start_date, end_date)
admin1_json['details'] = self.get_story_details(district.id,'district',start_date, end_date)
admin1_json['blocks'] = []
#print admin1_json
blocks = (Boundary.objects.all_active().filter(
parent_id=district_id,
type=district.type
).select_related('boundarycoord__coord', 'type__name',
'hierarchy__name'))
for block in blocks:
admin2_json = { 'name': block.name, 'id': block.id}
admin2_json['sms'] = self.get_story_meta(block.id,'block', start_date, end_date)
admin2_json['details'] = self.get_story_details(block.id,'block', start_date, end_date)
#if(int(admin2_json['sms']['stories']) > 0):
admin1_json['blocks'].append(admin2_json)
districts.append(admin1_json)
for each in districts:
blks = self.transform_data(each)
for blk in blks:
self.make_pdf(blk,start_date,end_date,blk[0][1],emails)
def make_pdf(self, data, start_date, end_date, filename, emails):
width, height = A4
styles = getSampleStyleSheet()
styleN = styles["BodyText"]
styleN.alignment = TA_LEFT
styleN.fontName = 'Helvetica'
styleN.textColor = colors.black
styleBH = styles["Heading3"]
styleBH.alignment = TA_CENTER
styleBH.fontName = 'Helvetica'
styleBH.textColor = colors.darkslategray
styleTH = styles["Heading1"]
styleTH.alignment = TA_CENTER
styleTH.fontName = 'Helvetica'
styleTH.textColor = colors.darkslateblue
styleGH = styles["Heading2"]
styleGH.alignment = TA_CENTER
styleGH.fontName = 'Helvetica'
styleGH.textColor = colors.darkslategray
#styleGH.backColor = colors.lightgrey
styleNC = styles["BodyText"]
#styleNC.alignment = TA_CENTER
styleNC.fontName = 'Helvetica'
def coord(x, y, unit=1):
x, y = x * unit, height - y * unit
return x, y
def style_row(row_array, style):
styled_array = []
for each in row_array:
styled_array.extend([Paragraph(str(each),style)])
return styled_array
c = canvas.Canvas(os.path.join(settings.PDF_REPORTS_DIR, 'gka_sms/')+filename+".pdf", pagesize=A4)
#logo
logo_image = Image("%s/images/akshara_logo.jpg" % settings.STATICFILES_DIRS)
logo_image.drawOn(c, *coord(14, 3, cm))
#HR
hr = HRFlowable(width="80%", thickness=1, lineCap='round', color=colors.lightgrey, spaceBefore=1, spaceAfter=1, hAlign='CENTER', vAlign='BOTTOM', dash=None)
hr.wrapOn(c, width, height)
hr.drawOn(c, *coord(1.8, 3.2, cm))
#Headings
header = Paragraph('GKA SMS Summary<br/><hr/>', styleTH)
header.wrapOn(c, width, height)
header.drawOn(c, *coord(0, 4, cm))
#Date Range
date_range = Paragraph("From " + start_date.strftime("%d %b, %Y") + " to " + end_date.strftime("%d %b, %Y"), styleBH)
date_range.wrapOn(c, width, height)
date_range.drawOn(c, *coord(0, 4.5, cm))
#Details
styled_data = [style_row(data[0],styleGH)]
for row in data[1:4]:
styled_data.append(style_row(row,styleN))
table_header = Table(styled_data, colWidths=[7 * cm,
5* cm, 5 * cm])
table_header.setStyle(TableStyle([
('INNERGRID', (0,0), (-1,-1), 0.25, colors.lightgrey),
('BOX', (0,0), (-1,-1), 0.25, colors.lightgrey),
('LINEBELOW', (0,0), (2, 0), 1.0, colors.darkgrey),
('LINEBELOW', (0,3), (2, 3), 1.0, colors.darkgrey),
]))
table_header.wrapOn(c, width, height)
table_header.drawOn(c, *coord(1.8, 9, cm))
#Questions
styled_data =[style_row(['Questions','Yes','No','Yes','No'],styleBH)]
for row in data[4:len(data)]:
styled_data.append(style_row(row,styleN))
table = Table(styled_data, colWidths=[7 * cm,
2.5 * cm, 2.5 * cm,
2.5 * cm, 2.5 * cm])
table.setStyle(TableStyle([
('INNERGRID', (0,0), (-1,-1), 0.25, colors.lightgrey),
('BOX', (0,0), (-1,-1), 0.25, colors.lightgrey),
#('LINEBELOW', (0,0), (2, 0), 1.0, colors.green),
]))
table.wrapOn(c, width, height)
table.drawOn(c, *coord(1.8, 17.5, cm))
#Footer
#HR
hr = HRFlowable(width="80%", thickness=1, lineCap='round', color=colors.lightgrey, spaceBefore=1, spaceAfter=1, hAlign='CENTER', vAlign='BOTTOM', dash=None)
hr.wrapOn(c, width, height)
hr.drawOn(c, *coord(1.8, 27, cm))
#Disclaimer
klp_text = Paragraph("This report has been generated by Karnataka Learning Partnership(www.klp.org.in/gka) for Akshara Foundation.",styleN)
klp_text.wrapOn(c, width, height)
klp_text.drawOn(c, *coord(1.8, 27.5, cm))
c.save()
self.send_email(start_date.strftime("%d/%m/%Y") + " to " + end_date.strftime("%d/%m/%Y"),filename, emails)
def send_email(self,date_range, block, emails):
print 'Sending email for', block
send_attachment(
from_email=settings.EMAIL_DEFAULT_FROM,
to_emails=emails,
subject= block + ': GKA SMS Report for '+ date_range,
folder='gka_sms',
filename=block
)
def get_json(self, source, stories_qset):
json = {}
json['stories'] = stories_qset.count()
json['schools'] = stories_qset.distinct('school').count()
return json
def transform_data(self, district):
blocks =[]
questions = {}
gka_question_seq = ['How many responses indicate that Math classes were happening in class 4 and 5 during the visit?',
'How many responses indicate that class 4 and 5 math teachers are trained in GKA methodology in the schools visited?',
'How many responses indicate evidence of Ganitha Kalika Andolana TLM being used in class 4 or 5 during the visit?',
'How many responses indicate that representational stage was being practiced during Math classes in class 4 and 5 during the visit?',
'How many responses indicate that group work was happening in the schools visited?']
for block in district["blocks"]:
data = [["Details", "Block-"+block["name"].capitalize(), "District-"+district["name"].capitalize()]]
data.append(["Schools", block["sms"]["schools"], district["sms"]["schools"]])
data.append(["SMS Messages", block["sms"]["stories"], district["sms"]["stories"]])
data.append(["Schools with SMS Messages", block["sms"]["schools_with_stories"], district["sms"]["schools_with_stories"]])
for each in block["details"]:
questions[each["question"]["display_text"]]= {"block": self.get_response_str(each["answers"])}
for each in district["details"]:
questions[each["question"]["display_text"]]["district"] = self.get_response_str(each["answers"])
custom_sort = self.make_custom_sort([ gka_question_seq ])
result = custom_sort(questions)
for question in result:
row = [question]
row.extend(questions[question]["block"])
row.extend(questions[question]["district"])
data.append(row)
blocks.append(data)
return blocks
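# Illustrative sketch (not part of the original command; counts are hypothetical):
# shape of one element of the list returned by transform_data(). The first four
# rows feed the summary table and the remaining rows feed the Yes/No question table.
#   [["Details", "Block-Foo", "District-Bar"],
#    ["Schools", 120, 900],
#    ["SMS Messages", 350, 2700],
#    ["Schools with SMS Messages", 80, 600],
#    ["How many responses indicate that group work was happening ...", "40", "5", "300", "45"]]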
def make_custom_sort(self,orders):
orders = [{k: -i for (i, k) in enumerate(reversed(order), 1)} for order in orders]
def process(stuff):
if isinstance(stuff, dict):
l = [(k, process(v)) for (k, v) in stuff.items()]
keys = set(stuff)
for order in orders:
if keys.issuperset(order):
return OrderedDict(sorted(l, key=lambda x: order.get(x[0], 0)))
return OrderedDict(sorted(l))
if isinstance(stuff, list):
return [process(x) for x in stuff]
return stuff
return process
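# Illustrative sketch (not part of the original command; values are hypothetical):
# make_custom_sort returns a function that reorders dict keys to follow the given
# sequence; keys not listed sort after the listed ones.
#   order_fn = self.make_custom_sort([['b', 'a']])
#   order_fn({'a': 1, 'b': 2})   # -> OrderedDict([('b', 2), ('a', 1)])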
def get_response_str(self, answers):
yes = 0
no = 0
if answers["options"]:
if "Yes" in answers["options"]:
yes = answers["options"]["Yes"]
if "No" in answers["options"]:
no = answers["options"]["No"]
#return [str(yes)+'('+str((yes*100)/(yes+no))+'%)',str(no)+'('+str((no*100)/(yes+no))+'%)']
return [str(yes),str(no)]
else:
return ["No Responses","-"]
def source_filter(self, source, stories_qset):
stories_qset = stories_qset.filter(
group__source__name=source)
return stories_qset
def get_story_meta(self, boundary_id, boundary_type, start_date, end_date):
source = 'sms'
admin2_id = None
admin1_id = None
if boundary_type == 'block':
admin2_id = boundary_id
if boundary_type == 'district':
admin1_id = boundary_id
school_type = 'Primary School'
school_qset = School.objects.filter(
admin3__type__name=school_type, status=2)
stories_qset = Story.objects.filter(
school__admin3__type__name=school_type)
if admin1_id:
school_qset = school_qset.filter(
schooldetails__admin1__id=admin1_id)
stories_qset = stories_qset.filter(
school__schooldetails__admin1__id=admin1_id)
if admin2_id:
school_qset = school_qset.filter(
schooldetails__admin2__id=admin2_id)
stories_qset = stories_qset.filter(
school__schooldetails__admin2__id=admin2_id)
if start_date:
stories_qset = stories_qset.filter(
date_of_visit__gte=start_date)
if end_date:
stories_qset = stories_qset.filter(
date_of_visit__lte=end_date)
if source:
stories_qset = self.source_filter(
source,
stories_qset
)
#print stories_qset.count()
response_json = {}
response_json['schools'] = school_qset.count()
response_json['stories'] = stories_qset.count()
response_json['schools_with_stories'] = stories_qset.distinct('school').count()
#print response_json
return response_json
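# Illustrative sketch (not part of the original command; counts are hypothetical):
# shape of the dict returned by get_story_meta().
#   {'schools': 1450,               # primary schools in the boundary
#    'stories': 3200,               # SMS stories in the date range
#    'schools_with_stories': 760}   # distinct schools those stories cover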
def get_que_and_ans(self, stories, source, school_type):
response_list = []
questions = Question.objects.all().select_related('question_type')
if source:
questions = questions.filter(
questiongroup__source__name=source)
if school_type:
questions = questions.filter(
school_type__name=school_type)
#print questions.count()
for question in questions.distinct('id'):
j = {}
j['question'] = {}
j['question']['key'] = question.key
j['question']['text'] = question.text
j['question']['display_text'] = question.display_text
j['answers'] = {}
j['answers']['question_type'] = question.question_type.name
answer_counts = question.answer_set.filter(
story__in=stories
).values('text').annotate(answer_count=Count('text'))
options = {}
for count in answer_counts:
options[count['text']] = count['answer_count']
j['answers']['options'] = options
response_list.append(j)
return response_list
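# Illustrative sketch (not part of the original command; the key, text and counts
# are hypothetical): one element of the list returned by get_que_and_ans().
#   {'question': {'key': 'q1', 'text': '...', 'display_text': '...'},
#    'answers': {'question_type': 'checkbox', 'options': {'Yes': 40, 'No': 5}}}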
def get_story_details(self, boundary_id, boundary_type, start_date, end_date):
source = 'sms'
admin1_id = None
admin2_id = None
school_type = 'Primary School'
if boundary_type == 'block':
admin2_id = boundary_id
if boundary_type == 'district':
admin1_id = boundary_id
stories = Story.objects.all()
if source:
stories = stories.filter(group__source__name=source)
if school_type:
stories = stories.filter(school__admin3__type__name=school_type)
if admin1_id:
stories = stories.filter(
school__schooldetails__admin1__id=admin1_id
)
if admin2_id:
stories = stories.filter(
school__schooldetails__admin2__id=admin2_id
)
if start_date:
stories = stories.filter(date_of_visit__gte=start_date)
if end_date:
stories = stories.filter(date_of_visit__lte=end_date)
response_json = self.get_que_and_ans(stories, source, school_type)
return response_json
| klpdotorg/dubdubdub | apps/stories/management/commands/generate_sms_report.py | Python | mit | 17,179 | ["VisIt"] | 83df2da148cdcc76c6934db4db7a78e3d77c4744acaa37183458c0055648644c |
# -*- coding: utf-8 -*-
import datetime
import json
import os
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import mkt
from mkt.constants.applications import DEVICE_TYPES
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.reviewers.models import EscalationQueue
from mkt.site.fixtures import fixture
from mkt.site.tests import formset, initial, TestCase, user_factory
from mkt.site.tests.test_utils_ import get_image_path
from mkt.submit.decorators import read_dev_agreement_required
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.submit.models import AppSubmissionChecklist
from mkt.translations.models import Translation
from mkt.users.models import UserNotification, UserProfile
from mkt.users.notifications import app_surveys
from mkt.webapps.models import AddonDeviceType, AddonUser, AppFeatures, Webapp
class TestSubmit(TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.fi_mock = mock.patch(
'mkt.developers.tasks.fetch_icon').__enter__()
self.user = self.get_user()
self.login(self.user.email)
def tearDown(self):
self.fi_mock.__exit__()
def get_user(self):
return UserProfile.objects.get(email='regular@mozilla.com')
def get_url(self, url):
return reverse('submit.app.%s' % url, args=[self.webapp.app_slug])
def _test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def _test_progress_display(self, completed, current):
"""Test that the correct steps are highlighted."""
r = self.client.get(self.url)
progress = pq(r.content)('#submission-progress')
# Check the completed steps.
completed_found = progress.find('.completed')
for idx, step in enumerate(completed):
li = completed_found.eq(idx)
eq_(li.text(), unicode(mkt.APP_STEPS_TITLE[step]))
# Check that we link back to the Developer Agreement.
terms_link = progress.find('.terms a')
if 'terms' in completed:
eq_(terms_link.attr('href'),
reverse('mkt.developers.docs', args=['policies', 'agreement']))
else:
eq_(terms_link.length, 0)
# Check the current step.
eq_(progress.find('.current').text(),
unicode(mkt.APP_STEPS_TITLE[current]))
class TestProceed(TestSubmit):
def setUp(self):
super(TestProceed, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def test_is_authenticated(self):
# Redirect user to Terms.
r = self.client.get(self.url)
self.assert3xx(r, reverse('submit.app.terms'))
def test_is_anonymous(self):
# Show user to Terms page but with the login prompt.
self.client.logout()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(r.context['proceed'], True)
class TestTerms(TestSubmit):
def setUp(self):
super(TestTerms, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app.terms')
def test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def test_jump_to_step(self):
r = self.client.get(reverse('submit.app'), follow=True)
self.assert3xx(r, self.url)
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#submit-terms')
eq_(doc.length, 1)
eq_(doc.find('input[name=newsletter]').siblings('label').length, 1,
'Missing its <label>!')
def test_progress_display(self):
self._test_progress_display([], 'terms')
@mock.patch('basket.subscribe')
def test_agree(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 0)
assert not subscribe_mock.called
@mock.patch('basket.subscribe')
def test_agree_and_sign_me_up(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement':
datetime.datetime.now(),
'newsletter': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 1)
notes = UserNotification.objects.filter(user=self.user, enabled=True,
notification_id=app_surveys.id)
eq_(notes.count(), 1, 'Expected to be subscribed to newsletter')
subscribe_mock.assert_called_with(
self.user.email, 'app-dev', lang='en-US',
country='restofworld', format='H',
source_url='http://testserver/developers/submit')
def test_disagree(self):
r = self.client.post(self.url)
eq_(r.status_code, 200)
eq_(self.user.read_dev_agreement, None)
eq_(UserNotification.objects.count(), 0)
def test_read_dev_agreement_required(self):
f = mock.Mock()
f.__name__ = 'function'
request = mock.Mock()
request.user.read_dev_agreement = None
request.get_full_path.return_value = self.url
func = read_dev_agreement_required(f)
res = func(request)
assert not f.called
eq_(res.status_code, 302)
eq_(res['Location'], reverse('submit.app'))
class TestManifest(TestSubmit):
def setUp(self):
super(TestManifest, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
def test_anonymous(self):
r = self.client.get(self.url, follow=True)
eq_(r.context['step'], 'terms')
def test_cannot_skip_prior_step(self):
r = self.client.get(self.url, follow=True)
# And we start back at one...
self.assert3xx(r, reverse('submit.app.terms'))
def test_jump_to_step(self):
# I already read the Terms.
self._step()
# So jump me to the Manifest step.
r = self.client.get(reverse('submit.app'), follow=True)
eq_(r.context['step'], 'manifest')
def test_legacy_redirects(self):
def check():
for before, status in redirects:
r = self.client.get(before, follow=True)
self.assert3xx(r, dest, status)
# I haven't read the dev agreement.
redirects = (
('/developers/submit/', 302),
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
)
dest = '/developers/submit/terms'
check()
# I have read the dev agreement.
self._step()
redirects = (
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
('/developers/submit/manifest', 301),
)
dest = '/developers/submit/'
check()
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#upload-file').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms'], 'manifest')
class UploadAddon(object):
def post(self, expect_errors=False, data=None):
if data is None:
data = {'free_platforms': ['free-desktop']}
data.update(upload=self.upload.pk)
response = self.client.post(self.url, data, follow=True)
eq_(response.status_code, 200)
if not expect_errors:
# Show any unexpected form errors.
if response.context and 'form' in response.context:
eq_(response.context['form'].errors, {})
return response
class BaseWebAppTest(BaseUploadTest, UploadAddon, TestCase):
fixtures = fixture('user_999', 'user_10482')
def setUp(self):
super(BaseWebAppTest, self).setUp()
self.manifest = self.manifest_path('mozball.webapp')
self.manifest_url = 'http://allizom.org/mozball.webapp'
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
self.upload.update(name=self.manifest_url)
self.url = reverse('submit.app')
self.login('regular@mozilla.com')
def post_addon(self, data=None):
eq_(Webapp.objects.count(), 0)
self.post(data=data)
return Webapp.objects.get()
class TestCreateWebApp(BaseWebAppTest):
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_post_app_redirect(self, fi_mock):
r = self.post()
webapp = Webapp.objects.get()
self.assert3xx(r,
reverse('submit.app.details', args=[webapp.app_slug]))
assert fi_mock.delay.called, (
'The fetch_icon task was expected to be called')
def test_no_hint(self):
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url), follow=True)
eq_(r.status_code, 200)
assert 'already submitted' not in r.content, (
'Unexpected helpful error (trap_duplicate)')
assert 'already exists' not in r.content, (
'Unexpected validation error (verify_app_domain)')
def test_no_upload(self):
data = {'free_platforms': ['free-desktop']}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_bad_upload(self, fi_mock):
data = {'free_platforms': ['free-desktop'], 'upload': 'foo'}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
assert not fi_mock.delay.called, (
'The fetch_icon task was not expected to be called')
def test_hint_for_same_manifest(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
assert 'Oops' in data['validation']['messages'][0]['message'], (
'Expected oops')
def test_no_hint_for_same_manifest_different_author(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
# Submit same manifest as different user.
self.login('clouserw@mozilla.com')
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; only one app per domain is '
'allowed.')
def test_app_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.is_packaged, False)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'MozillaBall ょ')
eq_(addon.app_slug, u'mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, u'http://allizom.org/mozball.webapp')
eq_(addon.app_domain, u'http://allizom.org')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.latest_version.developer_name, 'Mozilla Labs')
eq_(addon.latest_version.manifest,
json.loads(open(self.manifest).read()))
def test_manifest_with_any_extension(self):
self.manifest = os.path.join(settings.ROOT, 'mkt', 'developers',
'tests', 'addons', 'mozball.owa')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
ok_(addon.id)
def test_version_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.latest_version.version, '1.0')
def test_file_from_uploaded_manifest(self):
addon = self.post_addon()
files = addon.latest_version.files.all()
eq_(len(files), 1)
eq_(files[0].status, mkt.STATUS_PENDING)
def test_set_platform(self):
app = self.post_addon(
{'free_platforms': ['free-android-tablet', 'free-desktop']})
self.assertSetEqual(app.device_types,
[mkt.DEVICE_TABLET, mkt.DEVICE_DESKTOP])
def test_free(self):
app = self.post_addon({'free_platforms': ['free-firefoxos']})
self.assertSetEqual(app.device_types, [mkt.DEVICE_GAIA])
eq_(app.premium_type, mkt.ADDON_FREE)
def test_premium(self):
app = self.post_addon({'paid_platforms': ['paid-firefoxos']})
self.assertSetEqual(app.device_types, [mkt.DEVICE_GAIA])
eq_(app.premium_type, mkt.ADDON_PREMIUM)
def test_supported_locales(self):
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_short_locale(self):
# This manifest has a locale code of "pt" which is in the
# SHORTER_LANGUAGES setting and should get converted to "pt-PT".
self.manifest = self.manifest_path('short-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
eq_(addon.default_locale, 'pt-PT')
eq_(addon.versions.latest().supported_locales, 'es')
def test_unsupported_detail_locale(self):
# This manifest has a locale code of "en-GB" which is unsupported, so
# we default to "en-US".
self.manifest = self.manifest_path('unsupported-default-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest,
user=UserProfile.objects.get(pk=999))
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_appfeatures_creation(self):
addon = self.post_addon(data={
'free_platforms': ['free-desktop'],
'has_contacts': 'on'
})
features = addon.latest_version.features
ok_(isinstance(features, AppFeatures))
field_names = [f.name for f in AppFeaturesForm().all_fields()]
for field in field_names:
expected = field == 'has_contacts'
eq_(getattr(features, field), expected)
class TestCreateWebAppFromManifest(BaseWebAppTest):
def setUp(self):
super(TestCreateWebAppFromManifest, self).setUp()
Webapp.objects.create(app_slug='xxx',
app_domain='http://existing-app.com')
def upload_webapp(self, manifest_url, **post_kw):
self.upload.update(name=manifest_url) # Simulate JS upload.
return self.post(**post_kw)
def post_manifest(self, manifest_url):
rs = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=manifest_url))
if 'json' in rs['content-type']:
rs = json.loads(rs.content)
return rs
def test_duplicate_domain(self):
self.create_switch(name='webapps-unique-by-domain')
rs = self.upload_webapp('http://existing-app.com/my.webapp',
expect_errors=True)
eq_(rs.context['form'].errors,
{'upload':
['An app already exists on this domain; only one '
'app per domain is allowed.']})
def test_allow_duplicate_domains(self):
self.upload_webapp('http://existing-app.com/my.webapp') # No errors.
def test_duplicate_domain_from_js(self):
self.create_switch(name='webapps-unique-by-domain')
data = self.post_manifest('http://existing-app.com/my.webapp')
eq_(data['validation']['errors'], 1)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; '
'only one app per domain is allowed.')
def test_allow_duplicate_domains_from_js(self):
rs = self.post_manifest('http://existing-app.com/my.webapp')
eq_(rs.status_code, 302)
class BasePackagedAppTest(BaseUploadTest, UploadAddon, TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(BasePackagedAppTest, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.app.update(is_packaged=True)
self.version = self.app.latest_version
self.file = self.version.all_files[0]
self.file.update(filename='mozball.zip')
self.upload = self.get_upload(
abspath=self.package,
user=UserProfile.objects.get(email='regular@mozilla.com'))
self.upload.update(name='mozball.zip')
self.url = reverse('submit.app')
self.login('regular@mozilla.com')
@property
def package(self):
return self.packaged_app_path('mozball.zip')
def post_addon(self, data=None):
eq_(Webapp.objects.count(), 1)
self.post(data=data)
return Webapp.objects.order_by('-id')[0]
def setup_files(self, filename='mozball.zip'):
# Make sure the source file is there.
# Original packaged file.
if not storage.exists(self.file.file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.file_path)
# Signed packaged file.
if not storage.exists(self.file.signed_file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.signed_file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.signed_file_path)
class TestEscalatePrereleaseWebApp(BasePackagedAppTest):
def setUp(self):
super(TestEscalatePrereleaseWebApp, self).setUp()
user_factory(email=settings.NOBODY_EMAIL_ADDRESS)
def post(self):
super(TestEscalatePrereleaseWebApp, self).post(data={
'free_platforms': ['free-firefoxos'],
'packaged': True,
})
def test_prerelease_permissions_get_escalated(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['moz-attention']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 1)
def test_normal_permissions_dont_get_escalated(self):
validation = json.loads(self.upload.validation)
validation['permissions'] = ['contacts']
self.upload.update(validation=json.dumps(validation))
eq_(EscalationQueue.objects.count(), 0)
self.post()
eq_(EscalationQueue.objects.count(), 0)
class TestCreatePackagedApp(BasePackagedAppTest):
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_post_app_redirect(self, _mock):
res = self.post()
webapp = Webapp.objects.order_by('-created')[0]
self.assert3xx(res,
reverse('submit.app.details', args=[webapp.app_slug]))
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
@mock.patch('mkt.submit.forms.verify_app_domain')
def test_app_from_uploaded_package(self, _verify, _mock):
addon = self.post_addon(
data={'packaged': True, 'free_platforms': ['free-firefoxos']})
eq_(addon.latest_version.version, '1.0')
eq_(addon.is_packaged, True)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'Packaged MozillaBall ょ')
eq_(addon.app_slug, u'packaged-mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, None)
eq_(addon.app_domain, 'app://hy.fr')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.latest_version.developer_name, 'Mozilla Labs')
assert _verify.called, (
'`verify_app_domain` should be called for packaged apps with '
'origins.')
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_packaged_app_not_unique(self, _mock):
Webapp.objects.create(is_packaged=True, app_domain='app://hy.fr')
res = self.post(
data={'packaged': True, 'free_platforms': ['free-firefoxos']},
expect_errors=True)
eq_(res.context['form'].errors, {
'upload': ['An app already exists on this domain; only one app '
'per domain is allowed.']})
class TestDetails(TestSubmit):
fixtures = fixture('webapp_337141', 'user_999', 'user_10482')
def setUp(self):
super(TestDetails, self).setUp()
self.webapp = self.get_webapp()
self.webapp.update(status=mkt.STATUS_NULL)
self.url = reverse('submit.app.details', args=[self.webapp.app_slug])
self.cat1 = 'books'
def get_webapp(self):
return Webapp.objects.get(id=337141)
def upload_preview(self, image_file=None):
if not image_file:
image_file = get_image_path('preview.jpg')
return self._upload_image(self.webapp.get_dev_url('upload_preview'),
image_file=image_file)
def upload_icon(self, image_file=None):
if not image_file:
image_file = get_image_path('mozilla-sq.png')
return self._upload_image(self.webapp.get_dev_url('upload_icon'),
image_file=image_file)
def _upload_image(self, url, image_file):
with open(image_file, 'rb') as data:
rp = self.client.post(url, {'upload_image': data})
eq_(rp.status_code, 200)
hash_ = json.loads(rp.content)['upload_hash']
assert hash_, 'No hash: %s' % rp.content
return hash_
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
self.cl = AppSubmissionChecklist.objects.create(
addon=self.webapp,
terms=True, manifest=True)
# Associate app with user.
AddonUser.objects.create(addon=self.webapp, user=self.user)
# Associate device type with app.
self.dtype = DEVICE_TYPES.values()[0]
AddonDeviceType.objects.create(addon=self.webapp,
device_type=self.dtype.id)
self.device_types = [self.dtype]
# Associate category with app.
self.webapp.update(categories=[self.cat1])
def test_anonymous(self):
self._test_anonymous()
def test_resume_later(self):
self._step()
self.webapp.appsubmissionchecklist.update(details=True)
r = self.client.get(reverse('submit.app.resume',
args=[self.webapp.app_slug]))
self.assert3xx(r, self.webapp.get_dev_url('edit'))
def test_not_owner(self):
self._step()
self.login('clouserw@mozilla.com')
eq_(self.client.get(self.url).status_code, 403)
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#submit-details').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest'], 'details')
def new_preview_formset(self, *args, **kw):
ctx = self.client.get(self.url).context
blank = initial(ctx['form_previews'].forms[-1])
blank.update(**kw)
return blank
def preview_formset(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.new_preview_formset()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
def get_dict(self, **kw):
data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'homepage': 'http://www.goodreads.com/user/show/7595895-krupa',
'support_url': 'http://www.goodreads.com/user_challenges/351558',
'support_email': 'krupa+to+the+rescue@goodreads.com',
'categories': [self.cat1],
'flash': '1',
'publish_type': mkt.PUBLISH_IMMEDIATE,
'notes': 'yes'
}
# Add the required screenshot.
data.update(self.preview_formset({
'upload_hash': '<hash>',
'position': 0
}))
data.update(**kw)
# Remove fields without values.
data = dict((k, v) for k, v in data.iteritems() if v is not None)
return data
def check_dict(self, data=None, expected=None):
if data is None:
data = self.get_dict()
addon = self.get_webapp()
# Build a dictionary of expected results.
expected_data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'uses_flash': True,
'publish_type': mkt.PUBLISH_IMMEDIATE,
}
if expected:
expected_data.update(expected)
uses_flash = expected_data.pop('uses_flash')
eq_(addon.latest_version.all_files[0].uses_flash, uses_flash)
self.assertSetEqual(addon.device_types, self.device_types)
for field, expected in expected_data.iteritems():
got = unicode(getattr(addon, field))
expected = unicode(expected)
eq_(got, expected,
'Expected %r for %r. Got %r.' % (expected, field, got))
@mock.patch('mkt.submit.views.record_action')
def test_success(self, record_action):
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, mkt.STATUS_NULL)
assert record_action.called
def test_success_paid(self):
self._step()
self.webapp = self.get_webapp()
self.make_premium(self.webapp)
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, mkt.STATUS_NULL)
eq_(self.webapp.highest_status, mkt.STATUS_PENDING)
def test_success_prefill_device_types_if_empty(self):
"""
The new submission flow asks for device types at step one.
This ensures that existing incomplete apps still have device
compatibility.
"""
self._step()
AddonDeviceType.objects.all().delete()
self.device_types = mkt.DEVICE_TYPES.values()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_success_for_approved(self):
self._step()
data = self.get_dict(publish_type=mkt.PUBLISH_PRIVATE)
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data,
expected={'publish_type': mkt.PUBLISH_PRIVATE})
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_media_types(self):
self._step()
res = self.client.get(self.url)
doc = pq(res.content)
eq_(doc('.screenshot_upload').attr('data-allowed-types'),
'image/jpeg|image/png|video/webm')
eq_(doc('#id_icon_upload').attr('data-allowed-types'),
'image/jpeg|image/png')
def test_screenshot(self):
self._step()
im_hash = self.upload_preview()
data = self.get_dict()
data.update(self.preview_formset({
'upload_hash': im_hash,
'position': 0
}))
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = Webapp.objects.get(pk=self.webapp.pk)
eq_(ad.previews.all().count(), 1)
def test_icon(self):
self._step()
im_hash = self.upload_icon()
data = self.get_dict()
data['icon_upload_hash'] = im_hash
data['icon_type'] = 'image/png'
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = self.get_webapp()
eq_(ad.icon_type, 'image/png')
for size in mkt.APP_ICON_SIZES:
fn = '%s-%s.png' % (ad.id, size)
assert os.path.exists(os.path.join(ad.get_icon_dir(), fn)), (
'Expected %s in %s' % (fn, os.listdir(ad.get_icon_dir())))
def test_screenshot_or_video_required(self):
self._step()
data = self.get_dict()
for k in data:
if k.startswith('files') and k.endswith('upload_hash'):
data[k] = ''
rp = self.client.post(self.url, data)
eq_(rp.context['form_previews'].non_form_errors(),
['You must upload at least one screenshot or video.'])
def test_unsaved_screenshot(self):
self._step()
# If there are form errors we should still pass the previews URIs.
preview_type = 'video/webm'
preview_uri = 'moz-filedata:p00p'
data = self.preview_formset({
'position': 1,
'upload_hash': '<hash_one>',
'unsaved_image_type': preview_type,
'unsaved_image_data': preview_uri
})
r = self.client.post(self.url, data)
eq_(r.status_code, 200)
form = pq(r.content)('form')
eq_(form.find('input[name=files-0-unsaved_image_type]').val(),
preview_type)
eq_(form.find('input[name=files-0-unsaved_image_data]').val(),
preview_uri)
def test_unique_allowed(self):
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, mkt.STATUS_NULL)
def test_slug_invalid(self):
self._step()
# Submit an invalid slug.
d = self.get_dict(app_slug='slug!!! aksl23%%')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(
r, 'form_basic', 'app_slug',
"Enter a valid 'slug' consisting of letters, numbers, underscores "
"or hyphens.")
def test_slug_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(app_slug=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
'This field is required.')
def test_description_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(description=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'description',
'This field is required.')
def test_privacy_policy_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(privacy_policy=None))
self.assertFormError(r, 'form_basic', 'privacy_policy',
'This field is required.')
def test_clashing_locale(self):
self.webapp.default_locale = 'de'
self.webapp.save()
self._step()
self.client.cookies['current_locale'] = 'en-us'
data = self.get_dict(name=None, name_de='Test name',
privacy_policy=None,
**{'privacy_policy_en-us': 'XXX'})
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
def test_homepage_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage=None))
self.assertNoFormErrors(r)
def test_homepage_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage='xxx'))
self.assertFormError(r, 'form_basic', 'homepage', 'Enter a valid URL.')
def test_support_url_optional_if_email_present(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url=None))
self.assertNoFormErrors(r)
def test_support_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url='xxx'))
self.assertFormError(r, 'form_basic', 'support_url',
'Enter a valid URL.')
def test_support_email_optional_if_url_present(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email=None))
self.assertNoFormErrors(r)
def test_support_email_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email='xxx'))
self.assertFormError(r, 'form_basic', 'support_email',
'Enter a valid email address.')
def test_support_need_email_or_url(self):
self._step()
res = self.client.post(self.url, self.get_dict(support_email=None,
support_url=None))
self.assertFormError(
res, 'form_basic', 'support',
'You must provide either a website, an email, or both.')
ok_(pq(res.content)('#support-fields .error #trans-support_url'))
ok_(pq(res.content)('#support-fields .error #trans-support_email'))
# While the inputs will get the error styles, there is no need for an
# individual error message on each, the hint on the parent is enough.
eq_(pq(res.content)('#support-fields .error .errorlist').text(), '')
def test_categories_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(categories=[]))
eq_(r.context['form_cats'].errors['categories'],
['This field is required.'])
def test_categories_max(self):
self._step()
eq_(mkt.MAX_CATEGORIES, 2)
cat2 = 'games'
cat3 = 'social'
cats = [self.cat1, cat2, cat3]
r = self.client.post(self.url, self.get_dict(categories=cats))
eq_(r.context['form_cats'].errors['categories'],
['You can have only 2 categories.'])
def _post_cats(self, cats):
self.client.post(self.url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories), sorted(cats))
def test_categories_add(self):
self._step()
cat2 = 'games'
self._post_cats([self.cat1, cat2])
def test_categories_add_and_remove(self):
self._step()
cat2 = 'games'
self._post_cats([cat2])
def test_categories_remove(self):
# Add another category here so it gets added to the initial formset.
cat2 = 'games'
self.webapp.update(categories=[self.cat1, cat2])
self._step()
# `cat2` should get removed.
self._post_cats([self.cat1])
class TestDone(TestSubmit):
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
super(TestDone, self).setUp()
self.webapp = self.get_webapp()
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def _step(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
def test_anonymous(self):
self._test_anonymous()
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest', 'details'],
'next_steps')
def test_done(self):
self._step()
res = self.client.get(self.url)
eq_(res.status_code, 200)
class TestNextSteps(TestCase):
# TODO: Delete this test suite once we deploy IARC.
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
self.user = UserProfile.objects.get(email='regular@mozilla.com')
self.login(self.user.email)
self.webapp = Webapp.objects.get(id=337141)
self.webapp.update(status=mkt.STATUS_PENDING)
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def test_200(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
res = self.client.get(self.url)
eq_(res.status_code, 200)
| clouserw/zamboni | mkt/submit/tests/test_views.py | Python | bsd-3-clause | 38,827 | ["exciting"] | 89680f2bc0bfa2390c2cc11fb41dde7482ac00133ec85f5a0e8e60bc9b6a81db |
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CCommonName(unittest.TestCase):
def setUp(self):
self.datamodel=COPASI.CRootContainer.addDatamodel()
self.model=self.datamodel.getModel()
self.compartment=self.model.createCompartment("Comp1")
self.metab=self.model.createMetabolite("metab3","Comp1")
self.cn=self.metab.getCN()
self.model.compileIfNecessary()
def test_getPrimary(self):
prim=self.cn.getPrimary()
self.assert_(prim.__class__==COPASI.CCommonName)
self.assert_(prim.getString()=="CN=Root")
def test_getRemainder(self):
prim=self.cn.getRemainder()
self.assert_(prim.__class__==COPASI.CCommonName)
self.assert_(prim.getString()=='Model=New Model,Vector=Compartments[Comp1],Vector=Metabolites[metab3]')
def test_getObjectType(self):
prim=self.cn.getObjectType()
self.assert_(type(prim)==StringType)
self.assert_(prim=="CN")
def test_getObjectName(self):
prim=self.cn.getObjectName()
self.assert_(type(prim)==StringType)
self.assert_(prim=="Root")
def test_escape(self):
a="This- \ \ is a test--!"
o=COPASI.CCommonName.escape(a)
self.assert_(type(o)==StringType)
self.assert_(len(o)==len(a)+2)
def test_unescape(self):
a="This- \\ \\ is a test--!"
o=COPASI.CCommonName.unescape(a)
self.assert_(type(o)==StringType)
self.assert_(len(o)==len(a)-2)
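# Illustrative note (not part of the original test file): judging by the length
# checks above, escape() appears to prefix CN special characters such as the two
# backslashes with '\' (hence +2 characters) and unescape() removes those
# prefixes (hence -2), so one would expect the round trip
#   COPASI.CCommonName.unescape(COPASI.CCommonName.escape(a)) == a
# to hold.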
def suite():
tests=[
'test_getPrimary'
,'test_getRemainder'
,'test_getObjectType'
,'test_getObjectName'
,'test_escape'
,'test_unescape'
]
return unittest.TestSuite(map(Test_CCommonName,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
| jonasfoe/COPASI | copasi/bindings/python/unittests/Test_CCopasiObjectName.py | Python | artistic-2.0 | 2,523 | ["COPASI"] | dffc6ec29610bd8e4b7a478cf45e0fb99079699449c41a1860d9551bf6f891ec |
"""
This script builds the design matrix for our linear regression.
We explore the influence of linear and quadratic drifts on the model
performance.
Script for the filtered data.
Run with:
python noise-pca_filtered_script.py
from this directory
"""
from __future__ import print_function, division
import sys, os, pdb
from scipy import ndimage
from scipy.ndimage import gaussian_filter
from matplotlib import colors
from os.path import splitext
from scipy.stats import t as t_dist
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
import scipy
import pprint as pp
import json
# Specify the path for functions
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
sys.path.append(os.path.join(os.path.dirname(__file__), "./"))
from smoothing import *
from diagnostics import *
from glm import *
from plot_mosaic import *
from mask_filtered_data import *
# Locate the paths
project_path = '../../../'
data_path = project_path+'data/ds005/'
path_dict = {'data_filtered':{
'type' : 'filtered',
'feat' : '.feat',
'bold_img_name' : 'filtered_func_data_mni.nii.gz',
'run_path' : 'model/model001/'
},
'data_original':{
'type' : '',
'feat': '',
'bold_img_name' : 'bold.nii.gz',
'run_path' : 'BOLD/'
}}
# TODO: uncomment for final version
#subject_list = [str(i) for i in range(1,17)]
#run_list = [str(i) for i in range(1,4)]
# Run only for subject 1 and 5 - run 1
run_list = [str(i) for i in range(1,2)]
subject_list = ['1', '5']
d_path = path_dict['data_filtered']  # 'data_original' or 'data_filtered'
images_paths = [('ds005' + '_sub' + s.zfill(3) + '_t1r' + r, \
data_path + 'sub%s/'%(s.zfill(3)) + d_path['run_path'] \
+ 'task001_run%s%s/%s' %(r.zfill(3),d_path['feat'],\
d_path['bold_img_name'])) \
for r in run_list \
for s in subject_list]
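# Illustrative sketch (not part of the original script): with the subject/run
# lists above and the 'data_filtered' paths, each entry of images_paths is a
# (name, path) pair, e.g.
#   ('ds005_sub001_t1r1',
#    '../../../data/ds005/sub001/model/model001/task001_run001.feat/filtered_func_data_mni.nii.gz')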
# set gray colormap and nearest neighbor interpolation by default
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
# Mask
# To be used with the normal data
thres = 375 #From analysis of the histograms
# To be used with the filtered data
mask_path = project_path+'data/mni_icbm152_t1_tal_nlin_asym_09c_mask_2mm.nii'
template_path = project_path+'data/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii'
sm = ''
#sm='not_smooth/'
project_path = project_path + sm
# Create the needed directories if they do not exist
dirs = [project_path+'fig/',\
project_path+'fig/BOLD',\
project_path+'fig/drifts',\
project_path+'fig/pca',\
project_path+'fig/pca/projections/',\
project_path+'fig/linear_model/mosaic',\
project_path+'fig/linear_model/mosaic/middle_slice',\
project_path+'txt_output/',\
project_path+'txt_output/MRSS/',\
project_path+'txt_output/pca/',\
project_path+'txt_output/drifts/']
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
# Progress bar
print("\nStarting noise-pca for filtered data analysis\n")
for image_path in images_paths:
name = image_path[0]
if d_path['type']=='filtered':
#in_brain_img = nib.load('../../../'+
# 'data/ds005/sub001/model/model001/task001_run001.feat/'\
# + 'masked_filtered_func_data_mni.nii.gz')
# Image shape (91, 109, 91, 240)
# Build the masked-image path from the current image path; the loop
# variables s/r leaked from the list comprehension above would always
# point at the last subject/run.
md = os.path.join(os.path.dirname(image_path[1]),
'masked_' + d_path['bold_img_name'])
if not os.path.exists(md):
print("Filtering brain image for: ")
print(str(name))
in_brain_img = make_mask_filtered_data(image_path[1],mask_path)
print("brain image filtered\n")
else:
print("Loading filtered brain image for: ")
print(str(name))
in_brain_img = nib.load(md)
print("brain image loaded\n")
data_int = in_brain_img.get_data()
data = data_int.astype(float)
mean_data = np.mean(data, axis=-1)
template = nib.load(template_path)
template_data_int = template.get_data()
template_data = template_data_int.astype(float)
Transpose = False
in_brain_mask = (mean_data - 0.0) < 0.01
plt.imshow(plot_mosaic(template_data, transpose=Transpose),\
cmap='gray', alpha=1)
else:
img = nib.load(image_path[1])
data = img.get_data()
mean_data = np.mean(data, axis=-1)
in_brain_mask = mean_data > thres
Transpose = True
plt.contour(plot_mosaic(in_brain_mask, transpose=Transpose), \
cmap='gray' , alpha=1)
# Smoothing with Gaussian filter
smooth_data = smoothing(data,1,range(data.shape[-1]))
# Selecting the voxels in the brain
in_brain_tcs = smooth_data[in_brain_mask, :]
#in_brain_tcs = data[in_brain_mask, :]
vol_shape = data.shape[:-1]
# Plotting the voxels in the brain
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
plt.colorbar()
plt.title('In brain voxel mean values' + '\n' + (d_path['type'] + str(name)))
plt.savefig(project_path+'fig/BOLD/%s_mean_voxels.png'\
%(d_path['type'] + str(name)))
#plt.show()
#plt.clf()
# Convolution with 1 to 4 conditions
convolved = np.zeros((240,5))
for i in range(1,5):
#convolved = np.loadtxt(\
# '../../../txt_output/conv_normal/%s_conv_00%s_canonical.txt'\
# %(str(name),str(i)))
convolved[:,i] = np.loadtxt(\
'../../../txt_output/conv_high_res/%s_conv_00%s_high_res.txt'\
%(str(name),str(i)))
reg_str = ['Intercept','Task', 'Gain', 'Loss', 'Distance', 'Linear Drift',\
'Quadratic drift', 'PC#1', 'PC#2', 'PC#3', 'PC#4']
# Create design matrix X - Including drifts
P = 7 #number of regressors of X including the ones for intercept
n_trs = data.shape[-1]
X = np.ones((n_trs, P))
for i in range(1,5):
X[:,i] = convolved[:,i]
linear_drift = np.linspace(-1, 1, n_trs)
X[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X[:,6] = quadratic_drift
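# Illustrative note (not part of the original script): column layout of the
# design matrix X built above, matching reg_str[0:7].
#   X[:, 0]   -> intercept (all ones)
#   X[:, 1:5] -> convolved regressors for task, gain, loss, distance
#   X[:, 5]   -> linear drift over [-1, 1]
#   X[:, 6]   -> demeaned quadratic drift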
# Save the design matrix
np.savetxt(project_path+\
'txt_output/drifts/%s_design_matrix_with_drift.txt'\
%(d_path['type'] + str(name)), X)
# Linear Model - Including drifts
Y = in_brain_tcs.T
betas = npl.pinv(X).dot(Y)
# Save the betas for the linear model including drifts
np.savetxt(project_path+\
'txt_output/drifts/%s_betas_with_drift.txt'%(d_path['type'] + str(name)), betas)
betas_vols = np.zeros(vol_shape + (P,))
betas_vols[in_brain_mask] = betas.T
# Plot
# Set regions outside mask as missing with np.nan
mean_data[~in_brain_mask] = np.nan
betas_vols[~in_brain_mask] = np.nan
nice_cmap_values = np.loadtxt('actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# Plot each slice on the 3rd dimension of the image in a mosaic
for k in range(1,P):
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
#plt.imshow(plot_mosaic(betas_vols[...,k], transpose=Transpose), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(betas_vols[...,k], transpose=Transpose), cmap=nice_cmap, alpha=1)
plt.colorbar()
plt.title('Beta (with drift) values for brain voxel related to ' \
+ str(reg_str[k]) + '\n' + d_path['type'] + str(name))
plt.savefig(project_path+'fig/linear_model/mosaic/%s_withdrift_%s'\
%(d_path['type'] + str(name), str(reg_str[k]))+'.png')
plt.close()
#plt.show()
#plt.clf()
#Show the middle slice only
plt.imshow(betas_vols[:, :, 18, k], cmap='gray', alpha=0.5)
plt.colorbar()
plt.title('In brain voxel - Slice 18 \n' \
'Projection on %s - %s'\
%(str(reg_str[k]), d_path['type'] + str(name)))
plt.savefig(\
project_path+'fig/linear_model/mosaic/middle_slice/%s_withdrift_middleslice_%s'\
%(d_path['type'] + str(name), str(k))+'.png')
#plt.show()
#plt.clf()
plt.close()
# PCA Analysis
Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
U, S, V = npl.svd(unscaled_cov)
projections = U.T.dot(Y_demeaned)
projection_vols = np.zeros(data.shape)
projection_vols[in_brain_mask, :] = projections.T
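# Illustrative note (not part of the original script): the SVD of the unscaled
# time-by-time covariance gives the temporal principal components in the columns
# of U; the fraction of variance carried by component i is S[i] / S.sum(), which
# is what the bar plot further below uses. Expected shapes, assuming n_trs time
# points and V voxels in the mask:
#   U: (n_trs, n_trs), S: (n_trs,), projections: (n_trs, V)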
# Plot the projection of the data on the 5 first principal component
# from SVD
for i in range(1,5):
plt.plot(U[:, i])
plt.title('U' + str(i) + ' vector from SVD \n' + str(name))
plt.imshow(projection_vols[:, :, 18, i])
plt.colorbar()
plt.title('PCA - 18th slice projection on PC#' + str(i) + ' from SVD \n ' +\
d_path['type'] + str(name))
plt.savefig(project_path+'fig/pca/projections/%s_PC#%s.png' \
%((d_path['type'] + str(name),str(i))))
#plt.show()
#plt.clf()
plt.close()
# Variance Explained analysis
s = []
#S holds the singular values -> total variance = sum of the elements of S
for i in S:
s.append(i/np.sum(S))
np.savetxt(project_path+\
'txt_output/pca/%s_variance_explained' % (d_path['type'] + str(name)) +\
'.txt', np.array(s[:40]))
ind = np.arange(len(s[1:40]))
plt.bar(ind, s[1:40], width=0.5)
plt.xlabel('Principal Components indices')
plt.ylabel('Explained variance in percent')
plt.title('Variance explained graph \n' + (d_path['type'] + str(name)))
plt.savefig(project_path+\
'fig/pca/%s_variance_explained.png' %(d_path['type'] + str(name)))
#plt.show()
plt.close()
# Linear Model - including PCs from PCA analysis
PC = 3 # Number of PCs to include in the design matrix
P_pca = P + PC
X_pca = np.ones((n_trs, P_pca))
for i in range(1,5):
X_pca[:,i] = convolved[:,i]
linear_drift = np.linspace(-1, 1, n_trs)
X_pca[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X_pca[:,6] = quadratic_drift
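# Append the first three principal component time courses (columns of U)
# as nuisance regressors in columns 7-9 of X_pca.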
for i in range(3):
X_pca[:,7+i] = U[:, i]
# Save the design matrix - with PCs
np.savetxt(project_path+'txt_output/pca/%s_design_matrix_pca.txt'\
%(d_path['type'] + str(name)), X_pca)
#plt.imshow(X_pca, aspect=0.25)
B_pca = npl.pinv(X_pca).dot(Y)
np.savetxt(project_path+'txt_output/pca/%s_betas_pca.txt'\
%(d_path['type'] + str(name)), B_pca)
b_pca_vols = np.zeros(vol_shape + (P_pca,))
b_pca_vols[in_brain_mask, :] = B_pca.T
# Save betas as nii files
# Plot - with PCs
# Set regions outside mask as missing with np.nan
mean_data[~in_brain_mask] = np.nan
b_pca_vols[~in_brain_mask] = np.nan
# Plot each slice on the 3rd dimension of the image in a mosaic
for k in range(1,P_pca):
fig = plt.figure(figsize = (8, 5))
#plt.imshow(plot_mosaic(b_pca_vols[...,k], transpose=Transpose), cmap='gray', alpha=0.5)
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(b_pca_vols[...,k], transpose=Transpose), cmap=nice_cmap, alpha=1)
plt.colorbar()
plt.title('Beta (with PCA) values for brain voxel related to ' \
+ str(reg_str[k]) + '\n' + d_path['type'] + str(name))
plt.savefig(project_path+'fig/linear_model/mosaic/%s_withPCA_%s'\
%(d_path['type'] + str(name), str(reg_str[k]))+'.png')
#plt.show()
plt.close()
#Show the middle slice only
plt.imshow(b_pca_vols[:, :, 18, k], cmap='gray', alpha=0.5)
plt.colorbar()
plt.title('In brain voxel model - Slice 18 \n' \
'Projection on X%s \n %s'\
%(str(reg_str[k]),d_path['type'] + str(name)))
plt.savefig(\
project_path+\
'fig/linear_model/mosaic/middle_slice/%s_withPCA_middle_slice_%s'\
%(d_path['type'] + str(name), str(k))+'.png')
#plt.show()
#plt.clf()
plt.close()
# Residuals
MRSS_dict = {}
MRSS_dict['ds005' + d_path['type']] = {}
MRSS_dict['ds005' + d_path['type']]['drifts'] = {}
MRSS_dict['ds005' + d_path['type']]['pca'] = {}
for z in MRSS_dict['ds005' + d_path['type']]:
MRSS_dict['ds005' + d_path['type']][z]['MRSS'] = []
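# Mean residual sum of squares per voxel: MRSS = ||Y - X.betas||^2 / (n - rank(X)),
# computed for both design matrices so their fits can be compared.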
residuals = Y - X.dot(betas)
df = X.shape[0] - npl.matrix_rank(X)
MRSS = np.sum(residuals ** 2 , axis=0) / df
residuals_pca = Y - X_pca.dot(B_pca)
df_pca = X_pca.shape[0] - npl.matrix_rank(X_pca)
MRSS_pca = np.sum(residuals_pca ** 2 , axis=0) / df_pca
MRSS_dict['ds005' + d_path['type']]['drifts']['mean_MRSS'] = np.mean(MRSS)
MRSS_dict['ds005' + d_path['type']]['pca']['mean_MRSS'] = np.mean(MRSS_pca)
# Save the mean MRSS values to compare the performance
# of the design matrices
for design_matrix, beta, mrss, name in \
[(X, betas, MRSS, 'drifts'), (X_pca, B_pca, MRSS_pca, 'pca')]:
MRSS_dict['ds005' + d_path['type']][name]['p-values'] = []
MRSS_dict['ds005' + d_path['type']][name]['t-test'] = []
with open(project_path+'txt_output/MRSS/ds005%s_MRSS.json'\
%(d_path['type']), 'w') as file_out:
json.dump(MRSS_dict, file_out)
# SE = np.zeros(beta.shape)
# for i in range(design_matrix.shape[-1]):
# c = np.zeros(design_matrix.shape[-1])
# c[i]=1
# c = np.atleast_2d(c).T
# SE[i,:]= np.sqrt(\
# mrss* c.T.dot(npl.pinv(design_matrix.T.dot(design_matrix)).dot(c)))
# zeros = np.where(SE==0)
# SE[zeros] = 1
# t = beta / SE
# t[:,zeros] = 0
# # Get p value for t value using CDF of t distribution
# ltp = t_dist.cdf(abs(t), df)
# p = 1 - ltp # upper tail
# t_brain = t[in_brain_mask]
# p_brain = p[in_brain_mask]
#
# # Save 3D data in .nii files
# for k in range(1,4):
# t_nib = nib.Nifti1Image(t_brain[..., k], affine)
# nib.save(t_nib, project_path+'txt_output/%s/%s_t-test_%s.nii.gz'\
# %(name, d_path['type'] + str(name),str(reg_str[k])))
# p_nib = nib.Nifti1Image(p_brain[..., k], affine)
# nib.save(p_nib, project_path+'txt_output/%s/%s_p-values_%s.nii.gz'\
# %(name, d_path['type'] + str(name),str(reg_str[k])))
# pdb.set_trace()
# pdb.set_trace()
plt.close()
print("=")
print("======================================")
print("\n Noise and PCA analysis for filtered data done")
print("Design Matrix including drift terms stored in project_epsilon/txt_output/drifts/ \n\n")
print("Design Matrix including PCs terms stored in project_epsilon/txt_output/pca/\n\n")
print("Mean MRSS models results in project_epsilon/txt_output/MRSS/ds005filtered_MRSS.json\n\n")
|
ye-zhi/project-epsilon
|
code/utils/scripts/noise-pca_filtered_script.py
|
Python
|
bsd-3-clause
| 14,797
|
[
"Gaussian"
] |
f0d1e48df3e76da2a0962f1c74316d73f5d5be5f6d8943249bdc2506016cb5a9
|
# -*- coding: utf-8 -*-
"""
Django settings for echo_seeds project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join, dirname
from configurations import Configuration, values
BASE_DIR = dirname(dirname(__file__))
class Common(Configuration):
BASE_DIR = BASE_DIR
PROJECT_DIR = dirname(BASE_DIR)
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'suit', # Admin theme.
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'avatar', # for user avatars
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'django_extensions',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'users', # custom users app
'core',
'seedbank',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# MIGRATIONS CONFIGURATION
MIGRATION_MODULES = {
'sites': 'contrib.sites.migrations'
}
# END MIGRATIONS CONFIGURATION
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "G62O.9dUnhTrI[wqvv]YD]EM5@x'rm"
# END SECRET CONFIGURATION
# FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
# END FIXTURE CONFIGURATION
# EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
# END EMAIL CONFIGURATION
# MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("Brian Dant", 'briandant414@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# END MANAGER CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/echo_seeds')
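# (values.DatabaseURLValue can typically be overridden through a DATABASE_URL
# environment variable, e.g. DATABASE_URL=postgres://user:pass@host/echo_seeds)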
# END DATABASE CONFIGURATION
# CACHING
# Do this here because thanks to django-pylibmc-sasl and pylibmc
# memcacheify (used on heroku) is painful to install on windows.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# END CACHING
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# END GENERAL CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processors go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# END TEMPLATE CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# URL Configuration
ROOT_URLCONF = 'urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# End URL Configuration
# AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# END AUTHENTICATION CONFIGURATION
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
LOGIN_URL = "account_login"
# END Custom user app defaults
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
# END SLUGLIFIER
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# END LOGGING CONFIGURATION
# Your common stuff: Below this line define 3rd party library settings
|
briandant/echo-seeds
|
echo_seeds/config/common.py
|
Python
|
bsd-3-clause
| 8,943
|
[
"Brian"
] |
42827adf9252966a4499a04cf6e79c62ab57205c89e3193f4a95521fc5143668
|
'''
Created on 2013 mai 24
@author: peio
'''
import subprocess
import os
from tempfile import NamedTemporaryFile
from Bio import SeqIO
from Bio.Seq import Seq
class IsIndelError(Exception):
pass
class OutsideAlignment(Exception):
pass
class BetweenSegments(Exception):
pass
class SeqCoords(object):
'''This class creates a coord system relation between two sequences.
First it aligns the sequences and creates the coord system.
Internally it uses a 0-based coord system.
The coord system is a list of start, stop segments in both sequences,
e.g.:
1 2 3 4 5 6
01234567890123456789012345678901234567890123456789012345678901234
ATCTAGGCTGCTACGATTAGCTGATCGATGTTATCGTAGATCTAGCTGATCATGCTAGCTGATCG
ATCTAGGCTGCTACGA-TAGCTGATCGATGTTATCGTAGATCTAGCTGATCATGC-AGCTGATCG
0-15, 17-54, 56-64
0-15, 16-53, 54-62
1 2 3 4 5 6
01234567890123456789012345678901234567890123456789012345678901234
012345678901234567890123 4567890123456789012345678901 2345678901234
ATCTAGGCTGCTACGATTAGCTGA-CGATGTTATCGTAGATCTAGCTGATCAT-CTAGCTGATCG
ATCTAGGCT-CTACGATTAGCTGATCGATGTTATCGTAGATC-AGCTGATCATGCTAGCTGATCG
012345678 90123456789012345678901234567890 123456789012345678901234
0-8, 10-23, 24-40, 42-51, 52-62
0-8, 9-22, 24-40, 41-50, 52-62
'''
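# A minimal usage sketch (hypothetical records; assumes the EMBOSS 'water'
# binary is available on the PATH, since the alignment is run externally):
#
# from Bio.SeqRecord import SeqRecord
# rec1 = SeqRecord(Seq('ATCTAGGCTGCTACGATTAGCTGATCGATG'), id='seq1')
# rec2 = SeqRecord(Seq('ATCTAGGCTGCTACGATAGCTGATCGATG'), id='seq2')
# coords = SeqCoords(rec1, rec2)
# coords.to_seq2_pos(20)   # map position 20 of seq1 onto seq2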
def __init__(self, seq1, seq2):
"Both secuences are biopython secuences"
self.coord_system = self._get_coord_system(seq1, seq2)
self.seq1_name = seq1.id
self.seq2_name = seq2.id
self._seq2_len = len(seq2)
def _get_coord_system(self, seq1, seq2):
out_fhand, reverse = get_water_alignment(seq1, seq2)
self.reverse = reverse
coord_system = build_relations_from_aligment(out_fhand)
out_fhand.close()
return coord_system
def _reverse_pos(self, pos):
reverse = self.reverse
if reverse:
return self._seq2_len - pos - 1
else:
return pos
def _get_segment(self, pos, seq_name):
'returns the segment index of the given position'
segments = self.coord_system[seq_name]
for index, (start, stop) in enumerate(segments):
if pos >= start and pos <= stop:
return index, (start, stop)
if pos < segments[0][0] or pos > segments[-1][1]:
raise OutsideAlignment
else:
raise BetweenSegments
def _to_seq_pos(self, pos, to_seq1=True):
if to_seq1:
seq1_name = self.seq1_name
seq2_name = self.seq2_name
else:
seq2_name = self.seq1_name
seq1_name = self.seq2_name
segment2 = self._get_segment(pos, seq2_name)
segment2_index, segment2 = segment2
segment1 = self.coord_system[seq1_name][segment2_index]
return segment1[0] + pos - segment2[0]
def to_seq1_pos(self, seq2_pos):
seq2_pos = self._reverse_pos(seq2_pos)
return self._to_seq_pos(seq2_pos, to_seq1=True)
def to_seq2_pos(self, seq1_pos):
seq2_pos = self._to_seq_pos(seq1_pos, to_seq1=False)
return self._reverse_pos(seq2_pos)
def _to_seq_slice(self, start, end, to_seq1=True):
if to_seq1:
seq1_name = self.seq1_name
seq2_name = self.seq2_name
else:
seq2_name = self.seq1_name
seq1_name = self.seq2_name
stop = end - 1
segment2_start = self._get_segment(start, seq2_name)
segment2_stop = self._get_segment(stop, seq2_name)
segment2_index_start, segment2_start = segment2_start
segment2_index_stop, segment2_stop = segment2_stop
if segment2_index_start != segment2_index_stop:
raise BetweenSegments
segment1 = self.coord_system[seq1_name][segment2_index_start]
start = segment1[0] + start - segment2_start[0]
stop = segment1[0] + stop - segment2_stop[0]
return (start, stop + 1)
def to_seq1_slice(self, start, end):
if self.reverse:
start = self._reverse_pos(start)
end = self._reverse_pos(end)
slice2 = self._to_seq_slice(start, end, to_seq1=True)
if self.reverse:
return slice2[1], slice2[0]
return slice2
def to_seq2_slice(self, start, end):
slice1 = self._to_seq_slice(start, end, to_seq1=False)
if self.reverse:
start = self._reverse_pos(slice1[1])
end = self._reverse_pos(slice1[0])
else:
start, end = slice1
return (start, end)
def build_relations_from_aligment(fhand):
'It returns a relations dict given an alignment in markx10 format'
#print open(fhand.name).read()
# we parse the alignment
in_seq_section = 0
seq, al_start, seq_len = None, None, None
seq0_name = None
for line in fhand:
line = line.strip()
if not line:
continue
if line[0] == '>' and line[1] != '>':
if seq0_name is None:
seq0_name = line.split()[0][1:]
else:
seq1_name = line.split()[0][1:]
if in_seq_section:
seq0 = {'seq': seq,
'length': seq_len,
'al_start': al_start - 1,
'name': seq0_name}
in_seq_section += 1
seq = ''
continue
if not in_seq_section:
continue
if '; sq_len:' in line:
seq_len = int(line.split(':')[-1])
if '; al_display_start:' in line:
al_start = int(line.split(':')[-1])
if line[0] not in (';', '#'):
seq += line
seq1 = {'seq': seq,
'length': seq_len,
'al_start': al_start - 1,
'name': seq1_name}
#now we get the segments
gap = '-'
segments = []
segment0, segment1 = None, None
seq0_start, seq1_start = seq0['al_start'], seq1['al_start']
seq0_start_delta, seq1_start_delta = seq0_start, seq1_start
seq0_delta, seq1_delta = 0, 0
for index, (nucl0, nucl1) in enumerate(zip(seq0['seq'], seq1['seq'])):
seq0_index = seq0_start_delta + index - seq0_delta
seq1_index = seq1_start_delta + index - seq1_delta
if nucl0 == gap:
segment0 = seq0_start, seq0_index - 1
segment1 = seq1_start, seq1_index - 1
seq0_start = seq0_index
seq1_start = seq1_index + 1
seq0_delta += 1
elif nucl1 == gap:
segment0 = seq0_start, seq0_index - 1
segment1 = seq1_start, seq1_index - 1
seq1_start = seq1_index
seq0_start = seq0_index + 1
seq1_delta += 1
if segment0 and segment1:
segment = {seq0['name']: segment0, seq1['name']: segment1}
segments.append(segment)
segment0, segment1 = None, None
else:
segment0 = seq0_start, seq0_index
segment1 = seq1_start, seq1_index
segment = {seq0['name']: segment0, seq1['name']: segment1}
segments.append(segment)
relations = {}
for seg in segments:
for seq_name, limits in seg.items():
if seq_name not in relations:
relations[seq_name] = []
relations[seq_name].append(limits)
return relations
def _get_water_score(fhand):
for line in fhand:
if line.startswith('# Score:'):
return float(line.split(':')[1].strip())
return None
def get_water_alignment(seq1, seq2, gap_open=10.0, gap_extend=0.5,
out_fmt='markx10'):
out_fhand = NamedTemporaryFile()
_do_water_alignment(seq1, seq2, out_fhand, gap_open=gap_open,
gap_extend=gap_extend, out_fmt=out_fmt, reverse2=False)
out_fhand2 = NamedTemporaryFile()
_do_water_alignment(seq1, seq2, out_fhand2, gap_open=gap_open,
gap_extend=gap_extend, out_fmt=out_fmt, reverse2=True)
forw_score = _get_water_score(out_fhand)
rev_score = _get_water_score(out_fhand2)
if forw_score > rev_score:
out_fhand.seek(0)
return out_fhand, False
else:
out_fhand2.seek(0)
return out_fhand2, True
def _do_water_alignment(seq1, seq2, out_fhand, gap_open=10.0, gap_extend=0.5,
out_fmt='markx10', reverse2=False):
seq1_fhand = NamedTemporaryFile()
seq2_fhand = NamedTemporaryFile()
SeqIO.write(seq1, seq1_fhand, 'fasta')
SeqIO.write(seq2, seq2_fhand, 'fasta')
seq1_fhand.flush()
seq2_fhand.flush()
cmd = ['water', '-asequence', seq1_fhand.name, '-bsequence',
seq2_fhand.name, '-outfile', out_fhand.name, '-gapopen',
str(gap_open), '-gapextend', str(gap_extend), '-aformat3', out_fmt]
if reverse2:
cmd.append('-sreverse2')
stdout = open(os.devnull, 'w')
stderr = open(os.devnull, 'w')
subprocess.check_call(cmd, stdout=stdout, stderr=stderr)
def get_amino_change(seq_ref, seq_estscan, snv):
if snv.is_indel:
raise IsIndelError()
position = snv.pos
alt_allele = snv.alleles[1]
seq_coord = SeqCoords(seq_ref, seq_estscan)
estscan_pos = seq_coord.to_seq2_pos(position)
if estscan_pos is None:
return None
estscan_frame = (estscan_pos % 3) + 1
estscan_start = estscan_pos + estscan_frame - 1
estscan_stop = estscan_start + 2
# check if there is a frameshift in the ref_seq
ref_slice = seq_coord.to_seq1_slice(estscan_start, estscan_stop)
if ref_slice is None:
return None
ref_seq_aa = seq_ref[ref_slice[0]: ref_slice[1] + 1].seq[:3].translate()
estscan_seq_aa = seq_estscan[estscan_start: estscan_stop + 1].seq[:3]
ref_aa = str(estscan_seq_aa.translate())
if str(ref_seq_aa) != str(ref_aa):
return None
aminos = {'ref_amino': ref_aa, 'alt_amino': []}
for alt_allele in snv.alleles[1:]:
alt_seq = [nucl for nucl in (estscan_seq_aa)]
alt_seq[estscan_frame - 1] = alt_allele
alt_seq = Seq("".join(alt_seq))
alt_aa = str(alt_seq.translate())
aminos['alt_amino'].append(alt_aa)
return aminos
|
JoseBlanca/vcf_crumbs
|
vcf_crumbs/prot_change.py
|
Python
|
gpl-3.0
| 10,257
|
[
"Biopython"
] |
8c29496d60888c492918b0c445988a7b9e2312fde71b5b2a4f91e3d928bf4613
|
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
import requests
import logging
import time
from nose.tools import assert_equals
from nose.tools import assert_true
from nose.tools import assert_is
from nose.tools import assert_in
from nose.tools import assert_raises
from nose.tools import assert_raises_regexp
import rmock
from rmock import call
from rmock.errors import RmockError
from rmock.tools import find_random_port
from testtools import http_call
class TestRmockStartStopServer(object):
def test_server_already_running(self):
port = find_random_port()
with rmock.run(port=port):
assert_raises_regexp(
RmockError,
'error starting server process.*port.*%s.*' % port,
rmock.run,
port=port
)
def test_stop_server(self):
port = find_random_port()
with rmock.run(port=port) as mock:
#time.sleep(1)
assert_equals(http_call(mock, "func").text, '')
mock.stop_server()
# to prevent connection reset by peer socket error
time.sleep(.1)
assert_raises((requests.ConnectionError),
http_call,
mock,
'func')
mock.start_server()
assert_equals(http_call(mock, "func").text, '')
@rmock.patch("http", classvar="http_mock")
class TestRmockStartStopServerClassDecorator(object):
'''When patch is used as a class decorator, the mock server should be started
and ready to handle requests at all times, even if it was stopped in a previous test execution
'''
def test_function1(self):
assert_equals(http_call(self.http_mock, "func").text, '')
self.http_mock.stop_server()
def test_function2(self):
assert_equals(http_call(self.http_mock, "func").text, '')
self.http_mock.stop_server()
|
tikan/rmock
|
tests/func_tests/test_start_stop_server.py
|
Python
|
lgpl-3.0
| 2,709
|
[
"VisIt"
] |
5d5e364665ba28fb04c2a44dc9380028a3321b1518b04abdced864f067c364c4
|
from __pyosshell__ import *
from __molecules__ import *
from __qmshell__ import *
import xml.dom.minidom as xml
def WriteSystemXml(name,gro,xyz,xml = 'system.xml'):
mol = SingleMoleculeFromGro(gro)[0]
mol.fragment()
mol.make_dict()
mol.name = name
es,xyz = e_xyz_from_xyz(xyz)
# CHECK CONGRUENCY
for atm, e, xyz in zip(mol.atoms,es,xyz):
print atm.fragName, atm.name, "=>", e, xyz
assert atm.name[0:1] == e or atm.name[0:2].lower() == e.lower()
outt = open(xml,'w')
# MOLECULE HEADER
outt.write('<topology>\n')
outt.write('\t<molecules>\n')
outt.write('\t<molecule>\n')
outt.write('\t\t<name>%s</name>\n' % mol.name)
outt.write('\t\t<mdname>%s</mdname>\n' % mol.name)
outt.write('\t\t<segments>\n')
# SEGMENT HEADER
outt.write('\t\t<segment>\n')
outt.write('\t\t\t<name>%s</name>\n' % mol.name)
# XYZ FILE
outt.write('\t\t\t<qmcoords>QC_FILES/%s.xyz</qmcoords>\n' % mol.name)
# MPS FILES
outt.write('\t\t\t<multipoles_n>MP_FILES/%s_n.mps</multipoles_n>\n' % (mol.name))
outt.write('\t\t\t<multipoles_h>MP_FILES/%s_h.mps</multipoles_h>\n' % (mol.name))
outt.write('\t\t\t<multipoles_e>MP_FILES/%s_e.mps</multipoles_e>\n' % (mol.name))
outt.write('\t\t\t<map2md>0</map2md>\n')
# FRAGMENTS
outt.write('\t\t\t<fragments>\n')
for frag in mol.frags:
# FRAGMENT HEADER
fragName = '%s%d' % (frag.name,mol.frags.index(frag))
outt.write('\t\t\t<fragment>\n')
outt.write('\t\t\t\t<name>%s</name>\n' % fragName)
# MD-ATOMS
mdatoms = ''
for atom in frag.atoms:
mdatoms += ' %-11s ' % ('%d:%s:%s' % (atom.fragId, atom.fragName, atom.name))
outt.write('\t\t\t\t<mdatoms>%s</mdatoms>\n' % mdatoms)
# QM-ATOMS, M-POLES, WEIGHTS
qmatoms = ''
weights = ''
ms = { 'H' : 1, 'C' : 12, 'N' : 14, 'O' : 16, 'S' : 32, 'Zn' : 65 }
for atom in frag.atoms:
idx = mol.atoms.index(atom)
typ = es[idx]
qmatoms += ' %-11s ' % ('%d:%s' % (atom.Id,typ))
weights += ' %-11s ' % ('%d' % ms[typ])
outt.write('\t\t\t\t<qmatoms>%s</qmatoms>\n' % qmatoms)
outt.write('\t\t\t\t<mpoles> %s</mpoles>\n' % qmatoms)
outt.write('\t\t\t\t<weights>%s</weights>\n' % weights)
# LOCAL FRAME
localframe = ''
count_atoms = 0
for atom in frag.atoms:
if count_atoms == 3: break
idx = mol.atoms.index(atom)
typ = es[idx]
if typ != 'H':
count_atoms += 1
localframe += ' %d ' % atom.Id
outt.write('\t\t\t\t<localframe>%s</localframe>\n' % localframe)
outt.write('\t\t\t</fragment>\n')
# CLOSING TAGS
outt.write('\t\t\t</fragments>\n')
outt.write('\t\t</segment>\n')
outt.write('\t\t</segments>\n')
outt.write('\t</molecule>\n')
outt.write('\t</molecules>\n')
outt.write('</topology>\n')
outt.close()
return
def WriteSystemXmlUsingPattern(name, gro, xyz, mpsfile, gropattern, xyzpattern, mpspattern, xml='system.xml', outt=None, map2md=False,
segment_properties={}):
assert len(gropattern) == len(xyzpattern)
assert len(gropattern) == len(mpspattern)
mol = SingleMoleculeFromGro(gro)[0]
mol.fragment()
mol.make_dict()
mol.name = name
es,xyz = e_xyz_from_xyz(xyz)
mps = mps_from_mpsfile(mpsfile)
own_fhand = outt is None
if own_fhand:
outt = open(xml,'w')
outt.write('<topology>\n')
outt.write('\t<molecules>\n')
# MOLECULE HEADER
outt.write('\t<molecule>\n')
outt.write('\t\t<name>%s</name>\n' % mol.name)
outt.write('\t\t<mdname>%s</mdname>\n' % mol.name)
outt.write('\t\t<segments>\n')
# SEGMENT HEADER
outt.write('\t\t<segment>\n')
outt.write('\t\t\t<name>%s</name>\n' % mol.name)
# XYZ FILE
outt.write('\t\t\t<qmcoords>QC_FILES/%s.xyz</qmcoords>\n' % mol.name)
# OPTIONAL PROPERTIES (WRITTEN IF SUPPLIED)
orbital_keys = ['orbitals', 'basisset', 'torbital_h', 'torbital_e']
energy_keys = ['U_cC_nN_h', 'U_nC_nN_h', 'U_cN_cC_h', 'U_cC_nN_e', 'U_nC_nN_e', 'U_cN_cC_e']
for key in orbital_keys:
if segment_properties.has_key(key):
outt.write('\t\t\t<{key:s}>{val:s}</{key:s}>\n'.format(key=key, val=str(segment_properties[key])))
for key in energy_keys:
if segment_properties.has_key(key):
outt.write('\t\t\t<{key:s}>{val:s}</{key:s}>\n'.format(key=key, val=str(segment_properties[key])))
# MPS FILES
outt.write('\t\t\t<multipoles_n>MP_FILES/%s_n.mps</multipoles_n>\n' % (mol.name))
outt.write('\t\t\t<multipoles_h>MP_FILES/%s_h.mps</multipoles_h>\n' % (mol.name))
outt.write('\t\t\t<multipoles_e>MP_FILES/%s_e.mps</multipoles_e>\n' % (mol.name))
outt.write('\t\t\t<map2md>%d</map2md>\n' % (0 if map2md == False else 1))
# FRAGMENTS
outt.write('\t\t\t<fragments>\n')
ctp_frag_count = 0
gro_atom_count = 0
xyz_atom_count = 0
mps_atom_count = 0
for ctp_frag_size, xyz_frag_size, mps_frag_size in zip(gropattern, xyzpattern, mpspattern):
ctp_frag_count += 1
first_gro_atom_in_frag = mol.atoms[gro_atom_count]
fragName = '%s%d' % (first_gro_atom_in_frag.fragName, ctp_frag_count)
outt.write('\t\t\t<fragment>\n')
outt.write('\t\t\t\t<name>%s</name>\n' % fragName)
# MD-ATOMS, QM-ATOMS, M-POLES, WEIGHTS
mdatoms = ''
qmatoms = ''
mpoles = ''
weights = ''
local_frame_str = ''
local_frame_ids = []
ms = { 'H' : 1, 'C' : 12, 'N' : 14, 'O' : 16, 'S' : 32, 'Zn' : 65 }
for i in range(ctp_frag_size):
gro_atom_count += 1
atom = mol.atoms[gro_atom_count-1]
mdatoms += ' %-11s ' % ('%d:%s:%s' % (atom.fragId, atom.fragName, atom.name))
if i < xyz_frag_size:
xyz_atom_count += 1
id = xyz_atom_count
typ = es[id-1]
m = ms[typ]
qmatoms += ' %-11s ' % ('%d:%s' % (id, typ))
weights += ' %-11s ' % ('%d' % m)
if len(local_frame_ids) < 3 and typ != 'H':
local_frame_ids.append(id)
local_frame_str += ' %d ' % id
else:
qmatoms += ' %-11s ' % ':'
weights += ' %-11s ' % '0'
if i < mps_frag_size:
mps_atom_count += 1
id = mps_atom_count
typ = mps[id-1].e
mpoles += ' %-11s ' % ('%d:%s' % (id, typ))
else:
pass
outt.write('\t\t\t\t<mdatoms>%s</mdatoms>\n' % mdatoms)
outt.write('\t\t\t\t<qmatoms>%s</qmatoms>\n' % qmatoms)
outt.write('\t\t\t\t<mpoles> %s</mpoles>\n' % mpoles)
outt.write('\t\t\t\t<weights>%s</weights>\n' % weights)
outt.write('\t\t\t\t<localframe>%s</localframe>\n' % local_frame_str)
outt.write('\t\t\t</fragment>\n')
# CLOSING TAGS
outt.write('\t\t\t</fragments>\n')
outt.write('\t\t</segment>\n')
outt.write('\t\t</segments>\n')
outt.write('\t</molecule>\n')
if own_fhand:
outt.write('\t</molecules>\n')
outt.write('</topology>\n')
outt.close()
return
def TopItpFromXyzGro(xyzfile, grofile, molname):
mols = SingleMoleculeFromGro(grofile)
atoms = mols[0].atoms
if xyzfile != None:
elem,xyz = e_xyz_from_xyz(xyzfile)
else:
elem = [ atom.name[0:1] for atom in atoms ]
xyz = [ np.array([0,0,0]) for i in range(len(atoms)) ]
# Sanity checks
assert len(atoms) == len(elem) == len(xyz)
for atm,e in zip(atoms,elem):
if atm.name[0:1] != e[0:1]:
print "WARNING: Possible mismatch", atm.name, "<>", e
os.system('mkdir -p FORCEFIELD')
os.chdir('FORCEFIELD')
# WRITE TOPOLOGY FILE HEADER
ofs = open('%s.itp' % molname, 'w')
ofs.write('[ moleculetype ]\n')
ofs.write('; Name nrexcl\n')
ofs.write('%s 3\n' % molname)
ofs.write('\n')
ofs.write('[ atoms ]\n')
ofs.write('; nr type rsdnr rsd atom cgnr charge\n')
# List of atoms in molecule
for atm,e in zip(atoms,elem):
ofs.write('{nr:5d} {typ:3s} {rsdnr:5d} {rsd:3s} {atom:3s} {cgnr:5d} {chrg:1.3f}\n'.format(\
nr=atm.Id, typ=e, rsdnr=atm.fragId, rsd=atm.fragName, atom=atm.name, cgnr=atm.fragId+1, chrg=0.0))
ofs.write('\n')
# Potentials
ofs.write('[ bonds ]\n')
ofs.write('; ai aj funct c0 c1 c2 c3\n')
ofs.write('\n')
ofs.write('[ pairs ]\n')
ofs.write('; ai aj funct c0 c1 c2 c3\n')
ofs.write('\n')
## System
#ofs.write('[ system ]\n')
#ofs.write('%1s\n' % molname)
# WRITE ITP HEADER
# Collect types
atps = []
for e in elem:
if not PTABLE[e] in atps:
atps.append(PTABLE[e])
ofs = open('%s_forcefield.itp' % molname, 'w')
# Atom types
ofs.write('[ atomtypes ]\n')
ofs.write('; name mass charge ptype sigma eps\n')
for atp in atps:
ofs.write(' {name:2s} {mass:1.5f} +0.00 A 0.325 0.293\n'.format(\
name=atp, mass=atp.mass))
ofs.close()
# DEFAULT ITP
ofs = open('default.itp', 'w')
# Defaults
ofs.write('[ defaults ]\n')
ofs.write('; nbfunc comb-rule gen-pairs fudgeLJ fudgeQQ\n')
ofs.write(' 1 3 yes 0.5 0.5\n')
ofs.write('\n')
ofs.close()
os.chdir('../')
# SYSTEM TOP
ofs = open('system.top', 'w')
ofs.write('; FORCEFIELD\n')
ofs.write('#include "./FORCEFIELD/default.itp"\n')
ofs.write('#include "./FORCEFIELD/%s_forcefield.itp"\n' % molname)
ofs.write('\n')
ofs.write('; MOLECULES\n')
ofs.write('#include "./FORCEFIELD/%s.itp"\n' % molname)
ofs.write('\n')
ofs.write('[ system ]\n')
ofs.write('%s\n' % molname)
ofs.write('\n')
ofs.write('[ molecules ]\n')
ofs.write('%s 1\n' % molname)
ofs.write('\n')
ofs.close()
return
def ConvertToGhost(mol):
# Atom name conversion rule
# name = 'C' + name[0:1] + counter
# Molecule name
# name = 'G' + name
# Fragment name
# name = 'G' + name[0:2]
series = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if mol.frags == []:
mol.fragment()
mol.make_dict()
for frag in mol.frags:
assert len(frag.atoms) <= len(series)
for frag in mol.frags:
counter = 0
frag.name = 'C' + frag.name[0:2]
for atom in frag.atoms:
atom.name = 'C%s%s' % (atom.name[0:1], series[counter])
atom.fragName = 'G' + atom.fragName[0:2]
counter += 1
mol.name = 'G' + mol.name
return mol
def GhostTopItpFromXyzGro(xyzfile, grofile, molname):
mols = SingleMoleculeFromGro(grofile)
atoms = mols[0].atoms
elem,xyz = e_xyz_from_xyz(xyzfile)
# Sanity checks
assert len(atoms) == len(elem) == len(xyz)
for atm,e in zip(atoms,elem):
if atm.name[0:1] != e[0:1]:
print "WARNING: Possible mismatch", atm.name, "<>", e
# Convert to ghost
mol = ConvertToGhost(mols[0])
atoms = mol.atoms
elem = [ 'C' for i in range(len(atoms)) ]
mol.write_gro(molname+'.gro')
# Write ghost xyz
e_xyz_to_xyz(elem, xyz, molname + '.xyz')
os.system('mkdir -p FORCEFIELD')
os.chdir('FORCEFIELD')
# WRITE TOPOLOGY FILE HEADER
ofs = open('%s.itp' % molname, 'w')
ofs.write('[ moleculetype ]\n')
ofs.write('; Name nrexcl\n')
ofs.write('%s 3\n' % molname)
ofs.write('\n')
ofs.write('[ atoms ]\n')
ofs.write('; nr type rsdnr rsd atom cgnr charge\n')
# List of atoms in molecule
for atm,e in zip(atoms,elem):
ofs.write('{nr:5d} {typ:3s} {rsdnr:5d} {rsd:3s} {atom:3s} {cgnr:5d} {chrg:1.3f}\n'.format(\
nr=atm.Id, typ=e, rsdnr=atm.fragId, rsd=atm.fragName, atom=atm.name, cgnr=atm.fragId+1, chrg=0.0))
ofs.write('\n')
# Potentials
ofs.write('[ bonds ]\n')
ofs.write('; ai aj funct c0 c1 c2 c3\n')
ofs.write('\n')
ofs.write('[ pairs ]\n')
ofs.write('; ai aj funct c0 c1 c2 c3\n')
ofs.write('\n')
## System
#ofs.write('[ system ]\n')
#ofs.write('%1s\n' % molname)
# WRITE ITP HEADER
# Collect types
atps = []
for e in elem:
if not PTABLE[e] in atps:
atps.append(PTABLE[e])
ofs = open('%s_forcefield.itp' % molname, 'w')
# Atom types
ofs.write('[ atomtypes ]\n')
ofs.write('; name mass charge ptype sigma eps\n')
for atp in atps:
ofs.write(' {name:2s} {mass:1.5f} +0.00 A 0.325 0.293\n'.format(\
name=atp, mass=atp.mass))
ofs.close()
# DEFAULT ITP
ofs = open('default.itp', 'w')
# Defaults
ofs.write('[ defaults ]\n')
ofs.write('; nbfunc comb-rule gen-pairs fudgeLJ fudgeQQ\n')
ofs.write(' 1 3 yes 0.5 0.5\n')
ofs.write('\n')
ofs.close()
os.chdir('../')
# SYSTEM TOP
ofs = open('system.top', 'w')
ofs.write('; FORCEFIELD\n')
ofs.write('#include "./FORCEFIELD/default.itp"\n')
ofs.write('#include "./FORCEFIELD/%s_forcefield.itp"\n' % molname)
ofs.write('\n')
ofs.write('; MOLECULES\n')
ofs.write('#include "./FORCEFIELD/%s.itp"\n' % molname)
ofs.write('\n')
ofs.write('[ system ]\n')
ofs.write('%s\n' % molname)
ofs.write('\n')
ofs.write('[ molecules ]\n')
ofs.write('%s 1\n' % molname)
ofs.write('\n')
ofs.close()
return
def ctp_map_auto(sql='state.sql', xml='system.xml', md_files='MD_FILES'):
abort = False
ext_dict = dict_by_ext(md_files)
# MD COORDINATE + TOPOLOGY FILE
tpr = ext_dict['tpr']
try:
gro = ext_dict['gro']
except KeyError:
gro = ext_dict['pdb']
except KeyError:
print "No coordinate file in", os.getcwd(), md_files
abort = True
# DATABASE + MAPPING FILE
if sql in os.listdir('./'):
print "ERROR SQL-file '%s' already exists, will abort." % sql
abort = True
if not xml in os.listdir('./'):
print "ERROR XML-file '%s' missing, will abort." % xml
abort = True
# EXECUTE
opts = '-c %s -t %s -s %s -f %s' % (gro, tpr, xml, sql)
print 'ctp_map %s' % (opts)
if abort: sys.exit(1)
os.system('ctp_map %s' % opts)
return
def ctp_run_auto(exe, xml='options.xml', sql='state.sql', save=0, threads=1):
abort = False
# RUN THROUGH CHECKLIST
if not xml in os.listdir('./'):
print "ERROR Options file '%s' missing, will abort." % xml
abort = True
if not sql in os.listdir('./'):
print "ERROR Sql file '%s' missing, will abort." % sql
abort = True
# EXECUTE
opts = '-e "%s" -o %s -f %s -s %d -t %d' % (exe,xml,sql,save,threads)
print 'ctp_run %s' % (opts)
if abort: sys.exit(1)
os.system('ctp_run %s' % opts)
return
def write_xqmp_options(jobfile):
ofs = open('options.xml','w')
ofs.write('''<options>
<xqmultipole>
<multipoles>system.xml</multipoles> <!-- XML allocation polar sites -> fragment -->
<control>
''')
ofs.write(' <job_file>%s</job_file>\n' % jobfile)
ofs.write(''' <emp_file>mps.tab</emp_file> <!-- Allocation of .mps files to segs; for template, run 'stateserver' with key = 'emp' -->
<pdb_check>0</pdb_check> <!-- Output - Check mapping of polar sites -->
<!--write_chk></write_chk--> <!-- Write x y z charge file with dipoles split onto point charges spaced 1fm apart -->
<format_chk>xyz</format_chk> <!-- 'gaussian' or 'xyz' -->
<split_dpl>1</split_dpl> <!-- '0' do not split dipoles onto point charges, '1' do split -->
<dpl_spacing>1e-4</dpl_spacing> <!-- Spacing a [nm] to be used when splitting dipole onto point charges: d = q * a -->
</control>
<coulombmethod>
<method>cut-off</method>
<cutoff1>3.0</cutoff1>
<cutoff2>6.0</cutoff2>
</coulombmethod>
<tholemodel>
<induce>1</induce>
<induce_intra_pair>1</induce_intra_pair>
<exp_damp>0.39</exp_damp>
<scaling>0.25 0.50 0.75</scaling>
</tholemodel>
<convergence>
<wSOR_N>0.30</wSOR_N>
<wSOR_C>0.30</wSOR_C>
<max_iter>512</max_iter>
<tolerance>0.001</tolerance>
</convergence>
</xqmultipole>
</options>
''')
ofs.close()
return
def write_ewald_options(jobfile):
ofs = open('options.xml','w')
ofs.write('''<options>
<ewald>
<multipoles>system.xml</multipoles> <!-- XML allocation polar sites -> fragment -->
<control>
''')
ofs.write(' <job_file>%s</job_file>\n' % jobfile)
ofs.write(''' <mps_table>mps.tab</mps_table> <!-- Allocation of .mps files to segs; for template, run 'stateserver' with key = 'emp' -->
<pdb_check>0</pdb_check> <!-- Output - Check mapping of polar sites -->
</control>
<coulombmethod>
<method>ewald</method>
<cutoff>12.0</cutoff>
<shape>xyslab</shape>
</coulombmethod>
<polarmethod>
<method>thole</method>
<induce>1</induce>
<cutoff>3.0</cutoff>
</polarmethod>
<convergence>
<energy>1e-5</energy>
<kfactor>100</kfactor>
<rfactor>6.0</rfactor>
<kmetric>1 1 1</kmetric>
</convergence>
</ewald>
</options>
''')
ofs.close()
return
class Batch(object):
def __init__(self, xmlfile = None):
self.jobs = []
if xmlfile != None:
tree = xml.parse(xmlfile)
for node in tree.getElementsByTagName('job'):
self.jobs.append(Job(node=node))
return
def AddJob(self, jobTag, jobInput):
jobId = len(self.jobs)+1
newJob = Job(jobId,jobTag,jobInput)
self.jobs.append(newJob)
return
def WriteToFile(self, outFile = 'jobs.xml'):
if outFile in os.listdir('./'):
print "Already exists:", os.getcwd(), outFile
sys.exit(1)
outt = open(outFile,'w')
outt.write('<jobs>\n')
for job in self.jobs:
job.WriteToStream(outt)
outt.write('</jobs>\n')
outt.close()
return
def ProcessAsEwaldSiteJobs(self, states=['n','e','h']):
segID_state_dict = {}
for job in self.jobs:
jobID = int(job.id_)
tag = job.tag_.split(':')
out = job.output_.split()
stat = job.status_
segID = int(tag[0])
segName = tag[1]
state = tag[2]
#x = float(out[35])
#y = float(out[36])
#z = float(out[37])
pos = np.array([0,0,0])
pp = float(out[3])
if state not in states: continue
jobResult = JobResultXqm(jobID, segID, segName, state, pos, 0, pp, 0)
try:
segID_state_dict[segID][state] = jobResult
except KeyError:
segID_state_dict[segID] = {}
segID_state_dict[segID][state] = jobResult
# SUBTRACT ENERGIES TO OBTAIN IP, EA
assert 'n' in states
chrgStates = states[:]
chrgStates.remove('n')
ips_eas = { 'e' : [], 'h' : [] }
keyIDs = segID_state_dict.keys()
keyIDs.sort()
for ID in keyIDs:
for state in chrgStates:
n = segID_state_dict[ID]['n']
c = segID_state_dict[ID][state]
cn = n.SubtractFrom(c)
ips_eas[state].append(cn)
#n.PrintInfo()
#c.PrintInfo()
#cn.PrintInfo()
return ips_eas
def ProcessAsXqmSiteJobs(self, states=['n','e','h']):
# OUTPUT STRUCTURE
# 0 1 2 3 4 5 6 7
# 1 1:C60:n TT +0.0000000 PP +0.0000000 PU +0.0000000
#
# 8 9 10 11 12 13 14 15
# UU +0.0000000 F00 +0.0000000 F01 +0.0000000 F02 +0.0000000
#
# 16 17 18 19 20 21 22 23
# F11 +0.0000000 F12 +0.0000000 M0 +0.0000000 M1 +0.0000000
#
# 24 25 26 27 28 29 30 31 32 33
# M2 +0.0000000 |QM0| 1 |MM1| 30 |MM2| 90 IT 0
#
# 34 35 36 37
# XYZ +4.5579834 +1.0736500 +0.3511333
#
# ASSEMBLE INTO DICTIONARY
segID_state_dict = {}
for job in self.jobs:
jobID = int(job.id_)
tag = job.tag_.split(':')
out = job.output_.split()
stat = job.status_
segID = int(tag[0])
segName = tag[1]
state = tag[2]
x = float(out[35])
y = float(out[36])
z = float(out[37])
pos = np.array([x,y,z])
tt = float(out[3])
pp = float(out[5])
pu = float(out[7])
if state not in states: continue
jobResult = JobResultXqm(jobID, segID, segName, state, pos, tt, pp, pu)
try:
segID_state_dict[segID][state] = jobResult
except KeyError:
segID_state_dict[segID] = {}
segID_state_dict[segID][state] = jobResult
# SUBTRACT ENERGIES TO OBTAIN IP, EA
assert 'n' in states
chrgStates = states[:]
chrgStates.remove('n')
ips_eas = { 'e' : [], 'h' : [] }
keyIDs = segID_state_dict.keys()
keyIDs.sort()
for ID in keyIDs:
for state in chrgStates:
n = segID_state_dict[ID]['n']
c = segID_state_dict[ID][state]
cn = n.SubtractFrom(c)
ips_eas[state].append(cn)
#n.PrintInfo()
#c.PrintInfo()
#cn.PrintInfo()
return ips_eas
class JobResultXqm(object):
def __init__(self, jobID, segID, segName, state, pos, tt, pp, pu):
self.jobID = jobID
self.segID = segID
self.segName = segName
self.state = state
self.pos = pos
self.tt = tt
self.pp = pp
self.pu = pu
return
def PrintInfo(self):
print "ID %5d %5d %-10s %-2s XYZ %+1.7e %+1.7e %+1.7e TT %+1.7e PP %+1.7e PU %+1.7e" % \
(self.jobID, self.segID, self.segName, self.state,
self.pos[0], self.pos[1], self.pos[2], self.tt, self.pp, self.pu)
return
def SubtractFrom(self, other):
assert other.segID == self.segID
assert other.segName == self.segName
if self.state == 'n': assert other.state in ['e','h']
elif self.state in ['e','h']: assert other.state == 'n'
else: assert False # State combination other than 'n' <> ['e','h']? Error.
dtt = other.tt - self.tt
dpp = other.pp - self.pp
dpu = other.pu - self.pu
dstate = '%s%s' % (other.state,self.state)
return JobResultXqm(0, self.segID, self.segName, dstate, self.pos, dtt, dpp, dpu)
class JobResultEwald(object):
def __init__(self, jobID, segID, segName, state, pos, pp):
self.jobID = jobID
self.segID = segID
self.segName = segName
self.state = state
self.pos = pos
self.pp = pp
class Job(object):
def __init__(self, id_ = None, tag_ = None, input_ = None, node = None):
self.id_ = id_
self.tag_ = tag_
self.input_ = input_
self.status_ = 'AVAILABLE'
self.time_ = None
self.host_ = None
self.output_ = None
self.error_ = None
self.node_ = node
if node != None:
self.LoadFromXmlNode(node)
return
def LoadFromXmlNode(self, node):
self.id_ = node.getElementsByTagName('id')[0].firstChild.nodeValue
self.tag_ = node.getElementsByTagName('tag')[0].firstChild.nodeValue
self.input_ = node.getElementsByTagName('input')[0].firstChild.nodeValue
status = node.getElementsByTagName('status')
if status == []: self.status_ = 'AVAILABLE'
else: self.status_ = status[0].firstChild.nodeValue
time = node.getElementsByTagName('time')
if time == []: self.time_ = None
else: self.time_ = time[0].firstChild.nodeValue
host = node.getElementsByTagName('host')
if host == []: self.host_ = None
else: self.host_ = host[0].firstChild.nodeValue
output = node.getElementsByTagName('output')
if output == []: self.output_ = None
else: self.output_ = output[0].firstChild.nodeValue
error = node.getElementsByTagName('error')
if error == []: self.error_ = None
else:
if self.status_ == 'COMPLETE':
try:
self.error_ = error[0].firstChild.nodeValue
print "Has error", self.id_, self.tag_, self.input_, ":", self.error_
except AttributeError:
#print "Has error", self.id_, self.tag_, self.input_, ", yet complete."
pass
return
def WriteToStream(self, ofs):
# TODO Extend this function to incorporate output, timestamp, ...
ofs.write('<job>\n')
ofs.write('\t<id>%d</id>\n' % self.id_)
ofs.write('\t<tag>%s</tag>\n' % self.tag_)
ofs.write('\t<input>%s</input>\n' % self.input_)
ofs.write('</job>\n')
return
def jobs_from_pop(pop, includeList = None, states = ['n','e','h']):
# INCLUDE LIST
checkInclude = True
# GENERATE BATCH
batch = Batch()
if includeList == None:
checkInclude = False
for mol in pop.mols:
if checkInclude and mol.Id not in includeList:
continue
for state in states:
jobTag = '%d:%s:%s' % (mol.Id,mol.name,state)
jobInput = '%d:%s:MP_FILES/%s_%s.mps' % (mol.Id,mol.name,mol.name,state)
batch.AddJob(jobTag,jobInput)
else:
for idx in includeList:
mol = pop.mols[idx-1]
assert idx == mol.Id
for state in states:
jobTag = '%d:%s:%s' % (mol.Id,mol.name,state)
jobInput = '%d:%s:MP_FILES/%s_%s.mps' % (mol.Id,mol.name,mol.name,state)
batch.AddJob(jobTag,jobInput)
return batch
|
12AngryMen/votca-scripts
|
lib/Carlstuff/evaporation/__votca_ctp__.py
|
Python
|
apache-2.0
| 23,221
|
[
"Gaussian"
] |
52b91164c7c466df916cf53ee1f549e982f6ef7a8cde3acae2622c5e9469d6c3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Greyscale ℓ2-TV Denoising
=========================
This example demonstrates the use of class :class:`.tvl2.TVL2Denoise` for removing Gaussian white noise from a greyscale image using Total Variation regularization with an ℓ2 data fidelity term (ℓ2-TV denoising).
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.admm import tvl2
from sporco import util
from sporco import metric
from sporco import plot
"""
Load reference image.
"""
img = util.ExampleImages().image('monarch.png', scaled=True,
idxexp=np.s_[:,160:672], gray=True)
"""
Construct test image corrupted by Gaussian white noise with a 0.05 standard deviation.
"""
np.random.seed(12345)
imgn = img + np.random.normal(0.0, 0.05, img.shape)
"""
Set regularization parameter and options for ℓ2-TV denoising solver. The regularization parameter used here has been manually selected for good performance.
"""
lmbda = 0.04
opt = tvl2.TVL2Denoise.Options({'Verbose': True, 'MaxMainIter': 200,
'gEvalY': False, 'AutoRho': {'Enabled': True}})
"""
Create solver object and solve, returning the denoised image ``imgr``.
"""
b = tvl2.TVL2Denoise(imgn, lmbda, opt)
imgr = b.solve()
"""
Display solve time and denoising performance.
"""
print("TVL2Denoise solve time: %5.2f s" % b.timer.elapsed('solve'))
print("Noisy image PSNR: %5.2f dB" % metric.psnr(img, imgn))
print("Denoised image PSNR: %5.2f dB" % metric.psnr(img, imgr))
"""
Display reference, corrupted, and denoised images.
"""
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.imview(img, title='Reference', fig=fig)
plot.subplot(1, 3, 2)
plot.imview(imgn, title='Corrupted', fig=fig)
plot.subplot(1, 3, 3)
plot.imview(imgr, title=r'Restored ($\ell_2$-TV)', fig=fig)
fig.show()
"""
Get iteration statistics from solver object and plot functional value, ADMM primal and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number.
"""
its = b.getitstat()
fig = plot.figure(figsize=(20, 5))
plot.subplot(1, 3, 1)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy',
xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'],
fig=fig)
plot.subplot(1, 3, 3)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
# Wait for enter on keyboard
input()
|
bwohlberg/sporco
|
examples/scripts/tv/tvl2den_gry.py
|
Python
|
bsd-3-clause
| 2,718
|
[
"Gaussian"
] |
dd5f2ea877bcf4bcd0a9b9935b105fec3713ba3fc6f2a7ffe73e870ee7d4b81b
|
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. currentmodule:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning NaNs or other appropriate values.
Some of the special function routines can emit warnings or raise
exceptions when an error occurs. By default this is disabled; to
query and control the current error handling state the following
functions are provided.
.. autosummary::
:toctree: generated/
geterr -- Get the current way of handling special-function errors.
seterr -- Set how special-function errors are handled.
errstate -- Context manager for special-function error handling.
SpecialFunctionWarning -- Warning that can be emitted by special functions.
SpecialFunctionError -- Exception that can be raised by special functions.
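A minimal sketch of the `errstate` context manager (assuming ``gammaln(0)``,
which evaluates to infinity, triggers a 'singular' error):
>>> import scipy.special as sc
>>> with sc.errstate(singular='raise'):
...     sc.gammaln(0)  # raises SpecialFunctionError instead of returning inf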
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions and their derivatives.
ai_zeros -- Compute `nt` zeros and values of the Airy function Ai and its derivative.
bi_zeros -- Compute `nt` zeros and values of the Airy function Bi and its derivative.
itairy -- Integrals of Airy functions
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1
ellipkinc -- Incomplete elliptic integral of the first kind
ellipe -- Complete elliptic integral of the second kind
ellipeinc -- Incomplete elliptic integral of the second kind
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of the first kind of real order and complex argument.
jve -- Exponentially scaled Bessel function of order `v`.
yn -- Bessel function of the second kind of integer order and real argument.
yv -- Bessel function of the second kind of real order and complex argument.
yve -- Exponentially scaled Bessel function of the second kind of real order.
kn -- Modified Bessel function of the second kind of integer order `n`
kv -- Modified Bessel function of the second kind of real order `v`
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function of the first kind of real order.
ive -- Exponentially scaled modified Bessel function of the first kind
hankel1 -- Hankel function of the first kind
hankel1e -- Exponentially scaled Hankel function of the first kind
hankel2 -- Hankel function of the second kind
hankel2e -- Exponentially scaled Hankel function of the second kind
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- Jahnke-Emden Lambda function, Lambdav(x).
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- Compute zeros of integer-order Bessel functions Jn and Jn'.
jnyn_zeros -- Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
jn_zeros -- Compute zeros of integer-order Bessel function Jn(x).
jnp_zeros -- Compute zeros of integer-order Bessel function derivative Jn'(x).
yn_zeros -- Compute zeros of integer-order Bessel function Yn(x).
ynp_zeros -- Compute zeros of integer-order Bessel function derivative Yn'(x).
y0_zeros -- Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
y1_zeros -- Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
y1p_zeros -- Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of the first kind of order 0.
j1 -- Bessel function of the first kind of order 1.
y0 -- Bessel function of the second kind of order 0.
y1 -- Bessel function of the second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
k0e -- Exponentially scaled modified Bessel function K of order 0
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
k1e -- Exponentially scaled modified Bessel function K of order 1
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Integrals of Bessel functions of order 0
it2j0y0 -- Integrals related to Bessel functions of order 0
iti0k0 -- Integrals of modified Bessel functions of order 0
it2i0k0 -- Integrals related to modified Bessel functions of order 0
besselpoly -- Weighted integral of a Bessel function.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- Compute Ricatti-Bessel function of the first kind and its derivative.
riccati_yn -- Compute Ricatti-Bessel function of the second kind and its derivative.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function.
modstruve -- Modified Struve function.
itstruve0 -- Integral of the Struve function of order 0.
it2struve0 -- Integral related to the Struve function of order 0.
itmodstruve0 -- Integral of the modified Struve function of order 0.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Binomial distribution cumulative distribution function.
bdtrc -- Binomial distribution survival function.
bdtri -- Inverse function to `bdtr` with respect to `p`.
bdtrik -- Inverse function to `bdtr` with respect to `k`.
bdtrin -- Inverse function to `bdtr` with respect to `n`.
btdtr -- Cumulative distribution function of the beta distribution.
btdtri -- The `p`-th quantile of the beta distribution.
btdtria -- Inverse of `btdtr` with respect to `a`.
btdtrib -- Inverse of `btdtr` with respect to `b`.
fdtr -- F cumulative distribution function.
fdtrc -- F survival function.
fdtri -- The `p`-th quantile of the F-distribution.
fdtridfd -- Inverse to `fdtr` vs dfd
gdtr -- Gamma distribution cumulative distribution function.
gdtrc -- Gamma distribution survival function.
gdtria -- Inverse of `gdtr` vs a.
gdtrib -- Inverse of `gdtr` vs b.
gdtrix -- Inverse of `gdtr` vs x.
nbdtr -- Negative binomial cumulative distribution function.
nbdtrc -- Negative binomial survival function.
nbdtri -- Inverse of `nbdtr` vs `p`.
nbdtrik -- Inverse of `nbdtr` vs `k`.
nbdtrin -- Inverse of `nbdtr` vs `n`.
ncfdtr -- Cumulative distribution function of the non-central F distribution.
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
nrdtrimn -- Calculate mean of normal distribution given other params.
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
pdtr -- Poisson cumulative distribution function
pdtrc -- Poisson survival function
pdtri -- Inverse to `pdtr` vs m
pdtrik -- Inverse to `pdtr` vs k
stdtr -- Student t distribution cumulative distribution function
stdtridf -- Inverse of `stdtr` vs df
stdtrit -- Inverse of `stdtr` vs `t`
chdtr -- Chi square cumulative distribution function
chdtrc -- Chi square survival function
chdtri -- Inverse to `chdtrc`
chdtriv -- Inverse to `chdtr` vs `v`
ndtr -- Gaussian cumulative distribution function.
log_ndtr -- Logarithm of Gaussian cumulative distribution function.
ndtri -- Inverse of `ndtr` vs x
chndtr -- Non-central chi square cumulative distribution function
chndtridf -- Inverse to `chndtr` vs `df`
chndtrinc -- Inverse to `chndtr` vs `nc`
chndtrix -- Inverse to `chndtr` vs `x`
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function
smirnovi -- Inverse to `smirnov`
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution
kolmogi -- Inverse function to `kolmogorov`
tklmbda -- Tukey-Lambda cumulative distribution function
logit -- Logit ufunc for ndarrays.
expit -- Expit ufunc for ndarrays.
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + `x`.
owens_t -- Owen's T Function.
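Most of the inverse entries above (``*dtri*``, ``*dtrik``, and similar) invert the
corresponding distribution function with respect to one of its arguments. A minimal
doctest-style sketch of this convention, using the Gaussian pair (``np.allclose`` is
used because the round trip is only accurate to floating point):
>>> import numpy as np
>>> from scipy.special import ndtr, ndtri
>>> np.allclose(ndtri(ndtr(1.5)), 1.5)
True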
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- Elementwise function for computing entropy.
rel_entr -- Elementwise function for computing relative entropy.
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Regularized lower incomplete gamma function.
gammaincinv -- Inverse to `gammainc`
gammaincc -- Regularized upper incomplete gamma function.
gammainccinv -- Inverse to `gammaincc`
beta -- Beta function.
betaln -- Natural logarithm of absolute value of beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse function to beta integral.
psi -- The digamma function.
rgamma -- Reciprocal of the Gamma function, ``1 / Gamma(z)``.
polygamma -- Polygamma function of order n.
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
digamma -- The digamma function (an alias for `psi`).
poch -- Rising factorial (z)_m
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Returns the error function of complex argument.
erfc -- Complementary error function, ``1 - erf(x)``.
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
erfi -- Imaginary error function, ``-i erf(i z)``.
erfinv -- Inverse function for erf.
erfcinv -- Inverse function for erfc.
wofz -- Faddeeva function
dawsn -- Dawson's integral.
fresnel -- Fresnel sin and cos integrals
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
modfresnelp -- Modified Fresnel positive integrals
modfresnelm -- Modified Fresnel negative integrals
voigt -- Voigt profile.
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- Compute nt complex zeros of error function erf(z).
fresnelc_zeros -- Compute nt complex zeros of cosine Fresnel integral C(z).
fresnels_zeros -- Compute nt complex zeros of sine Fresnel integral S(z).
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre function of integer order and real degree.
sph_harm -- Compute spherical harmonics.
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- Associated Legendre function of the first kind for complex arguments.
lpn -- Legendre function of the first kind.
lqn -- Legendre function of the second kind.
lpmn -- Sequence of associated Legendre functions of the first kind.
lqmn -- Sequence of associated Legendre functions of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l)
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l)
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
eval_legendre -- Evaluate Legendre polynomial at a point.
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
eval_jacobi -- Evaluate Jacobi polynomial at a point.
eval_laguerre -- Evaluate Laguerre polynomial at a point.
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
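For example, a Legendre polynomial can be evaluated directly at a point with
`eval_legendre` (a minimal sketch; recall that ``P_2(x) = (3*x**2 - 1) / 2``, and
``np.allclose`` is used since the value matches only to floating-point rounding):
>>> import numpy as np
>>> from scipy.special import eval_legendre
>>> np.allclose(eval_legendre(2, 0.5), (3 * 0.5**2 - 1) / 2)
True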
The following functions compute roots and quadrature weights for
orthogonal polynomials:
.. autosummary::
:toctree: generated/
roots_legendre -- Gauss-Legendre quadrature.
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
roots_jacobi -- Gauss-Jacobi quadrature.
roots_laguerre -- Gauss-Laguerre quadrature.
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
roots_hermite -- Gauss-Hermite (physicist's) quadrature.
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
The functions below, in turn, return the polynomial coefficients in
``orthopoly1d`` objects, which behave similarly to `numpy.poly1d`.
The ``orthopoly1d`` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that ``orthopoly1d`` objects are converted to `~numpy.poly1d` when doing
arithmetic, and lose information about the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- Legendre polynomial.
chebyt -- Chebyshev polynomial of the first kind.
chebyu -- Chebyshev polynomial of the second kind.
chebyc -- Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
chebys -- Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
jacobi -- Jacobi polynomial.
laguerre -- Laguerre polynomial.
genlaguerre -- Generalized (associated) Laguerre polynomial.
hermite -- Physicist's Hermite polynomial.
hermitenorm -- Normalized (probabilist's) Hermite polynomial.
gegenbauer -- Gegenbauer (ultraspherical) polynomial.
sh_legendre -- Shifted Legendre polynomial.
sh_chebyt -- Shifted Chebyshev polynomial of the first kind.
sh_chebyu -- Shifted Chebyshev polynomial of the second kind.
sh_jacobi -- Shifted Jacobi polynomial.
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
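For instance, a minimal sketch of three-point Gauss-Legendre quadrature built from
`roots_legendre` (the rule is exact for polynomials up to degree 5; ``np.allclose``
is used since the floating-point sum matches only to rounding):
>>> import numpy as np
>>> from scipy.special import roots_legendre
>>> x, w = roots_legendre(3)
>>> np.allclose(np.sum(w * x**2), 2.0 / 3.0)  # integral of x**2 over [-1, 1]
True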
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x)
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind
hyp0f1 -- Confluent hypergeometric limit function 0F1.
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function D
pbvv -- Parabolic cylinder function V
pbwa -- Parabolic cylinder function W
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- Parabolic cylinder functions Dv(x) and derivatives.
pbvv_seq -- Parabolic cylinder functions Vv(x) and derivatives.
pbdn_seq -- Parabolic cylinder functions Dn(z) and derivatives.
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic value of even Mathieu functions
mathieu_b -- Characteristic value of odd Mathieu functions
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- Fourier coefficients for even Mathieu and modified Mathieu functions.
mathieu_odd_coef -- Fourier coefficients for odd Mathieu and modified Mathieu functions.
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function and its derivative
mathieu_sem -- Odd Mathieu function and its derivative
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative
pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
pro_cv -- Characteristic value of prolate spheroidal function
obl_cv -- Characteristic value of oblate spheroidal function
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
The following functions require a pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- Kelvin functions as complex numbers
kelvin_zeros -- Compute nt zeros of all Kelvin functions.
ber -- Kelvin function ber.
bei -- Kelvin function bei
berp -- Derivative of the Kelvin function `ber`
beip -- Derivative of the Kelvin function `bei`
ker -- Kelvin function ker
kei -- Kelvin function kei
kerp -- Derivative of the Kelvin function ker
keip -- Derivative of the Kelvin function kei
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- Compute nt zeros of the Kelvin function ber(x).
bei_zeros -- Compute nt zeros of the Kelvin function bei(x).
berp_zeros -- Compute nt zeros of the Kelvin function ber'(x).
beip_zeros -- Compute nt zeros of the Kelvin function bei'(x).
ker_zeros -- Compute nt zeros of the Kelvin function ker(x).
kei_zeros -- Compute nt zeros of the Kelvin function kei(x).
kerp_zeros -- Compute nt zeros of the Kelvin function ker'(x).
keip_zeros -- Compute nt zeros of the Kelvin function kei'(x).
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- The number of combinations of N things taken k at a time.
perm -- Permutations of N things taken k at a time, i.e., k-permutations of N.
Lambert W and Related Functions
-------------------------------
.. autosummary::
:toctree: generated/
lambertw -- Lambert W function.
wrightomega -- Wright Omega function.
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic-geometric mean.
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
binom -- Binomial coefficient
diric -- Periodic sinc function, also called the Dirichlet function.
euler -- Euler numbers E0..En (inclusive).
expn -- Exponential integral E_n
exp1 -- Exponential integral E_1 of complex argument z
expi -- Exponential integral Ei
factorial -- The factorial of a number or array of numbers.
factorial2 -- Double factorial.
factorialk -- Multifactorial of n of order k, n(!!...!).
shichi -- Hyperbolic sine and cosine integrals.
sici -- Sine and cosine integrals.
softmax -- Softmax function.
spence -- Spence's function, also known as the dilogarithm.
zeta -- Riemann zeta function.
zetac -- Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root of `x`
exp10 -- 10**x
exp2 -- 2**x
radian -- Convert from degrees to radians
cosdg -- Cosine of the angle `x` given in degrees.
sindg -- Sine of angle given in degrees
tandg -- Tangent of angle x given in degrees.
cotdg -- Cotangent of the angle `x` given in degrees.
log1p -- Calculates log(1+x) for use when `x` is near zero
expm1 -- exp(x) - 1 for use when `x` is near zero.
cosm1 -- cos(x) - 1 for use when `x` is near zero.
round -- Round to nearest integer
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
logsumexp -- Compute the log of the sum of exponentials of input elements.
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
sinc -- Return the sinc function.
"""
from __future__ import division, print_function, absolute_import
from .sf_error import SpecialFunctionWarning, SpecialFunctionError
from . import _ufuncs
from ._ufuncs import *
from . import _basic
from ._basic import *
from ._logsumexp import logsumexp, softmax
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import (
ellip_harm,
ellip_harm_2,
ellip_normal
)
from .lambertw import lambertw
from ._spherical_bessel import (
spherical_jn,
spherical_yn,
spherical_in,
spherical_kn
)
__all__ = _ufuncs.__all__ + _basic.__all__ + orthogonal.__all__ + [
'SpecialFunctionWarning',
'SpecialFunctionError',
'orthogonal', # Not public, but kept in __all__ for back-compat
'logsumexp',
'softmax',
'multigammaln',
'ellip_harm',
'ellip_harm_2',
'ellip_normal',
'lambertw',
'spherical_jn',
'spherical_yn',
'spherical_in',
'spherical_kn',
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
lhilt/scipy
|
scipy/special/__init__.py
|
Python
|
bsd-3-clause
| 27,314
|
[
"Gaussian"
] |
71865e408e75c70ba5a3658eac5517bdaa7e4ef301505432d93c28c4b38b7dd4
|
"""
Open Babel utilities.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from StringIO import StringIO
import subprocess
from rdkit import Chem
from rdkit_utils import serial
from vs_utils.utils import image_utils
class Ionizer(object):
"""
Calculate atomic formal charges at the given pH.
Parameters
----------
pH : float, optional (default 7.4)
pH at which to calculate formal charges.
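Examples
--------
A minimal usage sketch (assumes the Open Babel ``obabel`` executable is
available on the PATH; the SMILES string below is illustrative only):
>>> from rdkit import Chem
>>> ionizer = Ionizer(pH=7.4)
>>> mol = Chem.MolFromSmiles('CC(=O)O')
>>> ionized = ionizer(mol)  # acetic acid is deprotonated at pH 7.4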
"""
def __init__(self, pH=7.4):
self.pH = pH
def __call__(self, mol):
"""
Ionize a molecule.
Parameters
----------
mol : RDMol
Molecule.
"""
return self.ionize(mol)
def ionize(self, mol):
"""
Ionize a molecule while preserving 3D coordinates.
Parameters
----------
mol : RDMol
Molecule.
"""
if mol.GetNumConformers() > 0:
return self._ionize_3d(mol)
else:
return self._ionize_2d(mol)
def _ionize_2d(self, mol):
"""
Ionize a molecule without preserving conformers.
Note: this method removes explicit hydrogens from the molecule.
Parameters
----------
mol : RDMol
Molecule.
"""
smiles = Chem.MolToSmiles(mol, isomericSmiles=True, canonical=True)
args = ['obabel', '-i', 'can', '-o', 'can', '-p', str(self.pH)]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
ionized_smiles, _ = p.communicate(smiles)
ionized_mol = Chem.MolFromSmiles(ionized_smiles)
# catch ionizer error
if ionized_mol is None:
raise IonizerError(mol)
return ionized_mol
def _ionize_3d(self, mol):
"""
Ionize a molecule while preserving conformers.
Parameters
----------
mol : RDMol
Molecule.
"""
assert mol.GetNumConformers() > 0
sdf = ''
for conf in mol.GetConformers():
sdf += Chem.MolToMolBlock(mol, confId=conf.GetId(),
includeStereo=True)
sdf += '$$$$\n'
args = ['obabel', '-i', 'sdf', '-o', 'sdf', '-p', str(self.pH)]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
ionized_sdf, _ = p.communicate(sdf)
reader = serial.MolReader(StringIO(ionized_sdf), mol_format='sdf',
remove_salts=False) # no changes
try:
mols = list(reader.get_mols())
except RuntimeError as e: # catch pre-condition violations
raise IonizerError(e.message)
# catch ionizer failure
if len(mols) == 0:
raise IonizerError(mol)
# detection of stereochemistry based on 3D coordinates might result
# in issues when attempting to recombine ionized conformers, but we
# merge them anyway
if len(mols) == 1:
ionized_mol, = mols
else:
ionized_mol = mols[0]
for other in mols[1:]:
for conf in other.GetConformers():
ionized_mol.AddConformer(conf, assignId=True)
return ionized_mol
class MolImage(object):
"""
Generate 2D depictions of molecules.
Parameters
----------
size : int, optional (default 32)
Size (in any direction) of generated images.
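Examples
--------
A minimal usage sketch (assumes ``obabel`` was built with PNG output support
and that ``image_utils.load`` can decode the returned bytes):
>>> from rdkit import Chem
>>> imager = MolImage(size=64)
>>> mol = Chem.MolFromSmiles('c1ccccc1')
>>> im = imager(mol)  # 2D depiction of benzene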
"""
def __init__(self, size=32):
self.size = size
def __call__(self, mol):
"""
Generate a PNG image of a molecule.
Parameters
----------
mol : RDMol
Molecule.
"""
return self.depict(mol)
def depict(self, mol):
"""
Generate a PNG image of a molecule (via its canonical SMILES).
Parameters
----------
mol : RDMol
Molecule.
"""
smiles = Chem.MolToSmiles(mol, isomericSmiles=True, canonical=True)
args = ['obabel', '-i', 'can', '-o', 'png', '-xd', '-xC',
'-xp {}'.format(self.size)]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
png, _ = p.communicate(smiles)
im = image_utils.load(png)
return im
class IonizerError(Exception):
"""
Generic Ionizer exception.
"""
|
rbharath/pande-gas
|
vs_utils/utils/ob_utils.py
|
Python
|
bsd-3-clause
| 4,834
|
[
"Open Babel",
"RDKit"
] |
e0c2dbec2eb9e40fe9f43b3b54cf53e8768eb2988e17eae8d78a8351cf19bdfc
|
from Bio.PDB import Selection, PDBParser
from Bio.PDB.NeighborSearch import NeighborSearch
from contactnetwork.interaction import *
from contactnetwork.pdb import *
from contactnetwork.models import *
from io import StringIO
from protein.models import ProteinConformation
from structure.models import Structure
from signprot.models import SignprotComplex
import copy
# Minimum sequence separation (in residues) required between contacting residues
NUM_SKIP_RESIDUES = 4
def compute_interactions(pdb_name, save_to_db=False):
do_distances = True
do_interactions = True
do_complexes = True
distances = []
classified = []
classified_complex = []
# Ensure that the PDB name is lowercase
pdb_name = pdb_name.lower()
# Get the pdb structure
struc = Structure.objects.get(protein_conformation__protein__entry_name=pdb_name)
pdb_io = StringIO(struc.pdb_data.pdb)
# Get the preferred chain
preferred_chain = struc.preferred_chain.split(',')[0]
# Get the Biopython structure for the PDB
s = PDBParser(PERMISSIVE=True, QUIET=True).get_structure('ref', pdb_io)[0]
#s = pdb_get_structure(pdb_name)[0]
chain = s[preferred_chain]
#return classified, distances
# Keep only residues that have a generic number (GN) and that match the receptor.
residues = struc.protein_conformation.residue_set.exclude(generic_number=None).all().prefetch_related('generic_number')
dbres = {}
dblabel = {}
for r in residues:
dbres[r.sequence_number] = r
dblabel[r.sequence_number] = r.generic_number.label
ids_to_remove = []
for res in chain:
if not res.id[1] in dbres.keys() and res.get_resname() != "HOH":
ids_to_remove.append(res.id)
for i in ids_to_remove:
chain.detach_child(i)
if do_distances:
for i1,res1 in enumerate(chain,1):
if not is_water(res1):
for i2,res2 in enumerate(chain,1):
if i2>i1 and not is_water(res2):
# Compute each unordered pair only once (i2 > i1); subtracting Biopython atoms gives the CA-CA Euclidean distance.
distance = res1['CA']-res2['CA']
distances.append((dbres[res1.id[1]],dbres[res2.id[1]],distance,dblabel[res1.id[1]],dblabel[res2.id[1]]))
if do_interactions:
atom_list = Selection.unfold_entities(s[preferred_chain], 'A')
# Search for all neighbouring residues
ns = NeighborSearch(atom_list)
all_neighbors = ns.search_all(6.6, "R")
# Filter all pairs containing non AA residues
all_aa_neighbors = [pair for pair in all_neighbors if is_aa(pair[0]) and is_aa(pair[1])]
# Only include contacts between residues more than NUM_SKIP_RESIDUES sequence steps apart
all_aa_neighbors = [pair for pair in all_aa_neighbors if abs(pair[0].id[1] - pair[1].id[1]) > NUM_SKIP_RESIDUES]
# For each pair of interacting residues, determine the type of interaction
interactions = [InteractingPair(res_pair[0], res_pair[1], dbres[res_pair[0].id[1]], dbres[res_pair[1].id[1]], struc) for res_pair in all_aa_neighbors if not is_water(res_pair[0]) and not is_water(res_pair[1]) ]
# Keep only the pairs with at least one classified interaction.
classified = [interaction for interaction in interactions if len(interaction.get_interactions()) > 0]
if do_complexes:
try:
# check if structure in signprot_complex
complex = SignprotComplex.objects.get(structure=struc)
# Get all GPCR residue atoms based on preferred chain
gpcr_atom_list = [ atom for residue in Selection.unfold_entities(s[preferred_chain], 'R') if is_aa(residue) \
for atom in residue.get_atoms()]
# Get all residue atoms from the coupled protein (e.g. G-protein)
# NOW: select alpha subunit protein chain using complex model
sign_atom_list = [ atom for residue in Selection.unfold_entities(s[complex.alpha], 'R') if is_aa(residue) \
for atom in residue.get_atoms()]
ns_gpcr = NeighborSearch(gpcr_atom_list)
ns_sign = NeighborSearch(sign_atom_list)
# For each GPCR atom perform the neighbor search on the signaling protein
all_neighbors = {(gpcr_atom.parent, match_res) for gpcr_atom in gpcr_atom_list
for match_res in ns_sign.search(gpcr_atom.coord, 4.5, "R")}
# For each pair of interacting residues, determine the type of interaction
residues_sign = ProteinConformation.objects.get(protein__entry_name=pdb_name+"_"+complex.alpha.lower()).residue_set.exclude(generic_number=None).all().prefetch_related('generic_number')
# grab labels from sign protein
dbres_sign = {}
dblabel_sign = {}
for r in residues_sign:
dbres_sign[r.sequence_number] = r
dblabel_sign[r.sequence_number] = r.generic_number.label
# Find interactions
interactions = [InteractingPair(res_pair[0], res_pair[1], dbres[res_pair[0].id[1]], dbres_sign[res_pair[1].id[1]], struc) for res_pair in all_neighbors if res_pair[0].id[1] in dbres and res_pair[1].id[1] in dbres_sign ]
# Filter unclassified interactions
classified_complex = [interaction for interaction in interactions if len(interaction.get_interactions()) > 0]
# Convert to dictionary for water calculations
interaction_pairs = {}
for pair in classified_complex:
res_1 = pair.get_residue_1()
res_2 = pair.get_residue_2()
key = res_1.get_parent().get_id()+str(res_1.get_id()[1]) + "_" + res_2.get_parent().get_id()+str(res_2.get_id()[1])
interaction_pairs[key] = pair
# Obtain list of all water molecules in the structure
water_list = { water for chain in s for residue in chain
if residue.get_resname() == "HOH" for water in residue.get_atoms() }
# If waters are present calculate water-mediated interactions
if len(water_list) > 0:
## Iterate water molecules over coupled and gpcr atom list
water_neighbors_gpcr = {(water, match_res) for water in water_list
for match_res in ns_gpcr.search(water.coord, 3.5, "R")}
water_neighbors_sign = {(water, match_res) for water in water_list
for match_res in ns_sign.search(water.coord, 3.5, "R")}
# TODO: DEBUG AND VERIFY this code as water-mediated interactions were present at this time
# 1. UPDATE complexes to include also mini Gs and peptides (e.g. 4X1H/6FUF/5G53)
# 2. Run and verify water-mediated do_interactions
# 3. Improve the intersection between the two hit lists
## TODO: cleaner intersection between hits from the two Lists
# see new code below
# for water_pair_one in water_neighbors_gpcr:
# for water_pair_two in water_neighbors_sign:
# if water_pair_one[0]==water_pair_two[0]:
# res_1 = water_pair_one[1]
# res_2 = water_pair_two[1]
# key = res_1.get_parent().get_id()+str(res_1.get_id()[1]) + "_" + res_2.get_parent().get_id()+str(res_2.get_id()[1])
# Check if interaction is polar
# if any(get_polar_interactions(water_pair_one[0].get_parent(), water_pair_one[1])) and any(get_polar_interactions(water_pair_two[0].get_parent(), water_pair_two[1])):
# TODO Check if water interaction is already present (e.g. multiple waters)
# TODO Is splitting of sidechain and backbone-mediated interactions desired?
# if not key in interaction_pairs:
# interaction_pairs[key] = InteractingPair(res_1, res_2, dbres[res_1.id[1]], dbres_sign[res_2.id[1]], struc)
# TODO: fix assignment of interacting atom labels (now seems limited to residues)
# interaction_pairs[key].interactions.append(WaterMediated(a + "|" + str(water_pair_one[0].get_parent().get_id()[1]), b))
except SignprotComplex.DoesNotExist:
# print("No complex definition found for", pdb_name)
log = "No complex definition found for " + pdb_name
except ProteinConformation.DoesNotExist:
print("No protein conformation definition found for signaling protein of ", pdb_name)
# log = "No protein conformation definition found for signaling protein of " + pdb_name
if save_to_db:
if do_interactions:
# Delete previous pairs for this structure so the new ones load in faster
InteractingResiduePair.objects.filter(referenced_structure=struc).all().delete()
# bulk_pair = []
# for d in distances:
# pair = InteractingResiduePair(res1=d[0], res2=d[1], referenced_structure=struc)
# bulk_pair.append(pair)
# Create interaction dictionary
interaction_pairs = {}
for pair in classified:
res_1 = pair.get_residue_1()
res_2 = pair.get_residue_2()
key = res_1.get_parent().get_id()+str(res_1.get_id()[1]) + "_" + res_2.get_parent().get_id()+str(res_2.get_id()[1])
interaction_pairs[key] = pair
# POSSIBLE ADDON: support for multiple water-mediated bonds
## Obtain list of water molecules
water_list = { water for residue in s[preferred_chain] if residue.get_resname() == "HOH" for water in residue.get_atoms() }
if len(water_list) > 0:
## Iterate water molecules over residue atom list
water_neighbors = [(water, match_res) for water in water_list
for match_res in ns.search(water.coord, 3.5, "R") if not is_water(match_res) and (is_hba(match_res) or is_hbd(match_res))]
# intersect between residues sharing the same interacting water
for index_one in range(len(water_neighbors)):
water_pair_one = water_neighbors[index_one]
for index_two in [ index for index in range(index_one+1, len(water_neighbors)) if water_pair_one[0]==water_neighbors[index][0] ]:
water_pair_two = water_neighbors[index_two]
res_1 = water_pair_one[1]
res_2 = water_pair_two[1]
# TODO: order residues + check minimum spacing between residues
key = res_1.get_parent().get_id()+str(res_1.get_id()[1]) + "_" + res_2.get_parent().get_id()+str(res_2.get_id()[1])
# Verify h-bonds between water and both residues
matches_one = InteractingPair.verify_water_hbond(water_pair_one[1], water_pair_one[0])
matches_two = InteractingPair.verify_water_hbond(water_pair_two[1], water_pair_two[0])
if len(matches_one) > 0 and len(matches_two) > 0:
# if not exists, create residue pair without interactions
if not key in interaction_pairs:
interaction_pairs[key] = InteractingPair(res_1, res_2, dbres[res_1.id[1]], dbres[res_2.id[1]], struc)
for a,b in zip(matches_one, matches_two):
# HACK: store water ID as part of first atom name
interaction_pairs[key].interactions.append(WaterMediated(a + "|" + str(water_pair_one[0].get_parent().get_id()[1]), b))
for p in classified:
p.save_into_database()
if do_complexes:
for pair in classified_complex:
pair.save_into_database()
if do_distances:
# Distance.objects.filter(structure=struc).all().delete()
bulk_distances = []
for i,d in enumerate(distances):
distance = Distance(distance=int(100*d[2]),res1=d[0], res2=d[1],gn1=d[3], gn2=d[4], gns_pair='_'.join([d[3],d[4]]), structure=struc)
bulk_distances.append(distance)
if len(bulk_distances)>1000:
pairs = Distance.objects.bulk_create(bulk_distances)
bulk_distances = []
pairs = Distance.objects.bulk_create(bulk_distances)
return classified, distances
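# A minimal usage sketch (the entry name below is hypothetical; a configured
# Django environment with the matching Structure, residue and, optionally,
# SignprotComplex records is required, since they are looked up by entry name):
if __name__ == "__main__":
    classified_pairs, ca_distances = compute_interactions("4bvn", save_to_db=False)
    print(len(classified_pairs), "classified interacting residue pairs")
    print(len(ca_distances), "CA-CA distance records")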
|
cmunk/protwis
|
contactnetwork/cube.py
|
Python
|
apache-2.0
| 12,696
|
[
"Biopython"
] |
4cecb3b1d09475a204c68ae11aa770feafd518015286425279885a8d01c6c7aa
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activity analysis.
Requires qualified name annotations (see qual_names.py).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
# TODO(mdan): Add support for PY3 (e.g. Param vs arg).
# TODO(alexbw): Ignore named literals (e.g. None)
class Scope(object):
"""Encloses local symbol definition and usage information.
This can track for instance whether a symbol is modified in the current scope.
Note that scopes do not necessarily align with Python's scopes. For example,
the body of an if statement may be considered a separate scope.
Attributes:
modified: identifiers modified in this scope
created: identifiers created in this scope
used: identifiers referenced in this scope
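For example, when the statement `y = x + 1` is analyzed, the scope attached to
that statement records `x` in `used` and `y` in `modified`.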
"""
def __init__(self, parent, isolated=True, add_unknown_symbols=False):
"""Create a new scope.
Args:
parent: A Scope or None.
isolated: Whether the scope is isolated, that is, whether variables
modified in this scope are kept local and not propagated to the parent scope.
add_unknown_symbols: Whether to handle attributed and subscripts
without having first seen the base name.
E.g., analyzing the statement 'x.y = z' without first having seen 'x'.
"""
self.isolated = isolated
self.parent = parent
self.add_unknown_symbols = add_unknown_symbols
self.modified = set()
# TODO(mdan): Completely remove this.
self.created = set()
self.used = set()
self.params = {}
self.returned = set()
# TODO(mdan): Rename to `locals`
@property
def referenced(self):
if not self.isolated and self.parent is not None:
return self.used | self.parent.referenced
return self.used
def __repr__(self):
return 'Scope{r=%s, c=%s, w=%s}' % (tuple(self.used), tuple(self.created),
tuple(self.modified))
def copy_from(self, other):
"""Recursively copies the contents of this scope from another scope."""
if (self.parent is None) != (other.parent is None):
raise ValueError('cannot copy scopes of different structures')
if other.parent is not None:
self.parent.copy_from(other.parent)
self.isolated = other.isolated
self.modified = copy.copy(other.modified)
self.created = copy.copy(other.created)
self.used = copy.copy(other.used)
self.params = copy.copy(other.params)
self.returned = copy.copy(other.returned)
@classmethod
def copy_of(cls, other):
if other.parent is not None:
parent = cls.copy_of(other.parent)
else:
parent = None
new_copy = cls(parent)
new_copy.copy_from(other)
return new_copy
def merge_from(self, other):
if (self.parent is None) != (other.parent is None):
raise ValueError('cannot merge scopes of different structures')
if other.parent is not None:
self.parent.merge_from(other.parent)
self.modified |= other.modified
self.created |= other.created
self.used |= other.used
self.params.update(other.params)
self.returned |= other.returned
def has(self, name):
if name in self.modified:
return True
elif self.parent is not None:
return self.parent.has(name)
return False
def mark_read(self, name):
self.used.add(name)
if self.parent is not None and name not in self.created:
self.parent.mark_read(name)
def mark_param(self, name, owner):
self.params[name] = owner
def mark_creation(self, name, writes_create_symbol=False):
"""Mark a qualified name as created."""
if name.is_composite():
parent = name.parent
if not writes_create_symbol:
return
else:
if not self.has(parent):
if self.add_unknown_symbols:
self.mark_read(parent)
else:
raise ValueError('Unknown symbol "%s".' % parent)
self.created.add(name)
def mark_write(self, name):
"""Marks the given symbol as modified in the current scope."""
self.modified.add(name)
if self.isolated:
self.mark_creation(name)
else:
if self.parent is None:
self.mark_creation(name)
else:
if not self.parent.has(name):
self.mark_creation(name)
self.parent.mark_write(name)
def mark_returned(self, name):
self.returned.add(name)
if not self.isolated and self.parent is not None:
self.parent.mark_returned(name)
class ActivityAnalyzer(transformer.Base):
"""Annotates nodes with local scope information.
See Scope.
The use of this class requires that qual_names.resolve() has been called on
the node. This class will ignore nodes that have not been
annotated with their qualified names.
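For example, after analysis a function definition node carries a
`NodeAnno.BODY_SCOPE` annotation whose `modified` set lists the symbols written
in the function body.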
"""
def __init__(self, context, parent_scope=None, add_unknown_symbols=False):
super(ActivityAnalyzer, self).__init__(context)
self.scope = Scope(parent_scope, None, add_unknown_symbols)
self._in_return_statement = False
self._in_aug_assign = False
@property
def _in_constructor(self):
if len(self.enclosing_entities) > 1:
innermost = self.enclosing_entities[-1]
parent = self.enclosing_entities[-2]
return isinstance(parent, gast.ClassDef) and innermost.name == '__init__'
return False
def _node_sets_self_attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
qn = anno.getanno(node, anno.Basic.QN)
# TODO(mdan): The 'self' argument is not guaranteed to be called 'self'.
if qn.has_attr and qn.parent.qn == ('self',):
return True
return False
def _track_symbol(self,
node,
composite_writes_alter_parent=False,
writes_create_symbol=False):
# A QN may be missing when we have an attribute (or subscript) on a function
# call. Example: a().b
if not anno.hasanno(node, anno.Basic.QN):
return
qn = anno.getanno(node, anno.Basic.QN)
if isinstance(node.ctx, gast.Store):
self.scope.mark_write(qn)
if qn.is_composite and composite_writes_alter_parent:
self.scope.mark_write(qn.parent)
if writes_create_symbol:
self.scope.mark_creation(qn, writes_create_symbol=True)
if self._in_aug_assign:
self.scope.mark_read(qn)
elif isinstance(node.ctx, gast.Load):
self.scope.mark_read(qn)
elif isinstance(node.ctx, gast.Param):
# Param contexts appear in function defs, so they have the meaning of
# defining a variable.
self.scope.mark_write(qn)
self.scope.mark_param(qn, self.enclosing_entities[-1])
else:
raise ValueError('Unknown context %s for node %s.' % (type(node.ctx), qn))
anno.setanno(node, NodeAnno.IS_LOCAL, self.scope.has(qn))
if self._in_return_statement:
self.scope.mark_returned(qn)
def _enter_scope(self, isolated):
self.scope = Scope(self.scope, isolated=isolated)
def _exit_scope(self):
self.scope = self.scope.parent
def _process_statement(self, node):
self._enter_scope(False)
node = self.generic_visit(node)
anno.setanno(node, anno.Static.SCOPE, self.scope)
self._exit_scope()
return node
def visit_Expr(self, node):
return self._process_statement(node)
def visit_Return(self, node):
self._in_return_statement = True
node = self._process_statement(node)
self._in_return_statement = False
return node
def visit_Assign(self, node):
return self._process_statement(node)
def visit_AugAssign(self, node):
# Special rules for AugAssign. In Assign, the target is only written,
# but in AugAssign (e.g. a += b), the target is both read and written.
self._in_aug_assign = True
node = self._process_statement(node)
self._in_aug_assign = False
return node
def visit_Name(self, node):
node = self.generic_visit(node)
self._track_symbol(node)
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if self._in_constructor and self._node_sets_self_attribute(node):
self._track_symbol(
node, composite_writes_alter_parent=True, writes_create_symbol=True)
else:
self._track_symbol(node)
return node
def visit_Subscript(self, node):
node = self.generic_visit(node)
# Subscript writes (e.g. a[b] = "value") are considered to modify
# both the element itself (a[b]) and its parent (a).
self._track_symbol(node)
return node
def visit_Print(self, node):
self._enter_scope(False)
node.values = self.visit_block(node.values)
anno.setanno(node, anno.Static.SCOPE, self.scope)
anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
self._exit_scope()
return node
def visit_Assert(self, node):
return self._process_statement(node)
def visit_Call(self, node):
self._enter_scope(False)
node.args = self.visit_block(node.args)
node.keywords = self.visit_block(node.keywords)
# TODO(mdan): Account starargs, kwargs
anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
self._exit_scope()
node.func = self.visit(node.func)
return node
def _process_block_node(self, node, block, scope_name):
self._enter_scope(False)
block = self.visit_block(block)
anno.setanno(node, scope_name, self.scope)
self._exit_scope()
return node
def _process_parallel_blocks(self, parent, children):
# Because the scopes are not isolated, processing any child block
# modifies the parent state causing the other child blocks to be
# processed incorrectly. So we need to checkpoint the parent scope so that
# each child sees the same context.
before_parent = Scope.copy_of(self.scope)
after_children = []
for child, scope_name in children:
self.scope.copy_from(before_parent)
parent = self._process_block_node(parent, child, scope_name)
after_child = Scope.copy_of(self.scope)
after_children.append(after_child)
for after_child in after_children:
self.scope.merge_from(after_child)
return parent
def visit_arguments(self, node):
return self._process_statement(node)
def visit_FunctionDef(self, node):
# The FunctionDef node itself has a Scope object that tracks the creation
# of its name, along with the usage of any decorators accompanying it.
self._enter_scope(False)
node.decorator_list = self.visit_block(node.decorator_list)
self.scope.mark_write(qual_names.QN(node.name))
anno.setanno(node, anno.Static.SCOPE, self.scope)
self._exit_scope()
# A separate Scope tracks the actual function definition.
self._enter_scope(True)
node.args = self.visit(node.args)
# Track the body separately. This is for compatibility reasons, it may not
# be strictly needed.
self._enter_scope(False)
node.body = self.visit_block(node.body)
anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
self._exit_scope()
self._exit_scope()
return node
def visit_With(self, node):
self._enter_scope(False)
node = self.generic_visit(node)
anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
self._exit_scope()
return node
def visit_withitem(self, node):
return self._process_statement(node)
def visit_If(self, node):
self._enter_scope(False)
node.test = self.visit(node.test)
anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
anno.setanno(node.test, anno.Static.SCOPE, self.scope)
self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_For(self, node):
self._enter_scope(False)
node.target = self.visit(node.target)
node.iter = self.visit(node.iter)
anno.setanno(node.iter, anno.Static.SCOPE, self.scope)
self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_While(self, node):
self._enter_scope(False)
node.test = self.visit(node.test)
anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
anno.setanno(node.test, anno.Static.SCOPE, self.scope)
self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def resolve(node, context, parent_scope=None):
return ActivityAnalyzer(context, parent_scope).visit(node)
|
xodus7/tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/activity.py
|
Python
|
apache-2.0
| 13,600
|
[
"VisIt"
] |
2303cce48ac2557d8e6ffddae22226d8deb1b76717431a033c1e8d38f6dace86
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lingvo layers that depend on attention layers but are not recurrent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import attention
from REDACTED.transformer_lingvo.lingvo.core import base_layer
from REDACTED.transformer_lingvo.lingvo.core import layers
from REDACTED.transformer_lingvo.lingvo.core import py_utils
from REDACTED.transformer_lingvo.lingvo.core import symbolic
from six.moves import range
class TransformerAttentionLayer(base_layer.BaseLayer):
"""Multi-headed attention, add and norm used by 'Attention Is All You Need'.
This class implements the first sub-layer of Transformer Layer. Input is
first processed using a multi-headed (self) attention. Output of the
attention layer is combined with the residual connection. Finally, the
output is normalized using Layer Normalization.
The layer can be used in six scenarios:
1. Multi-Headed Self-Attention, where attention keys (source vectors),
attention values (context vectors) and queries come from the same previous
layer output, `query_vec`. This is the general use case for encoder
Transformer Layers.
2. Masked Multi-Headed Self-Attention, where attention keys, attention values
and queries all come from the same previous layer output, but rightward
activations are masked to prevent information flow from future. This is the
use case for decoder self-attention Transformer Layers. Can be activated by
setting `is_masked` flag of this layer.
3. Multi-Headed Attention, where attention keys and attention values
`source_vecs`, are coming from a different source (output of the encoder)
and queries `query_vec`, coming from the previous layer outputs (decoder).
This corresponds to the standard attention mechanism, decoder attending the
encoder outputs.
4. Multi-Headed Attention, where attention values `context_vecs` are coming
from a different source than queries and keys, e.g. for positional
attention, where keys and queries are positional encodings and values are
decoder states.
5. Masked Multi-Headed Self-Attention, where attention keys, attention values
and queries all come from the same previous layer output, but the
activations for the current position are masked to reduce the impact of
high self-similarity. This is the use case for non-autoregressive decoder
self-attention Transformer Layers. Can be activated by setting `is_masked`
flag of this layer and setting `mask_type="eye"`.
6. Masked Multi-Headed Self-Attention, where attention keys, attention values
and queries all come from the same previous layer output, but:
. rightward activations are masked to prevent information flow from future.
. leftward activations are also masked to prevent information flow from
past tokens that are beyond the N-gram context [K-N+1, K-1] when predicting
the target token in position K. This is the use case for decoder
self-attention Transformer Layers in N-gram mode. Can be activated by
setting `is_masked` flag of this layer, and setting both
`mask_type="ngram"` and `mask_ngram_order=N-1` to use as context only the
previous N-1 tokens (as expected for an N-gram model); for details and
experimental results see https://arxiv.org/abs/2001.04589.
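As an illustrative sketch only (the dimensions are arbitrary and just a subset
of the available params is shown), a masked decoder self-attention sub-layer
(scenario 2) could be configured as::
  params = TransformerAttentionLayer.Params()
  params.name = 'self_atten'
  params.source_dim = 512
  params.num_attention_heads = 8
  params.is_masked = True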
"""
@classmethod
def Params(cls):
p = super(TransformerAttentionLayer, cls).Params()
p.Define('source_dim', 0, 'Dimension of the transformer block input.')
p.Define('context_dim', 0, 'Dimension of the attention contexts.')
p.Define('atten_hidden_dim', 0, 'Dimension of the attention hidden dim.')
p.Define('num_attention_heads', 8, 'Number of attention heads.')
p.Define('is_masked', False, 'If set, uses masked MultiHeadedAttention.')
p.Define(
'mask_ngram_order', 0, 'N-gram order, relevant only when'
'`mask_type` is set to "ngram".')
p.Define(
'mask_type', 'future', 'Type of attention mask if `is_masked` is'
'set. Either "future" for masking out attention to future'
'positions or "eye" for masking out the token itself, or "ngram" for'
'bounding the left context to the previous N-1 tokens, where N is set'
'by `mask_ngram_order`.')
p.Define('ln_tpl', layers.LayerNorm.Params(), 'Layer norm default params.')
p.Define(
'atten_tpl',
attention.MultiHeadedAttention.Params().Set(
use_source_vec_as_attention_value=False, enable_ctx_post_proj=True),
'Multi-Headed Dot-Attention default params.')
p.Define(
'atten_dropout_prob', 0.0,
'Probability at which we apply dropout to the attention probs. '
'This practically drops memory values at random positions.')
p.Define(
'residual_dropout_prob', 0.0,
'Probability at which we apply dropout to the residual layers, '
'such that, residual(x, f(x)) = (x + dropout(f(x))).')
p.Define(
'residual_dropout_tpl', layers.DropoutLayer.Params(),
'Residual dropout params template. keep_prop will be reset to '
'(1.0 - residual_dropout_prob).')
p.Define('packed_input', False,
'If True, each training example may pack multiple sequences.')
p.Define('add_unnormalized_input', False, 'If set, uses unnormalized input '
'in the residual add.')
p.Define(
'residual_function', None, 'When None (the default), use simple '
'sum for the residual connection (output = x + f(x)). For example, can '
'use layers.HighwaySkipLayer.Params() or layers.GatingLayer.Params() '
'for gated residual add, where output is instead '
'residual_function.FProp(x, f(x)).')
return p
@base_layer.initializer
def __init__(self, params):
super(TransformerAttentionLayer, self).__init__(params)
p = self.params
assert p.name
assert p.source_dim
if not p.atten_hidden_dim:
p.atten_hidden_dim = p.source_dim
if not p.context_dim:
p.context_dim = p.source_dim
if p.is_masked:
assert p.mask_type in ['future', 'eye', 'ngram']
with tf.variable_scope(p.name):
params = self._InitAttention(p.atten_tpl)
self.CreateChild('atten', params)
# Initialize attention layer norm
params = p.ln_tpl.Copy()
params.name = 'atten_ln'
params.input_dim = p.source_dim
self.CreateChild('layer_norm', params)
dropout_tpl = p.residual_dropout_tpl.Copy()
dropout_tpl.keep_prob = (1.0 - p.residual_dropout_prob)
self.CreateChild('residual_dropout', dropout_tpl)
if p.residual_function is not None:
params = p.residual_function.Copy()
params.input_dim = p.atten_hidden_dim
self.CreateChild('residual_function', params)
def _InitAttention(self, atten_tpl):
p = self.params
# Initialize multi-headed attention
params = atten_tpl.Copy()
params.name = 'multihead_atten'
params.source_dim = p.source_dim
params.query_dim = p.source_dim
params.hidden_dim = p.atten_hidden_dim
params.context_dim = p.context_dim
params.ctx_post_proj_dim = p.source_dim
params.num_attention_heads = p.num_attention_heads
params.atten_dropout_prob = p.atten_dropout_prob
params.packed_input = p.packed_input
return params
def _GetSourceLength(self, source_paddings):
return py_utils.GetShape(source_paddings)[0]
def FProp(self,
theta,
query_vec,
source_paddings,
source_vecs=None,
query_segment_id=None,
source_segment_id=None,
context_vecs=None,
**kwargs):
"""Transformer attention, residual and normalization layer.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
query_vec: [target_time, target_batch, dim]
source_paddings: [source_time, source_batch]
source_vecs: [source_time, source_batch, dim].
query_segment_id: [target_time, target_batch]
source_segment_id: [source_time, source_batch]
context_vecs: [source_time, target_batch, dim]
**kwargs: Can be optional params for the attention layer, eg. attention
projection index tensor.
Returns:
(output, atten_probs). output is of shape [target_time, target_batch,
context_dim], atten_probs is of shape [target_time, target_batch,
source_time].
"""
p = self.params
unnormalized_query_vec = query_vec
query_vec = self.layer_norm.FProp(theta.layer_norm, query_vec)
if source_vecs is None: # For self-attention: keys = queries.
source_vecs = query_vec
source_segment_id = query_segment_id
if context_vecs is None: # Inter/self-attention: keys = values/contexts.
context_vecs = source_vecs
target_time, target_bs, query_dim = py_utils.GetShape(query_vec, 3)
if p.is_masked:
assert source_vecs is not None
query_vec = py_utils.with_dependencies([
py_utils.assert_shape_match(
tf.shape(source_vecs), tf.shape(query_vec))
], query_vec)
# Prepares mask for self-attention
# Padding is complemented, so time indexes that we want to mask out
# receive padding weight 1.0.
if p.mask_type == 'future':
padding = 1.0 - tf.linalg.band_part(
tf.ones([target_time, target_time], dtype=py_utils.FPropDtype(p)),
-1, 0)
elif p.mask_type == 'eye':
padding = tf.eye(target_time, target_time, dtype=py_utils.FPropDtype(p))
elif p.mask_type == 'ngram': # Maybe apply N-gram mask.
assert p.mask_ngram_order
padding = 1.0 - tf.linalg.band_part(
tf.ones([target_time, target_time], dtype=py_utils.FPropDtype(p)),
tf.minimum(p.mask_ngram_order - 1, target_time - 1), 0)
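# For example (illustrative): with mask_ngram_order=2 and target_time=4,
# band_part keeps the main diagonal plus one sub-diagonal, so after the
# complement each position may attend only to itself and the previous position.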
# [time, batch, time]
causal_padding = tf.tile(tf.expand_dims(padding, 1), [1, target_bs, 1])
causal_padding = tf.reshape(causal_padding, [-1, target_time])
else:
causal_padding = None
# Projects keys and values.
packed_src = self.atten.PackSource(
theta=theta.atten,
source_vecs=source_vecs, # keys
source_contexts=context_vecs, # values
source_padding=source_paddings,
source_segment_id=source_segment_id)
if query_segment_id is not None:
query_segment_id = tf.reshape(query_segment_id, [-1])
ctx_vec, atten_prob, _ = self.atten.ComputeContextVectorWithSource(
theta=theta.atten,
packed_src=packed_src,
query_vec=tf.reshape(query_vec, [-1, query_dim]),
per_step_source_padding=causal_padding,
query_segment_id=query_segment_id,
**kwargs)
ctx_vec = self.residual_dropout.FProp(theta.residual_dropout, ctx_vec)
input_to_add = (
unnormalized_query_vec if p.add_unnormalized_input else query_vec)
input_after_sublayer = tf.reshape(
ctx_vec,
[
target_time,
target_bs,
-1 # Either projected or not.
])
if p.residual_function is None:
h = input_to_add + input_after_sublayer
else:
h = self.residual_function.FProp(theta.residual_function, input_to_add,
input_after_sublayer)
atten_prob = tf.reshape(
atten_prob,
[target_time, target_bs,
self._GetSourceLength(source_paddings)])
return h, atten_prob
def _FinishExtendStep(self,
theta,
query_vec,
unnormalized_query_vec,
extended_packed_src,
t=None):
"""Finish extending prefix by one more time step.
Isolating this function from ExtendStep allows generalizing self-attention
to causal attention on other inputs.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
query_vec: [target_batch, dim]
unnormalized_query_vec: [target_batch, dim]
extended_packed_src: A `.NestedMap` object containing source_vecs,
source_contexts, source_paddings, and source_segment_ids
t: a scalar, the current time step, 0-based.
Returns:
A triplet (cur_output, atten_prob, new_state) where cur_output is a tensor
representing the output from the current state, and new_state is the new
state `.NestedMap`.
"""
p = self.params
# Compute per_step_source_padding. Padding is complemented, so time indexes
# that we want to mask out receive padding weight 1.0.
query_batch_size = py_utils.GetShape(query_vec)[0]
source_seq_len = py_utils.GetShape(extended_packed_src.source_vecs)[0]
zero_padding = tf.fill([source_seq_len],
tf.constant(0.0, dtype=query_vec.dtype))
ones_padding = tf.ones_like(zero_padding, dtype=query_vec.dtype)
if t is not None:
per_step_source_padding = tf.where(
tf.less(tf.range(source_seq_len), tf.fill([source_seq_len], t + 1)),
zero_padding, ones_padding)
per_step_source_padding = tf.tile(
tf.expand_dims(per_step_source_padding, axis=0),
[query_batch_size, 1])
# Maybe apply N-gram masking.
# TODO(ciprianchelba): As pointed out by miachen, to get the expected
# speed-up we should go with per_step_source_padding=None here, and
# everytime we update the prefix_states, we not only extend one step, but
# also only keep the prefix_states for the most recent N steps instead of
# the prefix states all the way from step 0.
elif p.is_masked and p.mask_type == 'ngram':
assert p.mask_ngram_order
idx = tf.maximum(0, source_seq_len - p.mask_ngram_order)
per_step_source_padding = tf.where(
tf.less(tf.range(source_seq_len), tf.fill([source_seq_len], idx)),
ones_padding, zero_padding)
per_step_source_padding = tf.tile(
tf.expand_dims(per_step_source_padding, axis=0),
[query_batch_size, 1])
else:
per_step_source_padding = None
ctx_vec, atten_prob, _ = self.atten.ComputeContextVectorWithCachedSource(
theta.atten,
extended_packed_src,
query_vec,
per_step_source_padding=per_step_source_padding)
ctx_vec = self.residual_dropout.FProp(theta.residual_dropout, ctx_vec)
input_to_add = (
unnormalized_query_vec if p.add_unnormalized_input else query_vec)
input_after_sublayer = tf.reshape(ctx_vec, py_utils.GetShape(query_vec))
if p.residual_function is None:
h = input_to_add + input_after_sublayer
else:
h = self.residual_function.FProp(theta.residual_function, input_to_add,
input_after_sublayer)
new_states = py_utils.NestedMap(
key=extended_packed_src.source_vecs,
value=extended_packed_src.source_contexts)
return h, atten_prob, new_states
def ExtendStep(self, theta, query_vec, prefix_state, t=None):
"""Extend prefix by one more time step.
This function is expected to be called during fast decoding of the
Transformer model.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
query_vec: [target_batch, dim]
prefix_state: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
t: a scalar, the current time step, 0-based.
Returns:
A triplet (cur_output, atten_prob, new_state) where cur_output is a tensor
representing the output from the current state, and new_state is the new
state `.NestedMap`.
"""
p = self.params
assert p.is_masked # Must be causal attention.
unnormalized_query_vec = query_vec
query_vec = self.layer_norm.FProp(theta.layer_norm, query_vec)
cached_packed_src = py_utils.NestedMap(
source_vecs=prefix_state.key,
source_contexts=prefix_state.value,
source_padding=None,
source_segment_id=None)
extended_packed_src = self.atten.ExtendSourcePacked(theta.atten, query_vec,
query_vec, None, None,
cached_packed_src, t)
return self._FinishExtendStep(theta, query_vec, unnormalized_query_vec,
extended_packed_src, t)
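# Illustrative sketch: the helper below reproduces, in isolation, the
# per-step source padding logic used by _FinishExtendStep above. Positions
# after the current decode step t get padding weight 1.0 (masked out), and
# the optional N-gram variant keeps only the most recent `ngram_order`
# positions visible. This is a minimal standalone sketch using TF2-style ops;
# the real layer additionally tiles the mask to [query_batch_size, seq_len].
def _example_per_step_source_padding(source_seq_len, t=None, ngram_order=None):
  import tensorflow as tf
  zeros = tf.zeros([source_seq_len], dtype=tf.float32)
  ones = tf.ones([source_seq_len], dtype=tf.float32)
  positions = tf.range(source_seq_len)
  if t is not None:
    # Steps 0..t stay visible (padding 0.0); everything after t is masked.
    return tf.where(positions < t + 1, zeros, ones)
  if ngram_order:
    # Keep only the trailing `ngram_order` positions visible.
    first_visible = tf.maximum(0, source_seq_len - ngram_order)
    return tf.where(positions < first_visible, ones, zeros)
  return None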
class TransformerMultiSourceAttentionLayer(TransformerAttentionLayer):
"""Multi-source multi-headed attention.
  Only supports scenarios 3 and 4 of the base class, which here become:
  3. Multi-source multi-headed attention, where the attention keys and values
  (`source_vecs`) are different encodings and the queries (`query_vec`) come
  from the previous layer outputs (decoder). In addition, the attention keys
  and values are NestedMaps containing encodings of different sources. This
  corresponds to a multi-source decoder-to-encoder attention mechanism, i.e.,
  the decoder attends to the encoder outputs and other sources.
  4. Similar to 3, but the attention values (`context_vecs`) come from a
  different source than the queries and keys.
"""
@classmethod
def Params(cls):
p = super(TransformerMultiSourceAttentionLayer, cls).Params()
p.Define('num_source', 0, 'Number of sources to attend to.')
p.Define(
'primary_source_index', 0, 'Index of the primary source whose '
'attention probs will be returned.')
p.Define('multi_source_atten', attention.MultiSourceAttention.Params(),
'Multi-source attention params.')
    # Only used for cases 3 and 4.
p.is_masked = False
return p
@base_layer.initializer
def __init__(self, params):
super(TransformerMultiSourceAttentionLayer, self).__init__(params)
def _InitAttention(self, atten_tpl):
p = self.params
source_atten_tpls = []
# Set up each source attention.
for i in range(p.num_source):
src_key = 'source_%d' % i
src_atten = atten_tpl.Copy()
src_atten = super(TransformerMultiSourceAttentionLayer,
self)._InitAttention(src_atten)
src_atten.name = 'multihead_atten_%s' % src_key
source_atten_tpls.append((src_key, src_atten))
# Initialize multi-source attention.
msa = p.multi_source_atten.Copy()
msa.name = 'multi_source_atten'
msa.source_dim = p.source_dim
msa.query_dim = p.source_dim
msa.source_atten_tpls = source_atten_tpls
msa.primary_source_key = 'source_%d' % p.primary_source_index
return msa
def _GetSourceLength(self, source_paddings):
return py_utils.GetShape(
source_paddings['source_%d' % self.params.primary_source_index])[0]
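# Illustrative sketch: a minimal example of how this multi-source attention
# layer might be configured, based only on the Params defined above. The
# name, model dimension and source count below are hypothetical placeholders,
# not values from the original code.
def _example_multi_source_atten_params():
  p = TransformerMultiSourceAttentionLayer.Params()
  p.name = 'multi_source_atten_example'  # hypothetical name
  p.source_dim = 512                     # hypothetical model dimension
  p.num_source = 2                       # attend to two encoder sources
  p.primary_source_index = 0             # report attention probs for source_0
  return p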
class TransformerFeedForwardLayer(base_layer.BaseLayer):
"""Feed-forward, add and norm layer used by 'Attention Is All You Need'.
  This class implements the second sub-layer of the Transformer layer. The
  input first passes through a feed-forward network with one hidden layer and
  is then projected back to the original input dimension so the residual can
  be applied. The output of the layer is then normalized using layer
  normalization.
"""
@classmethod
def Params(cls):
p = super(TransformerFeedForwardLayer, cls).Params()
p.Define('input_dim', 0, 'Dimension of the layer input.')
p.Define('output_dim', 0, 'Dimension of the layer output.')
p.Define('hidden_dim', 0, 'Dimension of the hidden layer.')
p.Define('ln_tpl', layers.LayerNorm.Params(), 'Layer norm default params')
p.Define('activation', 'RELU', 'Non-linearity.')
p.Define('fflayer_tpl',
layers.FeedForwardNet.Params().Set(activation=['RELU', 'NONE']),
'Feed forward layer default params')
p.Define(
'res_proj_tpl',
layers.ProjectionLayer.Params().Set(batch_norm=True),
'Residual projection default params, used when input_dim != '
'output_dim.')
p.Define(
'residual_dropout_prob', 0.0,
'Probability at which we apply dropout to the residual layers, '
'such that, residual(x, y) = (x + dropout(y)).')
p.Define(
'residual_dropout_tpl', layers.DropoutLayer.Params(),
'Residual dropout params template. keep_prop will be reset to '
'(1.0 - residual_dropout_prob).')
p.Define(
'relu_dropout_prob', 0.0,
'Probability at which we apply dropout to the hidden layer '
'of feed-forward network.')
p.Define('add_skip_connection', True,
'If True, add skip_connection from input to output.')
return p
@base_layer.initializer
def __init__(self, params):
super(TransformerFeedForwardLayer, self).__init__(params)
p = self.params
assert p.name
assert p.input_dim
assert symbolic.ToStatic(p.hidden_dim) > 0
with tf.variable_scope(p.name):
# Initialize feed-forward layer
params = p.fflayer_tpl.Copy()
params.name = 'fflayer'
params.input_dim = p.input_dim
params.activation = [p.activation, 'NONE']
if p.output_dim == 0:
params.hidden_layer_dims = [p.hidden_dim, p.input_dim]
else:
params.hidden_layer_dims = [p.hidden_dim, p.output_dim]
if p.output_dim != p.input_dim:
pj = p.res_proj_tpl.Copy()
pj.name = 'res_proj'
pj.input_dim = p.input_dim
pj.output_dim = p.output_dim
pj.activation = 'NONE'
self.CreateChild('res_proj_layer', pj)
params.dropout = [
params.dropout.cls.Params().Set(keep_prob=1.0 - p.relu_dropout_prob),
params.dropout.cls.Params().Set(keep_prob=1.0)
]
self.CreateChild('fflayer', params)
# Initialize feed-forward layer norm
params = p.ln_tpl.Copy()
params.name = 'fflayer_ln'
params.input_dim = p.input_dim
self.CreateChild('layer_norm', params)
dropout_tpl = p.residual_dropout_tpl.Copy()
dropout_tpl.keep_prob = (1.0 - p.residual_dropout_prob)
self.CreateChild('residual_dropout', dropout_tpl)
@property
def output_dim(self):
"""Returns output dimension of the transformer layer."""
return self.fflayer.output_dim
@classmethod
def NumOutputNodes(cls, p):
return p.output_dim if p.output_dim else p.input_dim
def FProp(self, theta, inputs, paddings):
"""Feed-forward, residual and layer-norm.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: [time, batch, dim].
paddings: [time, batch]
Returns:
      A tensor of the same shape as inputs.
"""
inputs_normalized = self.layer_norm.FProp(theta.layer_norm, inputs)
if hasattr(self, 'res_proj_layer'):
inputs = self.res_proj_layer.FProp(theta.res_proj_layer, inputs)
h = self.residual_dropout.FProp(
theta.residual_dropout,
self.fflayer.FProp(theta.fflayer, inputs_normalized,
tf.expand_dims(paddings, -1)))
if self.params.add_skip_connection:
h += inputs
return h
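# Illustrative sketch: a standalone version of the computation performed by
# TransformerFeedForwardLayer.FProp above, assuming input_dim == output_dim
# and add_skip_connection=True: pre-layer-norm, a two-layer feed-forward
# network (hidden RELU, linear output), residual dropout, and the skip
# connection. TF2-style ops are used and the weights are created ad hoc; the
# real layer uses learned FeedForwardNet and LayerNorm children.
def _example_transformer_ffn_block(inputs, hidden_dim, dropout_rate=0.0):
  """inputs: [time, batch, model_dim] with a statically known last dim."""
  import tensorflow as tf
  model_dim = int(inputs.shape[-1])
  # Ad-hoc weights standing in for the layer's learned variables.
  w1 = tf.random.normal([model_dim, hidden_dim], stddev=0.02)
  w2 = tf.random.normal([hidden_dim, model_dim], stddev=0.02)
  # Pre-layer-norm (simplified: no learned scale or bias).
  mean, variance = tf.nn.moments(inputs, axes=[-1], keepdims=True)
  normalized = (inputs - mean) * tf.math.rsqrt(variance + 1e-6)
  # Feed-forward with one hidden RELU layer, projected back to model_dim.
  hidden = tf.nn.relu(tf.einsum('...d,dh->...h', normalized, w1))
  projected = tf.einsum('...h,hd->...d', hidden, w2)
  # Residual dropout, then the skip connection.
  projected = tf.nn.dropout(projected, rate=dropout_rate)
  return inputs + projected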
class TransformerLayer(base_layer.BaseLayer):
"""Transformer Layer proposed by 'Attention Is All You Need'.
Applies self-attention followed by a feed forward network and
layer normalization. Uses residual connections between each consecutive
layer. In particular, adds residuals from layer input and attention output
and from attention output (feed-forward input) to feed-forward output.
Implements the transformer block in 'Attention is All You Need':
https://arxiv.org/abs/1706.03762.
"""
@classmethod
def Params(cls):
p = super(TransformerLayer, cls).Params()
p.Define('source_dim', 0, 'Dimension of the transformer block input.')
p.Define('output_dim', 0, 'Dimension of the transformer block output.')
p.Define('tr_atten_tpl',
TransformerAttentionLayer.Params().Set(num_attention_heads=8),
'Transformer Attention Layer params.')
p.Define('tr_post_ln_tpl', None,
'(Optional) Layer norm at end of transformer layer.')
p.Define('tr_fflayer_tpl',
TransformerFeedForwardLayer.Params().Set(hidden_dim=2048),
'Transformer Feed-Forward Layer params.')
p.Define(
'has_aux_atten', False,
'If set, introduces a second attention layer, which attends to'
' the auxiliary source contexts.')
p.Define('tr_aux_atten_tpl', None, 'Transformer Attention Layer params.')
p.Define('mask_self_atten', False, 'If True, use masked self-attention.')
p.Define('packed_input', False,
'If True, each training example may pack multiple sequences.')
p.Define(
'is_decoder', False, '(Deprecated) '
'If true, forces both has_aux_atten and mask_self_atten to true.')
p.Define(
'num_aux_atten_post_proj', 1, 'Number of post projections for aux '
'attention. This is usually used in multi-task setting, in which '
'each task uses one dedicated projection layer.')
return p
@base_layer.initializer
def __init__(self, params):
super(TransformerLayer, self).__init__(params)
p = self.params
assert p.name
assert p.source_dim
if p.is_decoder:
tf.logging.warning('TransformerLayer.is_decoder is deprecated.')
p.has_aux_atten = True
p.mask_self_atten = True
with tf.variable_scope(p.name):
# Initialize multi-headed self-attention
params = p.tr_atten_tpl.Copy()
params.name = 'multihead_self_atten'
params.source_dim = p.source_dim
params.packed_input = p.packed_input
params.is_masked = p.mask_self_atten
self.CreateChild('self_atten', params)
if p.has_aux_atten:
# Initialize masked-multi-headed attention
params = (
p.tr_atten_tpl.Copy()
if p.tr_aux_atten_tpl is None else p.tr_aux_atten_tpl.Copy())
params.name = 'multihead_atten'
params.source_dim = p.source_dim
params.packed_input = p.packed_input
if hasattr(params.atten_tpl, 'num_post_proj'):
params.atten_tpl.num_post_proj = p.num_aux_atten_post_proj
self.CreateChild('atten', params)
# Initialize feed-forward layer
params = p.tr_fflayer_tpl.Copy()
params.name = 'tr_fflayer'
params.input_dim = p.source_dim
params.output_dim = p.output_dim
self.CreateChild('fflayer', params)
# Initialize output layer norm
if p.tr_post_ln_tpl:
params = p.tr_post_ln_tpl.Copy()
params.name = 'tr_post_layer_norm'
params.input_dim = p.source_dim
self.CreateChild('layer_norm', params)
@property
def output_dim(self):
"""Returns output dimension of the transformer layer."""
# output_dim is equal to p.source_dim when p.output_dim is zero.
return self.fflayer.output_dim
@classmethod
def NumOutputNodes(cls, p):
return p.output_dim if p.output_dim else p.source_dim
def FProp(self,
theta,
source_vecs,
source_paddings,
aux_vecs=None,
aux_paddings=None,
source_segment_id=None,
aux_segment_id=None,
**kwargs):
"""Transformer Layer.
    The transformer layer uses the following naming scheme: `source_vecs` and
    `source_paddings` are assumed to come from the activations of the layer
    below. When `TransformerLayer` is used in the encoder (the default
    behavior of this layer), the `source_*` tensors correspond to the outputs
    of the previous encoder layer, and keys, values and queries are all forked
    from `source_vecs`. When `TransformerLayer` is used in the decoder
    (has_aux_atten=True), the `source_*` tensors correspond to the outputs of
    the previous decoder layer and are used as the queries.
    When `TransformerLayer` is used in the decoder (has_aux_atten=True), the
    `aux_*` tensors must also be provided. These auxiliary inputs correspond
    to the top-most encoder layer outputs and are used by the second
    `TransformerAttentionLayer` as keys and values.
    In both the encoder and the decoder, the queries always come from the
    activations of the layer below, in particular `source_vecs`.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
source_vecs: [source_time, source_batch, dim].
source_paddings: [source_time, source_batch]
aux_vecs: [aux_time, aux_batch, dim]
aux_paddings: [aux_time, aux_batch]
source_segment_id: [source_time, source_batch]
aux_segment_id: [aux_time, aux_batch]
**kwargs: Can be optional params for the attention layer, eg. attention
projection index tensor.
Returns:
The attention context vector, [source_time, source_batch, dim].
The attention probability vector, [source_time, source_batch, source_time]
if has_aux_atten is False, otherwise [source_time, source_batch,
aux_time].
"""
p = self.params
if p.packed_input:
assert source_segment_id is not None, ('Need to specify segment id for '
'packed input.')
with tf.name_scope('self_atten'):
atten_vec, atten_prob = self.self_atten.FProp(
theta.self_atten,
source_vecs,
source_paddings,
query_segment_id=source_segment_id)
if p.has_aux_atten:
assert aux_vecs is not None
assert aux_paddings is not None
with tf.name_scope('aux_atten'):
atten_vec, atten_prob = self.atten.FProp(theta.atten, atten_vec,
aux_paddings, aux_vecs,
source_segment_id,
aux_segment_id, **kwargs)
with tf.name_scope('fflayer'):
h = self.fflayer.FProp(theta.fflayer, atten_vec, source_paddings)
if p.tr_post_ln_tpl:
with tf.name_scope('layer_norm'):
h = self.layer_norm.FProp(theta.layer_norm, h)
return h, atten_prob
def ExtendStep(self,
theta,
source_vecs,
prefix_states,
aux_vecs=None,
aux_paddings=None,
t=None,
**kwargs):
"""Transformer Layer, extend one step in decoding.
This function is expected to be called during fast decoding of Transformer
models.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
source_vecs: [source_batch, dim].
prefix_states: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
aux_vecs: [aux_time, aux_batch, dim]
aux_paddings: [aux_time, aux_batch]
t: a scalar, the current time step, 0-based.
**kwargs: Can be optional params for the attention layer, eg. attention
projection index tensor.
Returns:
The attention context vector, [target_batch, source_dim]
The attention probability vector, [source_time, target_batch]
Updated prefix states
"""
p = self.params
if p.has_aux_atten:
assert aux_vecs is not None
assert aux_paddings is not None
batch_size = py_utils.GetShape(source_vecs)[0]
# First the self-attention layer.
atten_vec, atten_prob, new_states = self.self_atten.ExtendStep(
theta.self_atten, source_vecs, prefix_states, t)
atten_vec = tf.expand_dims(atten_vec, axis=0)
# Next the source attention layer.
if p.has_aux_atten:
atten_vec, atten_prob = self.atten.FProp(theta.atten, atten_vec,
aux_paddings, aux_vecs, **kwargs)
# Finally, the feedforward layer.
h = self.fflayer.FProp(
theta.fflayer, atten_vec,
tf.zeros([1, batch_size], dtype=py_utils.FPropDtype(p)))
if p.tr_post_ln_tpl:
h = self.layer_norm.FProp(theta.layer_norm, h)
h = tf.squeeze(h, 0)
return h, atten_prob, new_states
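# Illustrative sketch: ExtendStep above decodes one step by extending a cached
# key/value prefix and attending over it. The helper below shows the same idea
# with plain single-head dot-product attention: append the new step to the
# cache and let the current query attend over everything cached so far. It is
# a conceptual sketch only; the real layer uses multi-headed attention with
# projections and NestedMap-based prefix states.
def _example_extend_step_with_cache(query, cached_keys, cached_values):
  """query: [batch, dim]; cached_*: [time, batch, dim] (time may be 0)."""
  import tensorflow as tf
  new_keys = tf.concat([cached_keys, query[tf.newaxis, :, :]], axis=0)
  new_values = tf.concat([cached_values, query[tf.newaxis, :, :]], axis=0)
  dim = tf.cast(tf.shape(query)[-1], query.dtype)
  # Logits of the single query step over the whole prefix: [time, batch].
  logits = tf.reduce_sum(new_keys * query[tf.newaxis, :, :], axis=-1)
  logits /= tf.sqrt(dim)
  probs = tf.nn.softmax(logits, axis=0)  # normalize over the time axis
  ctx = tf.reduce_sum(probs[:, :, tf.newaxis] * new_values, axis=0)
  return ctx, probs, (new_keys, new_values)  # context, probs, updated cache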
class EvolvedTransformerEncoderBranchedConvsLayer(base_layer.BaseLayer):
"""Evolved Transformer encoder branched convolutions layer.
This constructs the branched convolution portion of the Evolved Transformer
encoder described in https://arxiv.org/abs/1901.11117 .
"""
@classmethod
def Params(cls):
p = super(EvolvedTransformerEncoderBranchedConvsLayer, cls).Params()
p.Define('ln_tpl', layers.LayerNorm.Params(), 'Layer norm default params')
p.Define('input_dim', 0, 'Dimension of the layer input.')
p.Define('activation', 'RELU',
'Activation applied after the left and right branches.')
p.Define('dropout_tpl', layers.DropoutLayer.Params(),
'Dropout applied to each layer output.')
p.Define('dense_tpl', layers.FCLayer.Params(),
'Fully connected "dense" layer.')
p.Define('conv_tpl', layers.Conv2DLayer.Params(),
'Standard convolution layer.')
p.Define('separable_conv_tpl', layers.SeparableConv2DLayer.Params(),
'Separable convolution layer.')
return p
@base_layer.initializer
def __init__(self, params):
super(EvolvedTransformerEncoderBranchedConvsLayer, self).__init__(params)
p = self.params
assert p.name
assert p.input_dim
with tf.variable_scope(p.name):
# Initialize first layer norm.
params = p.ln_tpl.Copy()
params.name = 'first_layer_norm'
params.input_dim = p.input_dim
self.CreateChild('first_layer_norm', params)
# Initialize second layer norm.
params = p.ln_tpl.Copy()
params.name = 'second_layer_norm'
params.input_dim = p.input_dim * 4
self.CreateChild('second_layer_norm', params)
# Initialize dense layer.
params = p.dense_tpl.Copy()
params.name = 'dense_layer'
params.input_dim = p.input_dim
params.activation = p.activation
params.output_dim = p.input_dim * 4
self.CreateChild('dense_layer', params)
# Initialize standard conv.
params = p.conv_tpl.Copy()
params.name = 'conv_layer'
params.bias = True
params.batch_norm = False
params.activation = p.activation
params.filter_stride = (1, 1)
params.filter_shape = (3, 1, p.input_dim, int(p.input_dim / 2))
self.CreateChild('conv_layer', params)
# Initialize separable conv.
params = p.separable_conv_tpl.Copy()
params.name = 'separable_conv_layer'
params.bias = True
params.batch_norm = False
params.activation = 'NONE'
params.filter_stride = (1, 1)
params.filter_shape = (9, 1, int(p.input_dim * 4), p.input_dim)
self.CreateChild('separable_conv_layer', params)
# Initialize dropout.
dropout_tpl = p.dropout_tpl.Copy()
self.CreateChild('dropout', dropout_tpl)
def FProp(self, theta, inputs, paddings):
inputs_normalized = self.first_layer_norm.FProp(theta.first_layer_norm,
inputs)
left_branch = self.dense_layer.FProp(theta.dense_layer, inputs_normalized,
tf.expand_dims(paddings, -1))
left_branch = self.dropout.FProp(theta.dropout, left_branch)
# Newly computed padding is discarded.
right_branch = self.conv_layer.FProp(
theta.conv_layer, tf.expand_dims(inputs_normalized, axis=2),
paddings)[0]
right_branch = tf.squeeze(right_branch, axis=2)
right_branch = self.dropout.FProp(theta.dropout, right_branch)
right_branch = tf.pad(
right_branch,
[[0, 0], [0, 0],
[0, tf.shape(left_branch)[-1] - tf.shape(right_branch)[-1]]],
constant_values=0)
hidden_state = left_branch + right_branch
hidden_state = self.second_layer_norm.FProp(theta.second_layer_norm,
hidden_state)
# Newly computed padding is discarded.
hidden_state = self.separable_conv_layer.FProp(
theta.separable_conv_layer, tf.expand_dims(hidden_state, axis=2),
paddings)[0]
hidden_state = tf.squeeze(hidden_state, axis=2)
hidden_state = tf.pad(
hidden_state, [[0, 0], [0, 0],
[0, tf.shape(inputs)[-1] - tf.shape(hidden_state)[-1]]],
constant_values=0)
hidden_state = self.dropout.FProp(theta.dropout, hidden_state)
hidden_state += inputs
return hidden_state
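# Illustrative sketch: the FProp above layer-norms the input, runs a dense
# branch (4 * input_dim, RELU) and a 3x1 conv branch (input_dim / 2, RELU),
# zero-pads the narrower branch so the channel counts match, sums them,
# normalizes, and applies a 9x1 separable conv back to input_dim before the
# residual add. The sketch below mirrors that dataflow with tf.keras layers on
# batch-major [batch, time, dim] inputs; dropout and the padding handling of
# the lingvo layers are omitted for brevity.
def _example_encoder_branched_convs(inputs):
  import tensorflow as tf
  dim = int(inputs.shape[-1])
  normalized = tf.keras.layers.LayerNormalization()(inputs)
  left = tf.keras.layers.Dense(4 * dim, activation='relu')(normalized)
  right = tf.keras.layers.Conv1D(dim // 2, 3, padding='same',
                                 activation='relu')(normalized)
  # Zero-pad the conv branch from dim // 2 channels up to 4 * dim channels.
  right = tf.pad(right, [[0, 0], [0, 0], [0, 4 * dim - dim // 2]])
  hidden = tf.keras.layers.LayerNormalization()(left + right)
  hidden = tf.keras.layers.SeparableConv1D(dim, 9, padding='same')(hidden)
  return hidden + inputs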
class EvolvedTransformerDecoderBranchedConvsLayer(base_layer.BaseLayer):
"""Evolved Transformer decoder branched convolutions layer.
This constructs the branched convolution portion of the Evolved Transformer
decoder described in https://arxiv.org/abs/1901.11117 .
"""
@classmethod
def Params(cls):
p = super(EvolvedTransformerDecoderBranchedConvsLayer, cls).Params()
p.Define('ln_tpl', layers.LayerNorm.Params(), 'Layer norm default params')
p.Define('input_dim', 0, 'Dimension of the layer input.')
p.Define('activation', 'RELU',
'Activation applied to the left convolution branch output.')
p.Define('dropout_tpl', layers.DropoutLayer.Params(),
'Dropout applied to each layer output.')
p.Define('separable_conv_tpl',
layers.SeparableConv2DLayer.Params().Set(causal_convolution=True),
'Separable convolution layer.')
return p
@base_layer.initializer
def __init__(self, params):
super(EvolvedTransformerDecoderBranchedConvsLayer, self).__init__(params)
p = self.params
assert p.name
assert p.input_dim
with tf.variable_scope(p.name):
# Initialize first layer norm.
params = p.ln_tpl.Copy()
params.name = 'first_layer_norm'
params.input_dim = p.input_dim
self.CreateChild('first_layer_norm', params)
# Initialize second layer norm.
params = p.ln_tpl.Copy()
params.name = 'second_layer_norm'
params.input_dim = p.input_dim * 2
self.CreateChild('second_layer_norm', params)
      # Initialize the 11x1 separable conv.
params = p.separable_conv_tpl.Copy()
params.name = 'separable_conv_11x1_layer'
params.bias = True
params.batch_norm = False
params.activation = p.activation
params.filter_stride = (1, 1)
params.filter_shape = (11, 1, p.input_dim, int(p.input_dim * 2))
self.CreateChild('separable_conv_11x1_layer', params)
      # Initialize the first 7x1 separable conv.
params = p.separable_conv_tpl.Copy()
params.name = 'separable_conv_7x1_layer'
params.bias = True
params.batch_norm = False
params.activation = 'NONE'
params.filter_stride = (1, 1)
params.filter_shape = (7, 1, p.input_dim, int(p.input_dim / 2))
self.CreateChild('separable_conv_7x1_layer', params)
      # Initialize the second 7x1 separable conv.
params = p.separable_conv_tpl.Copy()
params.name = 'separable_conv_7x1_layer_2'
params.bias = True
params.batch_norm = False
params.activation = 'NONE'
params.filter_stride = (1, 1)
params.filter_shape = (7, 1, int(p.input_dim * 2), p.input_dim)
self.CreateChild('separable_conv_7x1_layer_2', params)
# Initialize dropout.
dropout_tpl = p.dropout_tpl.Copy()
self.CreateChild('dropout', dropout_tpl)
def FProp(self, theta, inputs, paddings):
inputs_normalized = self.first_layer_norm.FProp(theta.first_layer_norm,
inputs)
left_branch = self.separable_conv_11x1_layer.FProp(
theta.separable_conv_11x1_layer,
tf.expand_dims(inputs_normalized, axis=2), paddings)[0]
left_branch = self.dropout.FProp(theta.dropout, left_branch)
right_branch = self.separable_conv_7x1_layer.FProp(
theta.separable_conv_7x1_layer,
tf.expand_dims(inputs_normalized, axis=2), paddings)[0]
right_branch = self.dropout.FProp(theta.dropout, right_branch)
right_branch = tf.pad(
right_branch,
[[0, 0], [0, 0], [0, 0],
[0, tf.shape(left_branch)[-1] - tf.shape(right_branch)[-1]]],
constant_values=0)
hidden_state = left_branch + right_branch
hidden_state = self.second_layer_norm.FProp(theta.second_layer_norm,
hidden_state)
hidden_state = self.separable_conv_7x1_layer_2.FProp(
theta.separable_conv_7x1_layer_2, hidden_state, paddings)[0]
hidden_state = self.dropout.FProp(theta.dropout, hidden_state)
hidden_state = tf.squeeze(hidden_state, axis=2)
return hidden_state + inputs
class EvolvedTransformerBaseLayer(base_layer.BaseLayer):
"""Base layer for the Evolved Transformer."""
@classmethod
def Params(cls):
p = super(EvolvedTransformerBaseLayer, cls).Params()
p.Define('source_dim', 0, 'Dimension of the transformer block input.')
p.Define(
'has_aux_atten', False,
'If set, introduces a second attention layer, which attends to'
' the auxiliary source contexts.')
p.Define('packed_input', False,
'If True, each training example may pack multiple sequences.')
return p
class EvolvedTransformerEncoderLayer(EvolvedTransformerBaseLayer):
"""Evolved Transformer encoder layer.
An Evolved Transformer encoder layer as described in
https://arxiv.org/abs/1901.11117 .
"""
@classmethod
def Params(cls):
p = super(EvolvedTransformerEncoderLayer, cls).Params()
p.Define('glu_tpl', layers.GluLayer.Params(), 'Glu layer.')
p.Define('branched_convs_tpl',
EvolvedTransformerEncoderBranchedConvsLayer.Params(),
'Evolved Transformer branched convolutional layers.')
p.Define('transformer_tpl', TransformerLayer.Params(), 'Transformer layer.')
return p
@base_layer.initializer
def __init__(self, params):
super(EvolvedTransformerEncoderLayer, self).__init__(params)
p = self.params
assert p.name
assert p.source_dim
# Auxiliary attention not supported.
if p.has_aux_atten:
raise ValueError('Auxiliary attention not supported.')
with tf.variable_scope(p.name):
# Initialize Glu layer.
params = p.glu_tpl.Copy()
params.name = 'glu_layer'
params.input_dim = p.source_dim
self.CreateChild('glu_layer', params)
# Initialize branched convolutions layer.
params = p.branched_convs_tpl.Copy()
params.name = 'branched_convs_layer'
params.input_dim = p.source_dim
self.CreateChild('branched_convs_layer', params)
      # Initialize transformer layer.
params = p.transformer_tpl.Copy()
params.name = 'transformer_layer'
params.source_dim = p.source_dim
params.output_dim = p.source_dim
params.tr_fflayer_tpl.hidden_dim = 4 * p.source_dim
# Decoder functionality is not supported so disable auxiliary attention.
params.has_aux_atten = False
params.tr_aux_atten_tpl = None
params.mask_self_atten = False
params.is_decoder = False
params.packed_input = p.packed_input
self.CreateChild('transformer_layer', params)
def FProp(self,
theta,
source_vecs,
source_paddings,
aux_vecs=None,
aux_paddings=None,
source_segment_id=None,
aux_segment_id=None):
hidden_state = self.glu_layer.FProp(theta.glu_layer, source_vecs,
source_paddings)
hidden_state = tf.transpose(hidden_state, [1, 0, 2])
source_paddings = tf.transpose(source_paddings, [1, 0])
hidden_state = self.branched_convs_layer.FProp(theta.branched_convs_layer,
hidden_state,
source_paddings)
hidden_state = tf.transpose(hidden_state, [1, 0, 2])
source_paddings = tf.transpose(source_paddings, [1, 0])
hidden_state, atten_prob = self.transformer_layer.FProp(
theta.transformer_layer, hidden_state, source_paddings, aux_vecs,
aux_paddings, source_segment_id, aux_segment_id)
return hidden_state, atten_prob
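# Illustrative sketch: EvolvedTransformerEncoderLayer.FProp above first passes
# the input through a gated linear unit (GluLayer). The helper below shows the
# core GLU idea only: project the input to twice its width, split it into a
# value half and a gate half, and multiply the value by the sigmoid of the
# gate. The actual layers.GluLayer is configured through its own Params and
# may add its own normalization and residual handling, which is not shown here.
def _example_glu(inputs):
  import tensorflow as tf
  dim = int(inputs.shape[-1])
  projected = tf.keras.layers.Dense(2 * dim)(inputs)
  value, gate = tf.split(projected, 2, axis=-1)
  return value * tf.sigmoid(gate)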
class EvolvedTransformerDecoderLayer(EvolvedTransformerBaseLayer):
"""Evolved Transformer decoder layer.
An Evolved Transformer decoder layer as described in
https://arxiv.org/abs/1901.11117 .
"""
@classmethod
def Params(cls):
p = super(EvolvedTransformerDecoderLayer, cls).Params()
p.Define('tr_atten_tpl',
TransformerAttentionLayer.Params().Set(num_attention_heads=8),
'Transformer attention layer params.')
p.Define('tr_double_heads_atten_tpl',
TransformerAttentionLayer.Params().Set(num_attention_heads=16),
'Transformer double heads attention layer params.')
p.Define('branched_convs_tpl',
EvolvedTransformerDecoderBranchedConvsLayer.Params(),
'Evolved Transformer branched convolutional layers.')
p.Define('transformer_tpl', TransformerLayer.Params(), 'Transformer layer.')
p.Define('tr_aux_atten_tpl', None, 'Transformer Attention Layer params.')
p.Define('mask_self_atten', False, 'If True, use masked self-attention.')
p.has_aux_atten = True
return p
@base_layer.initializer
def __init__(self, params):
super(EvolvedTransformerDecoderLayer, self).__init__(params)
p = self.params
assert p.name
assert p.source_dim
with tf.variable_scope(p.name):
# Initialize multi-headed self-attention.
params = p.tr_double_heads_atten_tpl.Copy()
params.name = 'self_atten_double_heads'
params.source_dim = p.source_dim
params.is_masked = p.mask_self_atten
# Packed input is not supported.
params.packed_input = p.packed_input
self.CreateChild('self_atten_double_heads', params)
if p.has_aux_atten:
# Initialize masked-multi-headed encoder attention.
params = (
p.tr_aux_atten_tpl.Copy()
if p.tr_aux_atten_tpl is not None else p.tr_atten_tpl.Copy())
params.name = 'attend_to_encoder'
params.source_dim = p.source_dim
# Packed input is not supported.
params.packed_input = p.packed_input
self.CreateChild('attend_to_encoder', params)
# Initialize branched convolutional layers.
params = p.branched_convs_tpl.Copy()
params.name = 'branched_convs'
params.input_dim = p.source_dim
self.CreateChild('branched_convs', params)
# Initialize transformer layer.
params = p.transformer_tpl.Copy()
params.name = 'transformer_layer'
params.source_dim = p.source_dim
params.output_dim = p.source_dim
params.tr_fflayer_tpl.hidden_dim = 4 * p.source_dim
params.tr_aux_atten_tpl = p.tr_aux_atten_tpl
params.has_aux_atten = p.has_aux_atten
params.mask_self_atten = p.mask_self_atten
params.tr_fflayer_tpl.activation = 'SWISH'
# Packed input is not supported.
params.packed_input = p.packed_input
self.CreateChild('transformer_layer', params)
def FProp(self,
theta,
source_vecs,
source_paddings,
aux_vecs=None,
aux_paddings=None,
source_segment_id=None,
aux_segment_id=None):
p = self.params
if p.has_aux_atten:
assert aux_vecs is not None
assert aux_paddings is not None
with tf.name_scope('self_atten_double_heads'):
left_branch, _ = self.self_atten_double_heads.FProp(
theta.self_atten_double_heads,
source_vecs,
source_paddings,
query_segment_id=source_segment_id)
if p.has_aux_atten:
with tf.name_scope('attend_to_encoder'):
right_branch, _ = self.attend_to_encoder.FProp(theta.attend_to_encoder,
source_vecs,
aux_paddings, aux_vecs,
source_segment_id,
aux_segment_id)
hidden_state = left_branch + right_branch + source_vecs
else:
hidden_state = left_branch + source_vecs
hidden_state = tf.transpose(hidden_state, [1, 0, 2])
source_paddings = tf.transpose(source_paddings, [1, 0])
hidden_state = self.branched_convs.FProp(theta.branched_convs, hidden_state,
source_paddings)
hidden_state = tf.transpose(hidden_state, [1, 0, 2])
source_paddings = tf.transpose(source_paddings, [1, 0])
hidden_state, atten_prob = self.transformer_layer.FProp(
theta.transformer_layer, hidden_state, source_paddings, aux_vecs,
aux_paddings, source_segment_id, aux_segment_id)
return hidden_state, atten_prob
def ExtendStep(self,
theta,
source_vecs,
prefix_states,
aux_vecs=None,
aux_paddings=None,
t=None):
"""Evolved Transformer decoder layer, extended one step in decoding.
This function is expected to be called during fast decoding of Evolved
Transformer models.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
source_vecs: [source_batch, dim].
prefix_states: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
aux_vecs: [aux_time, aux_batch, dim]
aux_paddings: [aux_time, aux_batch]
t: a scalar, the current time step, 0-based.
Returns:
The attention context vector, [target_batch, source_dim].
The attention probability vector, [source_time, target_batch].
Updated prefix states.
"""
p = self.params
if p.has_aux_atten:
assert aux_vecs is not None
assert aux_paddings is not None
inputs = tf.expand_dims(source_vecs, axis=0)
new_states = prefix_states
double_head_attention_states = prefix_states.double_head_attention_states
# First the self-attention layer.
(left_branch, _,
double_head_attention_states) = self.self_atten_double_heads.ExtendStep(
theta.self_atten_double_heads, source_vecs,
double_head_attention_states, t)
new_states.double_head_attention_states = double_head_attention_states
left_branch = tf.expand_dims(left_branch, axis=0)
hidden_state = left_branch + inputs
# Next the source attention layer.
if p.has_aux_atten:
hidden_state += self.attend_to_encoder.FProp(theta.attend_to_encoder,
inputs, aux_paddings,
aux_vecs)[0]
branched_convs_input = prefix_states.branched_convs_input
branched_convs_input = tf.concat([branched_convs_input, hidden_state],
axis=0)
new_states.branched_convs_input = branched_convs_input
# The receptive field of the branched convs is 17 and so we do not need
# to consider inputs that come before that to compute the final position.
# TODO(davidso): Create an ExtendStep method for branched_convs to make this
# more efficient.
inputs_length = tf.minimum(tf.shape(branched_convs_input)[0], 17)
branched_convs_input = branched_convs_input[-inputs_length:, :, :]
branched_convs_input = tf.transpose(branched_convs_input, [1, 0, 2])
hidden_state = self.branched_convs.FProp(theta.branched_convs,
branched_convs_input, None)
hidden_state = tf.transpose(hidden_state, [1, 0, 2])
transformer_layer_input = tf.squeeze(hidden_state[-1, :, :])
transformer_layer_states = prefix_states.transformer_layer_states
(hidden_state, atten_prob,
transformer_layer_states) = self.transformer_layer.ExtendStep(
theta.transformer_layer,
transformer_layer_input,
transformer_layer_states,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
t=t)
new_states.transformer_layer_states = transformer_layer_states
return hidden_state, atten_prob, new_states
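# Illustrative sketch: in ExtendStep above the decoder keeps the running input
# to the branched convolutions in prefix_states.branched_convs_input, appends
# the current step, and truncates the result to the last 17 steps (the
# receptive field of the branched convolutions), so each decode step only
# re-convolves a bounded window. The helper below shows just that bookkeeping
# on a plain tensor.
def _example_branched_convs_window(prev_inputs, new_step, receptive_field=17):
  """prev_inputs: [time, batch, dim]; new_step: [1, batch, dim]."""
  import tensorflow as tf
  extended = tf.concat([prev_inputs, new_step], axis=0)
  window = tf.minimum(tf.shape(extended)[0], receptive_field)
  # Return both the full running input (stored back into the prefix state)
  # and the truncated window actually fed to the branched convolutions.
  return extended, extended[-window:, :, :]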
class StyleLayer(base_layer.BaseLayer):
"""A layer that performs weighted style emb lookup."""
@classmethod
def Params(cls):
p = super(StyleLayer, cls).Params()
p.Define('input_dim', 0, 'Dimension of the input.')
p.Define('output_dim', 0, 'Dimension of the output.')
p.Define('num_styles', 0, 'Num of styles.')
p.Define('num_heads', 4, 'Number of attention heads.')
p.Define(
'enable_ctx_post_proj', True,
'If True, computed context is post projected into'
' ctx_post_proj_dim.')
return p
@base_layer.initializer
def __init__(self, params):
super(StyleLayer, self).__init__(params)
p = self.params
assert p.num_styles > 0
assert p.input_dim > 0
assert p.output_dim > 0
with tf.variable_scope(p.name):
# The styles table.
w_shape = [p.num_styles, 1, p.output_dim]
w_init = py_utils.WeightInit.Gaussian(scale=1.0, seed=p.random_seed)
w_pc = py_utils.WeightParams(
shape=w_shape,
init=w_init,
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
self.CreateVariable('styles_w', w_pc)
# Lastly the attention module.
atten_p = attention.MultiHeadedAttention.Params().Set(
source_dim=p.output_dim,
context_dim=p.output_dim,
hidden_dim=p.output_dim,
query_dim=p.input_dim,
ctx_post_proj_dim=p.output_dim,
num_attention_heads=p.num_heads,
use_source_vec_as_attention_value=False,
enable_ctx_post_proj=p.enable_ctx_post_proj)
self.CreateChild('atten', atten_p)
def EmbLookup(self, theta, ids):
"""Looks up style embedding vectors for ids only for test purpose.
Args:
theta: Named tuple with the weight matrix for the embedding.
ids: A rank-N int32 tensor.
Returns:
embs, A rank-(N+1) params.dtype tensor.
embs[indices, :] is the embedding vector for ids[indices].
"""
p = self.params
    # TODO(ngyuzh): call this function to visualize a big discrete table,
    # e.g. num_styles > 2^10.
embs = tf.nn.embedding_lookup(theta.styles_w, tf.reshape(ids, [-1]))
out_shape = tf.concat([tf.shape(ids), [p.output_dim]], 0)
return tf.reshape(tf.nn.tanh(embs), out_shape)
def StyleEmbFromProbs(self, theta, inp):
"""Look up style embedding based on feedin probabilities.
Args:
theta: params for this layer and its sub-layers.
inp: attention probabilities of shape [batch_size, num_styles].
Returns:
      style_emb: A weighted combination of the style embeddings, based on inp.
"""
p = self.params
b_size = tf.shape(inp)[0]
styles_w = tf.tile(tf.nn.tanh(theta.styles_w), [1, b_size, 1])
styles_paddings = tf.zeros([p.num_styles, b_size],
dtype=py_utils.FPropDtype(p))
atten_probs = tf.tile(tf.expand_dims(inp, 1), [1, p.num_heads, 1])
atten_probs = tf.reshape(atten_probs, [-1, p.num_styles])
packed_src = self.atten.InitForSourcePacked(theta.atten, styles_w, styles_w,
styles_paddings)
style_emb, _ = self.atten.ComputeContextVectorWithAttenProbs(
theta.atten, packed_src.source_contexts, atten_probs)
return style_emb
def FProp(self, theta, inp):
"""Look up style embedding."""
p = self.params
b_size = tf.shape(inp)[0]
styles_w = tf.tile(tf.nn.tanh(theta.styles_w), [1, b_size, 1])
styles_paddings = tf.zeros([p.num_styles, b_size],
dtype=py_utils.FPropDtype(p))
packed_src = self.atten.InitForSourcePacked(theta.atten, styles_w, styles_w,
styles_paddings)
style_emb, probs, _ = self.atten.ComputeContextVectorWithSource(
theta.atten, packed_src, inp)
# TODO(yonghui): Extract and return the attention probabilities.
return style_emb, probs
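# Illustrative sketch: StyleEmbFromProbs above combines the tanh-squashed
# style table with a probability vector over styles. Ignoring the multi-headed
# attention machinery, the core computation is a probability-weighted sum over
# the style embeddings, written out directly below.
def _example_style_emb_from_probs(style_table, probs):
  """style_table: [num_styles, dim]; probs: [batch, num_styles]."""
  import tensorflow as tf
  return tf.matmul(probs, tf.nn.tanh(style_table))  # [batch, dim]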
class TransformerLayerWithMultitaskAdapters(TransformerLayer):
"""Transformer Layer with multitask residual adapters.
Applies transformer layer, followed by multitask adapters. Requires an
additional input specifying the task_id for each input.
"""
@classmethod
def Params(cls):
p = super(TransformerLayerWithMultitaskAdapters, cls).Params()
p.Define('adapter_tpl', layers.MultitaskAdapterLayer.Params(),
'Template to use for multitask adapters.')
return p
@base_layer.initializer
def __init__(self, params):
super(TransformerLayerWithMultitaskAdapters, self).__init__(params)
p = self.params
with tf.variable_scope(p.name):
params = p.adapter_tpl.Copy()
params.name = 'adapters'
self.CreateChild('adapters', params)
def FProp(self,
theta,
source_vecs,
source_paddings,
aux_vecs=None,
aux_paddings=None,
source_segment_id=None,
aux_segment_id=None,
source_task_id=None):
"""Transformer Layer with multitask adapters.
First applies the standard transformer layer. Then applies adapter layers.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
source_vecs: [source_time, source_batch, dim].
source_paddings: [source_time, source_batch]
aux_vecs: [aux_time, aux_batch, dim]
aux_paddings: [aux_time, aux_batch]
source_segment_id: [source_time, source_batch]
aux_segment_id: [aux_time, aux_batch]
source_task_id: [source_time, source_batch]
Returns:
The attention context vector, [source_time, source_batch, dim].
The attention probability vector, [source_time, source_batch, source_time]
if has_aux_atten is False, otherwise [source_time, source_batch,
aux_time].
"""
p = self.params
hidden, atten_prob = super(TransformerLayerWithMultitaskAdapters,
self).FProp(theta, source_vecs, source_paddings,
aux_vecs, aux_paddings,
source_segment_id, aux_segment_id)
# Assumes the same task_id for the entire sequence during eval or when
# not using packed_input.
if not p.packed_input and not self.do_eval:
source_task_id = source_task_id[0, :]
hidden = self.adapters.FProp(theta.adapters, hidden, source_task_id)
return hidden, atten_prob
def ExtendStep(self,
theta,
source_vecs,
prefix_states,
aux_vecs=None,
aux_paddings=None,
timestep=None,
source_task_id=None):
"""Transformer Layer with adapters, extend one step in decoding.
Applies TransformerLayer.ExtendStep, then applies adapters.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
source_vecs: [source_batch, dim].
prefix_states: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
aux_vecs: [aux_time, aux_batch, dim]
aux_paddings: [aux_time, aux_batch]
timestep: a scalar, the current time step, 0-based.
source_task_id: [source_batch]
Returns:
The attention context vector, [target_batch, source_dim]
The attention probability vector, [source_time, target_batch]
Updated prefix states
"""
p = self.params
if p.has_aux_atten:
assert aux_vecs is not None
assert aux_paddings is not None
batch_size = tf.shape(source_vecs)[0]
# First the self-attention layer.
atten_vec, atten_prob, new_states = self.self_atten.ExtendStep(
theta.self_atten, source_vecs, prefix_states, timestep)
atten_vec = tf.expand_dims(atten_vec, axis=0)
# Next the source attention layer.
if p.has_aux_atten:
atten_vec, atten_prob = self.atten.FProp(theta.atten, atten_vec,
aux_paddings, aux_vecs)
# Finally, the feedforward layer.
hidden = self.fflayer.FProp(
theta.fflayer, atten_vec,
tf.zeros([1, batch_size], dtype=py_utils.FPropDtype(p)))
# Now adapter layers.
hidden = self.adapters.FProp(theta.adapters, hidden, source_task_id)
hidden = tf.squeeze(hidden, 0)
return hidden, atten_prob, new_states
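# Illustrative sketch: TransformerLayerWithMultitaskAdapters above applies a
# per-task adapter to the transformer output. The lingvo MultitaskAdapterLayer
# is defined elsewhere; the sketch below shows a common bottleneck-adapter
# formulation (down-project, RELU, up-project, residual) with per-task weights
# selected by task id. All names and shapes here are hypothetical and may
# differ from the actual adapter implementation.
def _example_multitask_adapter(hidden, task_ids, down_w, up_w):
  """hidden: [time, batch, dim]; task_ids: [batch] int32;
  down_w: [num_tasks, dim, bottleneck]; up_w: [num_tasks, bottleneck, dim]."""
  import tensorflow as tf
  per_task_down = tf.gather(down_w, task_ids)  # [batch, dim, bottleneck]
  per_task_up = tf.gather(up_w, task_ids)      # [batch, bottleneck, dim]
  projected = tf.nn.relu(tf.einsum('tbd,bdk->tbk', hidden, per_task_down))
  return hidden + tf.einsum('tbk,bkd->tbd', projected, per_task_up)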
# TODO(ankurbpn): Implementation is slightly different from the original.
# In the original implementation the KV projection outputs were explicitly
# zeroed out by the gating networks. Here we control the inputs instead.
# Verify if this still works as well as the original implementation.
class CCTAttentionLayer(base_layer.BaseLayer):
"""Multi-headed attention, add and norm used by 'Attention Is All You Need'.
Supports CCT attention gating as in the paper here:
https://arxiv.org/abs/2002.07106
"""
@classmethod
def Params(cls):
p = super(CCTAttentionLayer, cls).Params()
# Transformer Attention params.
p.Define('source_dim', 0, 'Dimension of the transformer block input.')
p.Define('context_dim', 0, 'Dimension of the attention contexts.')
p.Define('atten_hidden_dim', 0, 'Dimension of the attention hidden dim.')
p.Define('num_attention_heads', 8, 'Number of attention heads.')
p.Define('is_masked', False, 'If set, uses masked MultiHeadedAttention.')
p.Define(
        'mask_type', 'future', 'Type of attention mask if `is_masked` is '
        'set. Either "future" for masking out attention to future '
        'positions, or "eye" for masking out the token itself.')
p.Define('ln_tpl', layers.LayerNorm.Params(), 'Layer norm default params')
p.Define(
'atten_tpl',
attention.MultiHeadedAttention.Params().Set(
use_source_vec_as_attention_value=False, enable_ctx_post_proj=True),
'Multi-Headed Dot-Attention default params')
p.Define(
'atten_dropout_prob', 0.0,
'Probability at which we apply dropout to the attention probs. '
'This practically drops memory values at random positions.')
p.Define(
'residual_dropout_prob', 0.0,
'Probability at which we apply dropout to the residual layers, '
'such that, residual(x, y) = (x + dropout(y)).')
p.Define(
'residual_dropout_tpl', layers.DropoutLayer.Params(),
'Residual dropout params template. keep_prop will be reset to '
'(1.0 - residual_dropout_prob).')
p.Define('packed_input', False,
'If True, each training example may pack multiple sequences.')
p.Define('add_unnormalized_input', False, 'If set, uses unnormalized input '
'in the residual add.')
# CCT params.
p.Define('gating_tpl', layers.CCTGatingNetwork.Params(), '')
return p
@base_layer.initializer
def __init__(self, params):
super(CCTAttentionLayer, self).__init__(params)
p = self.params
assert p.name
assert p.source_dim
if not p.atten_hidden_dim:
p.atten_hidden_dim = p.source_dim
if not p.context_dim:
p.context_dim = p.source_dim
if p.is_masked:
assert p.mask_type in ['future', 'eye']
with tf.variable_scope(p.name):
# Initialize multi-headed attention
params = p.atten_tpl.Copy()
params.name = 'multihead_atten'
params.source_dim = p.source_dim
params.query_dim = p.source_dim
params.hidden_dim = p.atten_hidden_dim
params.context_dim = p.context_dim
params.ctx_post_proj_dim = p.source_dim
params.num_attention_heads = p.num_attention_heads
params.atten_dropout_prob = p.atten_dropout_prob
params.packed_input = p.packed_input
self.CreateChild('atten', params)
dropout_tpl = p.residual_dropout_tpl.Copy()
dropout_tpl.keep_prob = (1.0 - p.residual_dropout_prob)
self.CreateChild('residual_dropout', dropout_tpl)
# Initialize attention layer norm
params = p.ln_tpl.Copy()
params.name = 'atten_ln'
params.input_dim = p.source_dim
self.CreateChild('layer_norm', params)
# CCT specific operations.
ff_gating = p.gating_tpl.Copy()
ff_gating.input_dim = p.source_dim
ff_gating.num_outputs = 1
ff_gating.name = 'query_gating_net'
self.CreateChild('query_gating', ff_gating)
ff_gating = p.gating_tpl.Copy()
ff_gating.input_dim = p.source_dim
ff_gating.num_outputs = 1
ff_gating.name = 'kv_gating_net'
self.CreateChild('kv_gating', ff_gating)
# Initialize source_vec layer norm
params = p.ln_tpl.Copy()
params.name = 'source_ln'
params.input_dim = p.source_dim
self.CreateChild('source_layer_norm', params)
# Initialize ctx_vec layer norm
params = p.ln_tpl.Copy()
params.name = 'ctx_ln'
params.input_dim = p.source_dim
self.CreateChild('ctx_layer_norm', params)
def FProp(self,
theta,
query_vec,
source_paddings,
source_vecs=None,
query_segment_id=None,
source_segment_id=None,
**kwargs):
"""CCT attention, residual and normalization layer.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
query_vec: [target_time, target_batch, dim]
source_paddings: [source_time, source_batch]
source_vecs: [source_time, source_batch, dim].
query_segment_id: [target_time, target_batch]
source_segment_id: [source_time, source_batch]
**kwargs: Can be optional params for the attention layer, eg. attention
projection index tensor.
Returns:
(output, atten_probs). output is of shape [target_time, target_batch,
context_dim], atten_probs is of shape [target_time, target_batch,
source_time].
"""
p = self.params
unnormalized_query_vec = query_vec
query_vec = self.layer_norm.FProp(theta.layer_norm, query_vec)
if source_vecs is None: # For self-attention: keys = queries.
source_vecs = query_vec
source_segment_id = query_segment_id
else:
source_vecs = self.source_layer_norm.FProp(theta.source_layer_norm,
source_vecs)
# Gating the query computation.
query_p_c = self.query_gating.FProp(theta.query_gating, query_vec)
source_p_c = self.kv_gating.FProp(theta.kv_gating, source_vecs)
source_vecs *= source_p_c # Gate the source vectors.
if p.is_masked:
assert source_vecs is not None
query_vec = py_utils.with_dependencies([
py_utils.assert_shape_match(
tf.shape(source_vecs), tf.shape(query_vec))
], query_vec)
# Prepares mask for self-attention
# [time, time]
target_time = tf.shape(query_vec)[0]
target_bs = tf.shape(query_vec)[1]
if p.mask_type == 'future':
padding = 1.0 - tf.linalg.band_part(
tf.ones([target_time, target_time], dtype=py_utils.FPropDtype(p)),
-1, 0)
elif p.mask_type == 'eye':
padding = tf.eye(target_time, target_time, dtype=py_utils.FPropDtype(p))
# [time, batch, time]
causal_padding = tf.tile(tf.expand_dims(padding, 1), [1, target_bs, 1])
causal_padding = tf.reshape(causal_padding, [-1, target_time])
else:
causal_padding = None
query_dim = tf.shape(query_vec)[-1]
# Projects keys and values.
packed_src = self.atten.PackSource(
theta=theta.atten,
source_vecs=source_vecs, # keys
source_contexts=source_vecs, # values
source_padding=source_paddings,
source_segment_id=source_segment_id)
if query_segment_id is not None:
query_segment_id = tf.reshape(query_segment_id, [-1])
ctx_vec, atten_prob, _ = self.atten.ComputeContextVectorWithSource(
theta=theta.atten,
packed_src=packed_src,
query_vec=tf.reshape(query_vec, [-1, query_dim]),
per_step_source_padding=causal_padding,
query_segment_id=query_segment_id,
**kwargs)
# Gating operations
ctx_vec = query_p_c * tf.reshape(
self.ctx_layer_norm.FProp(theta.ctx_layer_norm, ctx_vec),
tf.shape(query_vec))
ctx_vec = self.residual_dropout.FProp(theta.residual_dropout, ctx_vec)
input_to_add = (
unnormalized_query_vec if p.add_unnormalized_input else query_vec)
h = input_to_add + ctx_vec
atten_prob = tf.reshape(atten_prob, [
tf.shape(query_vec)[0],
tf.shape(query_vec)[1],
tf.shape(source_vecs)[0]
])
return h, atten_prob, query_p_c, source_p_c
def _FinishExtendStep(self,
theta,
query_vec,
unnormalized_query_vec,
extended_packed_src,
t=None):
"""Finish extending prefix by one more time step.
Isolating this function from ExtendStep allows generalizing self-attention
to causal attention on other inputs.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
query_vec: [target_batch, dim]
unnormalized_query_vec: [target_batch, dim]
extended_packed_src: A `.NestedMap` object containing source_vecs,
source_contexts, source_paddings, and source_segment_ids
t: a scalar, the current time step, 0-based.
Returns:
      A triplet (cur_output, atten_prob, new_state), where cur_output is a
      tensor representing the output from the current step, atten_prob contains
      the attention probabilities, and new_state is the new state `.NestedMap`.
"""
p = self.params
# Gating operations
query_p_c = self.query_gating.FProp(theta.query_gating, query_vec)
if t is not None:
source_seq_len = tf.shape(extended_packed_src.source_vecs)[0]
zero_padding = tf.fill([source_seq_len],
tf.constant(0.0, dtype=query_vec.dtype))
per_step_source_padding = tf.where(
tf.less(tf.range(source_seq_len), tf.fill([source_seq_len], t + 1)),
zero_padding, tf.ones_like(zero_padding, dtype=query_vec.dtype))
query_batch_size = tf.shape(query_vec)[0]
per_step_source_padding = tf.tile(
tf.expand_dims(per_step_source_padding, axis=0),
[query_batch_size, 1])
else:
per_step_source_padding = None
ctx_vec, atten_prob, _ = self.atten.ComputeContextVectorWithCachedSource(
theta.atten,
extended_packed_src,
query_vec,
per_step_source_padding=per_step_source_padding)
# Gating operations
ctx_vec = self.ctx_layer_norm.FProp(theta.ctx_layer_norm, ctx_vec)
ctx_vec = query_p_c * tf.reshape(ctx_vec, tf.shape(query_vec))
ctx_vec = self.residual_dropout.FProp(theta.residual_dropout, ctx_vec)
input_to_add = (
unnormalized_query_vec if p.add_unnormalized_input else query_vec)
h = input_to_add + ctx_vec
new_states = py_utils.NestedMap(
key=extended_packed_src.source_vecs,
value=extended_packed_src.source_contexts)
return h, atten_prob, new_states
def ExtendStep(self, theta, query_vec, prefix_state, t=None):
"""Extend prefix by one more time step.
This function is expected to be called during fast decoding of the
Transformer model.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
query_vec: [target_batch, dim]
prefix_state: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
t: a scalar, the current time step, 0-based.
Returns:
      A triplet (cur_output, atten_prob, new_state), where cur_output is a
      tensor representing the output from the current step, atten_prob contains
      the attention probabilities, and new_state is the new state `.NestedMap`.
"""
p = self.params
assert p.is_masked # Must be causal attention.
# Gating operations
unnormalized_query_vec = query_vec
query_vec = self.layer_norm.FProp(theta.layer_norm, query_vec)
source_p_c = self.kv_gating.FProp(theta.kv_gating, query_vec)
source_vec = source_p_c * query_vec
cached_packed_src = py_utils.NestedMap(
source_vecs=prefix_state.key,
source_contexts=prefix_state.value,
source_padding=None,
source_segment_id=None)
extended_packed_src = self.atten.ExtendSourcePacked(theta.atten, source_vec,
source_vec, None, None,
cached_packed_src, t)
return self._FinishExtendStep(theta, query_vec, unnormalized_query_vec,
extended_packed_src, t)
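# Illustrative sketch: CCTAttentionLayer gates both the query path and the
# key/value path with values in [0, 1] produced by small gating networks
# (soft during training, hard 0/1 at inference), then adds the gated context
# back to the residual. The self-contained sketch below captures that pattern
# around plain single-head dot-product attention with ad-hoc keras gates; the
# real layer uses multi-headed attention, layer norm and CCTGatingNetwork.
def _example_cct_gated_attention(query_vec, source_vecs):
  """query_vec: [target, batch, dim]; source_vecs: [source, batch, dim]."""
  import tensorflow as tf
  dim = int(query_vec.shape[-1])
  query_gate = tf.keras.layers.Dense(1, activation='sigmoid')(query_vec)
  kv_gate = tf.keras.layers.Dense(1, activation='sigmoid')(source_vecs)
  gated_sources = source_vecs * kv_gate        # gate keys/values at the input
  logits = tf.einsum('tbd,sbd->tbs', query_vec, gated_sources)
  logits /= tf.sqrt(tf.cast(dim, logits.dtype))
  probs = tf.nn.softmax(logits, axis=-1)
  ctx = tf.einsum('tbs,sbd->tbd', probs, gated_sources)
  return query_vec + query_gate * ctx          # gate the context, then add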
class CCTFeedForwardLayer(base_layer.BaseLayer):
"""Transformer FF layer with CCT gating.
https://arxiv.org/abs/2002.07106
Differences from standard Transformer FF layer:
1. Each feedforward layer is divided into num_blocks smaller layers (divided
along the hidden dimension).
2. Each block has its separate input layer norm.
3. Each block has its separate output layer norm.
4. Outputs from each block are gated with CCTGatingNetwork output - which is
between 0 and 1 for training and either 0 or 1 during inference.
"""
@classmethod
def Params(cls):
p = super(CCTFeedForwardLayer, cls).Params()
# Transformer Feedforward params.
p.Define('input_dim', 0, 'Dimension of the layer input.')
p.Define('output_dim', 0, 'Dimension of the layer output.') # Deprecated.
p.Define('hidden_dim', 0, 'Dimension of the hidden layer.')
p.Define('ln_tpl', layers.LayerNorm.Params(), 'Layer norm default params')
p.Define('activation', 'RELU', 'Non-linearity.')
p.Define('fflayer_tpl',
layers.FeedForwardNet.Params().Set(activation=['RELU', 'NONE']),
'Feed forward layer default params')
p.Define(
'res_proj_tpl', layers.ProjectionLayer.Params(),
'Residual projection default params, used when input_dim != '
'output_dim.')
p.Define(
'residual_dropout_prob', 0.0,
'Probability at which we apply dropout to the residual layers, '
'such that, residual(x, y) = (x + dropout(y)).')
p.Define(
'residual_dropout_tpl', layers.DropoutLayer.Params(),
'Residual dropout params template. keep_prop will be reset to '
'(1.0 - residual_dropout_prob).')
p.Define(
'relu_dropout_prob', 0.0,
'Probability at which we apply dropout to the hidden layer '
'of feed-forward network.')
# Expert params.
p.Define('num_blocks', 1, 'Number of separately gated ff blocks.')
p.Define('gating_tpl', layers.CCTGatingNetwork.Params(), 'gating template.')
return p
@base_layer.initializer
def __init__(self, params):
super(CCTFeedForwardLayer, self).__init__(params)
p = self.params
assert p.name
assert p.input_dim
assert p.hidden_dim
assert not p.output_dim, 'output_dim should not be set.'
with tf.variable_scope(p.name):
# Initialize feed-forward layer
params = p.fflayer_tpl.Copy()
params.name = 'fflayer'
params.input_dim = p.input_dim
params.activation = [p.activation, 'NONE']
if p.output_dim == 0:
params.hidden_layer_dims = [p.hidden_dim, p.input_dim]
else:
params.hidden_layer_dims = [p.hidden_dim, p.output_dim]
params.dropout = [
params.dropout.cls.Params().Set(keep_prob=1.0 - p.relu_dropout_prob),
params.dropout.cls.Params().Set(keep_prob=1.0)
]
ffs = []
ln_params = []
out_layer_norm = [] # Required for stabilizing CCT.
for i in range(p.num_blocks):
ff_p = params.Copy()
ff_p.name += '_%d' % i
ffs.append(ff_p)
ln_p = p.ln_tpl.Copy()
ln_p.name = 'fflayer_ln_%d' % i
ln_p.input_dim = p.input_dim
ln_params.append(ln_p)
ln_p = p.ln_tpl.Copy()
ln_p.name = 'fflayer_ln_out_%d' % i
ln_p.input_dim = p.input_dim
out_layer_norm.append(ln_p)
self.CreateChildren('fflayers', ffs)
self.CreateChildren('layer_norm', ln_params)
self.CreateChildren('out_layer_norm', out_layer_norm)
# Note: Set gating noise and warmup in parent layer.
ff_gating = p.gating_tpl.Copy()
ff_gating.input_dim = p.input_dim
ff_gating.num_outputs = p.num_blocks
ff_gating.name = 'gating_net'
self.CreateChild('ff_gating', ff_gating)
dropout_tpl = p.residual_dropout_tpl.Copy()
dropout_tpl.keep_prob = (1.0 - p.residual_dropout_prob)
self.CreateChild('residual_dropout', dropout_tpl)
def FProp(self, theta, inputs, paddings):
"""Feed-forward, layer-norm, residual, gating and layer-norm.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: [time, batch, dim].
paddings: [time, batch]
Returns:
      A tuple (out, p_c), where out is a tensor of the same shape as inputs
      and p_c contains the per-block gating probabilities.
"""
p = self.params
ff_outputs = []
for i in range(p.num_blocks):
inputs_normalized = self.layer_norm[i].FProp(theta.layer_norm[i], inputs)
ff_output = self.fflayers[i].FProp(
theta.fflayers[i],
inputs_normalized,
paddings=tf.expand_dims(paddings, -1))
ff_output = self.out_layer_norm[i].FProp(theta.out_layer_norm[i],
ff_output)
ff_outputs.append(ff_output)
p_c = self.ff_gating.FProp(theta.ff_gating, inputs_normalized)
out = inputs + self.residual_dropout.FProp(
theta.residual_dropout,
tf.reduce_sum(
tf.expand_dims(p_c, -1) * tf.stack(ff_outputs, -2), axis=-2))
return out, p_c
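# Illustrative sketch: CCTFeedForwardLayer.FProp above runs num_blocks
# independently normalized feed-forward blocks and mixes their outputs with
# gating probabilities from a CCTGatingNetwork before the residual add. The
# helper below reproduces only the mixing step: `block_outputs` stands for the
# per-block outputs (each already layer-normalized) and `gates` for the gating
# probabilities, soft in [0, 1] during training and hard 0/1 at inference.
def _example_cct_ff_mixing(inputs, block_outputs, gates):
  """inputs: [time, batch, dim]; block_outputs: list of [time, batch, dim];
  gates: [time, batch, num_blocks]."""
  import tensorflow as tf
  stacked = tf.stack(block_outputs, axis=-2)  # [time, batch, num_blocks, dim]
  mixed = tf.reduce_sum(gates[..., tf.newaxis] * stacked, axis=-2)
  return inputs + mixed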
class TransformerWithContextLayer(base_layer.BaseLayer):
"""A transformer layer with 3 attention layers.
The same as layers_with_attention.TransformerLayer, but with an
additional attention layer to attend to a third transformer stack
representing context.
self-attention => context attention (newly added as tertiary_atten) =>
encoder attention (named aux_atten in TransformerLayer).
The weights are *not* shared between these three attention layers.
See https://arxiv.org/pdf/1810.03581.pdf
"""
@classmethod
def Params(cls):
p = super(TransformerWithContextLayer, cls).Params()
p.Define('source_dim', 0, 'Dimension of the transformer block input.')
p.Define('output_dim', 0, 'Dimension of the transformer block output.')
p.Define(
'tr_atten_tpl',
TransformerAttentionLayer.Params().Set(num_attention_heads=8),
'Transformer Attention Layer params. The same template is applied '
'to all three attention layers.')
p.Define('tr_fflayer_tpl',
TransformerFeedForwardLayer.Params().Set(hidden_dim=2048),
'Transformer Feed-Forward Layer params.')
p.Define('packed_input', False,
'If True, each training example may pack multiple sequences.')
    # Removed: p.has_aux_atten and p.mask_self_atten; they are always True.
    # Removed: p.num_aux_atten_post_proj and p.tr_post_ln_tpl.
return p
@base_layer.initializer
def __init__(self, params):
super(TransformerWithContextLayer, self).__init__(params)
p = self.params
if not p.source_dim:
raise ValueError('p.source_dim not set')
with tf.variable_scope(p.name):
# Initialize multi-headed self-attention
params = p.tr_atten_tpl.Copy()
params.name = 'multihead_self_atten'
params.source_dim = p.source_dim
params.packed_input = p.packed_input
params.is_masked = True
self.CreateChild('self_atten', params)
# Initialize tertiary attention.
params = p.tr_atten_tpl.Copy()
params.name = 'tertiary_multihead_atten'
params.source_dim = p.source_dim
params.packed_input = p.packed_input
self.CreateChild('tertiary_atten', params)
# Initialize multi-headed encoder attention
params = p.tr_atten_tpl.Copy()
params.name = 'multihead_atten'
params.source_dim = p.source_dim
params.packed_input = p.packed_input
self.CreateChild('atten', params)
# Initialize feed-forward layer
params = p.tr_fflayer_tpl.Copy()
params.name = 'tr_fflayer'
params.input_dim = p.source_dim
params.output_dim = p.output_dim
self.CreateChild('fflayer', params)
def FProp(self,
theta,
source_vecs,
source_paddings,
aux_vecs,
aux_paddings,
tertiary_vecs,
tertiary_paddings,
source_segment_id=None,
aux_segment_id=None,
tertiary_segment_id=None):
"""Transformer Layer.
Please see docstring of TransformerAttentionLayer.FProp.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
source_vecs: [source_time, source_batch, dim].
source_paddings: [source_time, source_batch]
aux_vecs: [aux_time, aux_batch, dim]
aux_paddings: [aux_time, aux_batch]
tertiary_vecs: [tertiary_time, tertiary_batch, dim]
tertiary_paddings: [tertiary_time, tertiary_batch]
source_segment_id: [source_time, source_batch]
aux_segment_id: [aux_time, aux_batch]
tertiary_segment_id: [tertiary_time, tertiary_batch]
Returns:
The attention context vector, [source_time, source_batch, dim].
The attention probability vector, [source_time, source_batch, aux_time].
"""
p = self.params
if p.packed_input:
assert source_segment_id is not None, ('Need to specify segment id for '
'packed input.')
assert aux_segment_id is not None, ('Need to specify segment id for '
'packed input.')
assert tertiary_segment_id is not None, ('Need to specify segment id for '
'packed input.')
atten_vec, atten_prob = self.self_atten.FProp(
theta.self_atten,
source_vecs,
source_paddings,
query_segment_id=source_segment_id)
atten_vec, atten_prob = self.tertiary_atten.FProp(
theta.tertiary_atten, atten_vec, tertiary_paddings, tertiary_vecs,
source_segment_id, tertiary_segment_id)
atten_vec, atten_prob = self.atten.FProp(theta.atten, atten_vec,
aux_paddings, aux_vecs,
source_segment_id, aux_segment_id)
h = self.fflayer.FProp(theta.fflayer, atten_vec, source_paddings)
return h, atten_prob
def ExtendStep(self,
theta,
source_vecs,
prefix_states,
aux_vecs,
aux_paddings,
tertiary_vecs,
tertiary_paddings,
t=None):
"""Transformer Layer, extend one step in decoding.
Please see docstring of TransformerAttentionLayer.ExtendStep.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
source_vecs: [source_batch, dim].
prefix_states: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
aux_vecs: [aux_time, aux_batch, dim]
      aux_paddings: [aux_time, aux_batch]
tertiary_vecs: [tertiary_time, tertiary_batch, dim]
tertiary_paddings: [tertiary_time, tertiary_batch]
t: a scalar, the current time step, 0-based.
Returns:
The attention context vector, [target_batch, source_dim]
The attention probability vector from the encoder attention layer (the
last attention layer) only, [source_time, target_batch].
TODO(zhouwk): Return also the attention prob from the tertiary attention.
Updated prefix states
"""
p = self.params
batch_size = py_utils.GetShape(source_vecs)[0]
# First the self-attention layer.
atten_vec, _, new_states = self.self_atten.ExtendStep(
theta.self_atten, source_vecs, prefix_states, t)
# Next the context attention (tertiary_atten) layer.
atten_vec = tf.expand_dims(atten_vec, axis=0)
atten_vec, _ = self.tertiary_atten.FProp(theta.tertiary_atten, atten_vec,
tertiary_paddings, tertiary_vecs)
# Next the source attention (aux_atten) layer.
atten_vec, atten_prob = self.atten.FProp(theta.atten, atten_vec,
aux_paddings, aux_vecs)
# Finally, the feedforward layer.
h = self.fflayer.FProp(
theta.fflayer, atten_vec,
tf.zeros([1, batch_size], dtype=py_utils.FPropDtype(p)))
h = tf.squeeze(h, 0)
return h, atten_prob, new_states
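# Decode-time sketch (assumption, not in the original file): ExtendStep is
# called once per output step t with a [batch, dim] input; it lifts the input
# to a length-1 time axis so the cross-attention FProp code can be reused, then
# squeezes it back. A decoding loop would look roughly like
#   for t in range(max_steps):
#     h, prob, prefix_states = layer.ExtendStep(
#         theta, x_t, prefix_states, aux_vecs, aux_paddings,
#         tertiary_vecs, tertiary_paddings, t)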
|
mlperf/training_results_v0.7
|
Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-512/lingvo/core/layers_with_attention.py
|
Python
|
apache-2.0
| 85,989
|
[
"Gaussian"
] |
d352f3c32233430ddd270f727fbc142f7e9898ee0e9cb178bcf82e3b0c4b8c02
|
import sys
from rpython.rtyper.lltypesystem.lltype import (Struct, Array, FixedSizeArray,
FuncType, typeOf, GcStruct, GcArray, RttiStruct, ContainerType, parentlink,
Void, OpaqueType, Float, RuntimeTypeInfo, getRuntimeTypeInfo, Char,
_subarray)
from rpython.rtyper.lltypesystem import llmemory, llgroup
from rpython.translator.c.funcgen import FunctionCodeGenerator
from rpython.translator.c.external import CExternalFunctionCodeGenerator
from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring
from rpython.translator.c.support import cdecl, forward_cdecl, somelettersfrom
from rpython.translator.c.support import c_char_array_constant, barebonearray
from rpython.translator.c.primitive import PrimitiveType, name_signed
from rpython.rlib import exports
from rpython.rlib.rfloat import isfinite, isinf
def needs_gcheader(T):
if not isinstance(T, ContainerType):
return False
if T._gckind != 'gc':
return False
if isinstance(T, GcStruct):
if T._first_struct() != (None, None):
return False # gcheader already in the first field
return True
class Node(object):
__slots__ = ("db", )
def __init__(self, db):
self.db = db
class NodeWithDependencies(Node):
__slots__ = ("dependencies", )
def __init__(self, db):
Node.__init__(self, db)
self.dependencies = set()
class StructDefNode(NodeWithDependencies):
typetag = 'struct'
extra_union_for_varlength = True
def __init__(self, db, STRUCT, varlength=None):
NodeWithDependencies.__init__(self, db)
self.STRUCT = STRUCT
self.LLTYPE = STRUCT
self.varlength = varlength
if varlength is None:
basename = STRUCT._name
with_number = True
else:
basename = db.gettypedefnode(STRUCT).barename
basename = '%s_len%d' % (basename, varlength)
with_number = False
if STRUCT._hints.get('union'):
self.typetag = 'union'
assert STRUCT._gckind == 'raw' # not supported: "GcUnion"
if STRUCT._hints.get('typedef'):
self.typetag = ''
assert STRUCT._hints.get('external')
if self.STRUCT._hints.get('external'): # XXX hack
self.forward_decl = None
if STRUCT._hints.get('c_name'):
self.barename = self.name = STRUCT._hints['c_name']
self.c_struct_field_name = self.verbatim_field_name
else:
(self.barename,
self.name) = db.namespace.uniquename(basename,
with_number=with_number,
bare=True)
self.prefix = somelettersfrom(STRUCT._name) + '_'
#
self.fieldnames = STRUCT._names
if STRUCT._hints.get('typeptr', False):
if db.gcpolicy.need_no_typeptr():
assert self.fieldnames == ('typeptr',)
self.fieldnames = ()
#
self.fulltypename = '%s %s @' % (self.typetag, self.name)
def setup(self):
# this computes self.fields
if self.STRUCT._hints.get('external'): # XXX hack
self.fields = None # external definition only
return
self.fields = []
db = self.db
STRUCT = self.STRUCT
if self.varlength is not None:
self.normalizedtypename = db.gettype(STRUCT, who_asks=self)
if needs_gcheader(self.STRUCT):
HDR = db.gcpolicy.struct_gcheader_definition(self)
if HDR is not None:
gc_field = ("_gcheader", db.gettype(HDR, who_asks=self))
self.fields.append(gc_field)
for name in self.fieldnames:
T = self.c_struct_field_type(name)
if name == STRUCT._arrayfld:
typename = db.gettype(T, varlength=self.varlength,
who_asks=self)
else:
typename = db.gettype(T, who_asks=self)
self.fields.append((self.c_struct_field_name(name), typename))
self.computegcinfo(self.db.gcpolicy)
def computegcinfo(self, gcpolicy):
# let the gcpolicy do its own setup
self.gcinfo = None # unless overwritten below
rtti = None
STRUCT = self.STRUCT
if isinstance(STRUCT, RttiStruct):
try:
rtti = getRuntimeTypeInfo(STRUCT)
except ValueError:
pass
if self.varlength is None:
gcpolicy.struct_setup(self, rtti)
return self.gcinfo
def gettype(self):
return self.fulltypename
def c_struct_field_name(self, name):
# occasionally overridden in __init__():
# self.c_struct_field_name = self.verbatim_field_name
return self.prefix + name
def verbatim_field_name(self, name):
assert name.startswith('c_') # produced in this way by rffi
return name[2:]
def c_struct_field_type(self, name):
return self.STRUCT._flds[name]
def access_expr(self, baseexpr, fldname):
fldname = self.c_struct_field_name(fldname)
return '%s.%s' % (baseexpr, fldname)
def ptr_access_expr(self, baseexpr, fldname, baseexpr_is_const=False):
fldname = self.c_struct_field_name(fldname)
if baseexpr_is_const:
return '%s->%s' % (baseexpr, fldname)
return 'RPyField(%s, %s)' % (baseexpr, fldname)
def definition(self):
if self.fields is None: # external definition only
return
yield '%s %s {' % (self.typetag, self.name)
is_empty = True
for name, typename in self.fields:
line = '%s;' % cdecl(typename, name)
if typename == PrimitiveType[Void]:
line = '/* %s */' % line
else:
if is_empty and typename.endswith('[RPY_VARLENGTH]'):
yield '\tRPY_DUMMY_VARLENGTH'
is_empty = False
yield '\t' + line
if is_empty:
yield '\t' + 'char _dummy; /* this struct is empty */'
yield '};'
if self.varlength is not None:
assert self.typetag == 'struct'
yield 'union %su {' % self.name
yield ' struct %s a;' % self.name
yield ' %s;' % cdecl(self.normalizedtypename, 'b')
yield '};'
def visitor_lines(self, prefix, on_field):
for name in self.fieldnames:
FIELD_T = self.c_struct_field_type(name)
cname = self.c_struct_field_name(name)
for line in on_field('%s.%s' % (prefix, cname),
FIELD_T):
yield line
def deflength(varlength):
if varlength is None:
return 'RPY_VARLENGTH'
elif varlength == 0:
return 'RPY_LENGTH0'
else:
return varlength
class ArrayDefNode(NodeWithDependencies):
typetag = 'struct'
extra_union_for_varlength = True
def __init__(self, db, ARRAY, varlength=None):
NodeWithDependencies.__init__(self, db)
self.ARRAY = ARRAY
self.LLTYPE = ARRAY
self.gcfields = []
self.varlength = varlength
if varlength is None:
basename = 'array'
with_number = True
else:
basename = db.gettypedefnode(ARRAY).barename
basename = '%s_len%d' % (basename, varlength)
with_number = False
(self.barename,
self.name) = db.namespace.uniquename(basename, with_number=with_number,
bare=True)
self.fulltypename = '%s %s @' % (self.typetag, self.name)
self.fullptrtypename = '%s %s *@' % (self.typetag, self.name)
def setup(self):
if hasattr(self, 'itemtypename'):
return # setup() was already called, likely by __init__
db = self.db
ARRAY = self.ARRAY
self.computegcinfo(db.gcpolicy)
if self.varlength is not None:
self.normalizedtypename = db.gettype(ARRAY, who_asks=self)
if needs_gcheader(ARRAY):
HDR = db.gcpolicy.array_gcheader_definition(self)
if HDR is not None:
gc_field = ("_gcheader", db.gettype(HDR, who_asks=self))
self.gcfields.append(gc_field)
self.itemtypename = db.gettype(ARRAY.OF, who_asks=self)
def computegcinfo(self, gcpolicy):
# let the gcpolicy do its own setup
self.gcinfo = None # unless overwritten below
if self.varlength is None:
gcpolicy.array_setup(self)
return self.gcinfo
def gettype(self):
return self.fulltypename
def getptrtype(self):
return self.fullptrtypename
def access_expr(self, baseexpr, index):
return '%s.items[%s]' % (baseexpr, index)
access_expr_varindex = access_expr
def ptr_access_expr(self, baseexpr, index, dummy=False):
assert 0 <= index <= sys.maxint, "invalid constant index %r" % (index,)
return self.itemindex_access_expr(baseexpr, index)
def itemindex_access_expr(self, baseexpr, indexexpr):
if self.ARRAY._hints.get('nolength', False):
return 'RPyNLenItem(%s, %s)' % (baseexpr, indexexpr)
else:
return 'RPyItem(%s, %s)' % (baseexpr, indexexpr)
def definition(self):
yield 'struct %s {' % self.name
for fname, typename in self.gcfields:
yield '\t' + cdecl(typename, fname) + ';'
if not self.ARRAY._hints.get('nolength', False):
yield '\tlong length;'
line = '%s;' % cdecl(self.itemtypename,
'items[%s]' % deflength(self.varlength))
if self.ARRAY.OF is Void: # strange
line = '/* array of void */'
if self.ARRAY._hints.get('nolength', False):
line = 'char _dummy; ' + line
yield '\t' + line
yield '};'
if self.varlength is not None:
yield 'union %su {' % self.name
yield ' struct %s a;' % self.name
yield ' %s;' % cdecl(self.normalizedtypename, 'b')
yield '};'
def visitor_lines(self, prefix, on_item):
assert self.varlength is None
ARRAY = self.ARRAY
# we need a unique name for this C variable, or at least one that does
# not collide with the expression in 'prefix'
i = 0
varname = 'p0'
while prefix.find(varname) >= 0:
i += 1
varname = 'p%d' % i
body = list(on_item('(*%s)' % varname, ARRAY.OF))
if body:
yield '{'
yield '\t%s = %s.items;' % (cdecl(self.itemtypename, '*' + varname),
prefix)
yield '\t%s = %s + %s.length;' % (cdecl(self.itemtypename,
'*%s_end' % varname),
varname,
prefix)
yield '\twhile (%s != %s_end) {' % (varname, varname)
for line in body:
yield '\t\t' + line
yield '\t\t%s++;' % varname
yield '\t}'
yield '}'
class BareBoneArrayDefNode(NodeWithDependencies):
"""For 'simple' array types which don't need a length nor GC headers.
Implemented directly as a C array instead of a struct with an items field.
rffi kind of expects such arrays to be 'bare' C arrays.
"""
gcinfo = None
name = None
forward_decl = None
extra_union_for_varlength = False
def __init__(self, db, ARRAY, varlength=None):
NodeWithDependencies.__init__(self, db)
self.ARRAY = ARRAY
self.LLTYPE = ARRAY
self.varlength = varlength
contained_type = ARRAY.OF
# There is no such thing as an array of voids:
# we use an array of chars instead; only the pointer can be void*.
self.itemtypename = db.gettype(contained_type, who_asks=self)
self.fulltypename = self.itemtypename.replace('@', '(@)[%s]' %
deflength(varlength))
if ARRAY._hints.get("render_as_void"):
self.fullptrtypename = 'void *@'
else:
self.fullptrtypename = self.itemtypename.replace('@', '*@')
if ARRAY._hints.get("render_as_const"):
self.fullptrtypename = 'const ' + self.fullptrtypename
def setup(self):
"""Array loops are forbidden by ForwardReference.become() because
there is no way to declare them in C."""
def gettype(self):
return self.fulltypename
def getptrtype(self):
return self.fullptrtypename
def access_expr(self, baseexpr, index):
return '%s[%d]' % (baseexpr, index)
access_expr_varindex = access_expr
def ptr_access_expr(self, baseexpr, index, dummy=False):
assert 0 <= index <= sys.maxint, "invalid constant index %r" % (index,)
return self.itemindex_access_expr(baseexpr, index)
def itemindex_access_expr(self, baseexpr, indexexpr):
if self.ARRAY._hints.get("render_as_void"):
return 'RPyBareItem((char*)%s, %s)' % (baseexpr, indexexpr)
else:
return 'RPyBareItem(%s, %s)' % (baseexpr, indexexpr)
def definition(self):
return [] # no declaration is needed
def visitor_lines(self, prefix, on_item):
raise Exception("cannot visit C arrays - don't know the length")
class FixedSizeArrayDefNode(NodeWithDependencies):
gcinfo = None
name = None
typetag = 'struct'
extra_union_for_varlength = False
def __init__(self, db, FIXEDARRAY):
NodeWithDependencies.__init__(self, db)
self.FIXEDARRAY = FIXEDARRAY
self.LLTYPE = FIXEDARRAY
self.itemtypename = db.gettype(FIXEDARRAY.OF, who_asks=self)
self.fulltypename = self.itemtypename.replace('@', '(@)[%d]' %
FIXEDARRAY.length)
self.fullptrtypename = self.itemtypename.replace('@', '*@')
def setup(self):
"""Loops are forbidden by ForwardReference.become() because
there is no way to declare them in C."""
def gettype(self):
return self.fulltypename
def getptrtype(self):
return self.fullptrtypename
def access_expr(self, baseexpr, index, dummy=False):
if not isinstance(index, int):
assert index.startswith('item')
index = int(index[4:])
if not (0 <= index < self.FIXEDARRAY.length):
raise IndexError("refusing to generate a statically out-of-bounds"
" array indexing")
return '%s[%d]' % (baseexpr, index)
ptr_access_expr = access_expr
def access_expr_varindex(self, baseexpr, index):
return '%s[%s]' % (baseexpr, index)
def itemindex_access_expr(self, baseexpr, indexexpr):
return 'RPyFxItem(%s, %s, %d)' % (baseexpr, indexexpr,
self.FIXEDARRAY.length)
def definition(self):
return [] # no declaration is needed
def visitor_lines(self, prefix, on_item):
FIXEDARRAY = self.FIXEDARRAY
# we need a unique name for this C variable, or at least one that does
# not collide with the expression in 'prefix'
i = 0
varname = 'p0'
while prefix.find(varname) >= 0:
i += 1
varname = 'p%d' % i
body = list(on_item('(*%s)' % varname, FIXEDARRAY.OF))
if body:
yield '{'
yield '\t%s = %s;' % (cdecl(self.itemtypename, '*' + varname),
prefix)
yield '\t%s = %s + %d;' % (cdecl(self.itemtypename,
'*%s_end' % varname),
varname,
FIXEDARRAY.length)
yield '\twhile (%s != %s_end) {' % (varname, varname)
for line in body:
yield '\t\t' + line
yield '\t\t%s++;' % varname
yield '\t}'
yield '}'
class ExtTypeOpaqueDefNode(NodeWithDependencies):
"""For OpaqueTypes created with the hint render_structure."""
typetag = 'struct'
def __init__(self, db, T):
NodeWithDependencies.__init__(self, db)
self.T = T
self.name = 'RPyOpaque_%s' % (T.tag,)
def setup(self):
pass
def definition(self):
return []
# ____________________________________________________________
class ContainerNode(Node):
if USESLOTS: # keep the number of slots down!
__slots__ = """db obj
typename implementationtypename
name
_funccodegen_owner
globalcontainer""".split()
eci_name = '_compilation_info'
def __init__(self, db, T, obj):
Node.__init__(self, db)
self.obj = obj
self.typename = db.gettype(T) #, who_asks=self)
self.implementationtypename = db.gettype(
T, varlength=self.getvarlength())
parent, parentindex = parentlink(obj)
if obj in exports.EXPORTS_obj2name:
self.name = exports.EXPORTS_obj2name[obj]
self.globalcontainer = 2 # meh
elif parent is None:
self.name = db.namespace.uniquename('g_' + self.basename())
self.globalcontainer = True
else:
self.globalcontainer = False
parentnode = db.getcontainernode(parent)
defnode = db.gettypedefnode(parentnode.getTYPE())
self.name = defnode.access_expr(parentnode.name, parentindex)
if self.typename != self.implementationtypename:
if db.gettypedefnode(T).extra_union_for_varlength:
self.name += '.b'
self._funccodegen_owner = None
def getptrname(self):
return '(&%s)' % self.name
def getTYPE(self):
return typeOf(self.obj)
def is_thread_local(self):
T = self.getTYPE()
return hasattr(T, "_hints") and T._hints.get('thread_local')
def is_exported(self):
return self.globalcontainer == 2 # meh
def compilation_info(self):
return getattr(self.obj, self.eci_name, None)
def get_declaration(self):
if self.name[-2:] == '.b':
# xxx fish fish
assert self.implementationtypename.startswith('struct ')
assert self.implementationtypename.endswith(' @')
uniontypename = 'union %su @' % self.implementationtypename[7:-2]
return uniontypename, self.name[:-2]
else:
return self.implementationtypename, self.name
def forward_declaration(self):
if llgroup.member_of_group(self.obj):
return
type, name = self.get_declaration()
yield '%s;' % (
forward_cdecl(type, name, self.db.standalone,
is_thread_local=self.is_thread_local(),
is_exported=self.is_exported()))
def implementation(self):
if llgroup.member_of_group(self.obj):
return []
lines = list(self.initializationexpr())
type, name = self.get_declaration()
if name != self.name and len(lines) < 2:
# a union with length 0
lines[0] = cdecl(type, name, self.is_thread_local())
else:
if name != self.name:
lines[0] = '{ ' + lines[0] # extra braces around the 'a' part
lines[-1] += ' }' # of the union
lines[0] = '%s = %s' % (
cdecl(type, name, self.is_thread_local()),
lines[0])
lines[-1] += ';'
return lines
def startupcode(self):
return []
def getvarlength(self):
return None
assert not USESLOTS or '__dict__' not in dir(ContainerNode)
class StructNode(ContainerNode):
nodekind = 'struct'
if USESLOTS:
__slots__ = ()
def basename(self):
T = self.getTYPE()
return T._name
def enum_dependencies(self):
T = self.getTYPE()
for name in T._names:
yield getattr(self.obj, name)
def getvarlength(self):
T = self.getTYPE()
if T._arrayfld is None:
return None
else:
array = getattr(self.obj, T._arrayfld)
return len(array.items)
def initializationexpr(self, decoration=''):
T = self.getTYPE()
is_empty = True
defnode = self.db.gettypedefnode(T)
data = []
if needs_gcheader(T):
gc_init = self.db.gcpolicy.struct_gcheader_initdata(self)
data.append(('gcheader', gc_init))
for name in defnode.fieldnames:
data.append((name, getattr(self.obj, name)))
# Reasonably, you should only initialise one of the fields of a union
# in C. This is possible with the syntax '.fieldname value' or
# '.fieldname = value'. But here we don't know which of the
# fields need initialization, so XXX we pick the first one
# arbitrarily.
if hasattr(T, "_hints") and T._hints.get('union'):
data = data[0:1]
if 'get_padding_drop' in T._hints:
d = {}
for name, _ in data:
T1 = defnode.c_struct_field_type(name)
typename = self.db.gettype(T1)
d[name] = cdecl(typename, '')
padding_drop = T._hints['get_padding_drop'](d)
else:
padding_drop = []
type, name = self.get_declaration()
if name != self.name and self.getvarlength() < 1 and len(data) < 2:
# an empty union
yield ''
return
yield '{'
for name, value in data:
if name in padding_drop:
continue
c_expr = defnode.access_expr(self.name, name)
lines = generic_initializationexpr(self.db, value, c_expr,
decoration + name)
for line in lines:
yield '\t' + line
if not lines[0].startswith('/*'):
is_empty = False
if is_empty:
yield '\t%s' % '0,'
yield '}'
assert not USESLOTS or '__dict__' not in dir(StructNode)
class GcStructNodeWithHash(StructNode):
# for the outermost level of nested structures, if it has a _hash_cache_.
nodekind = 'struct'
if USESLOTS:
__slots__ = ()
def get_hash_typename(self):
return 'struct _hashT_%s @' % self.name
def forward_declaration(self):
T = self.getTYPE()
assert self.typename == self.implementationtypename # no array part
hash_typename = self.get_hash_typename()
hash_offset = self.db.gctransformer.get_hash_offset(T)
yield '%s {' % cdecl(hash_typename, '')
yield '\tunion {'
yield '\t\t%s;' % cdecl(self.implementationtypename, 'head')
yield '\t\tchar pad[%s];' % name_signed(hash_offset, self.db)
yield '\t} u;'
yield '\tlong hash;'
yield '};'
yield '%s;' % (
forward_cdecl(hash_typename, '_hash_' + self.name,
self.db.standalone, self.is_thread_local()),)
yield '#define %s _hash_%s.u.head' % (self.name, self.name)
def implementation(self):
hash_typename = self.get_hash_typename()
hash = self.db.gcpolicy.get_prebuilt_hash(self.obj)
assert hash is not None
lines = list(self.initializationexpr())
lines.insert(0, '%s = { {' % (
cdecl(hash_typename, '_hash_' + self.name,
self.is_thread_local()),))
lines.append('}, %s /* hash */ };' % name_signed(hash, self.db))
return lines
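# For illustration (assumption; 'g_foo' and 'struct foo_t' are made-up names):
# for a prebuilt GcStruct with a precomputed hash, forward_declaration() above
# emits roughly
#   struct _hashT_g_foo {
#       union { struct foo_t head; char pad[<hash offset>]; } u;
#       long hash;
#   };
#   struct _hashT_g_foo _hash_g_foo;
#   #define g_foo _hash_g_foo.u.head
# so existing code keeps referring to 'g_foo' while the hash value sits right
# after the object, and implementation() fills in '{ { ... }, hash };'.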
def gcstructnode_factory(db, T, obj):
if db.gcpolicy.get_prebuilt_hash(obj) is not None:
cls = GcStructNodeWithHash
else:
cls = StructNode
return cls(db, T, obj)
class ArrayNode(ContainerNode):
nodekind = 'array'
if USESLOTS:
__slots__ = ()
def getptrname(self):
if barebonearray(self.getTYPE()):
return self.name
return ContainerNode.getptrname(self)
def basename(self):
return 'array'
def enum_dependencies(self):
return self.obj.items
def getvarlength(self):
return len(self.obj.items)
def initializationexpr(self, decoration=''):
T = self.getTYPE()
yield '{'
if needs_gcheader(T):
gc_init = self.db.gcpolicy.array_gcheader_initdata(self)
lines = generic_initializationexpr(self.db, gc_init, 'gcheader',
'%sgcheader' % (decoration,))
for line in lines:
yield line
if T._hints.get('nolength', False):
length = ''
else:
length = '%d, ' % len(self.obj.items)
if T.OF is Void or len(self.obj.items) == 0:
yield '\t%s' % length.rstrip(', ')
yield '}'
elif T.OF == Char:
if len(self.obj.items) and self.obj.items[0] is None:
s = ''.join([self.obj.getitem(i) for i in range(len(self.obj.items))])
else:
s = ''.join(self.obj.items)
array_constant = c_char_array_constant(s)
if array_constant.startswith('{') and barebonearray(T):
assert array_constant.endswith('}')
array_constant = array_constant[1:-1].strip()
yield '\t%s%s' % (length, array_constant)
yield '}'
else:
barebone = barebonearray(T)
if not barebone:
yield '\t%s{' % length
for j in range(len(self.obj.items)):
value = self.obj.items[j]
basename = self.name
if basename.endswith('.b'):
basename = basename[:-2] + '.a'
lines = generic_initializationexpr(self.db, value,
'%s.items[%d]' % (basename, j),
'%s%d' % (decoration, j))
for line in lines:
yield '\t' + line
if not barebone:
yield '} }'
else:
yield '}'
assert not USESLOTS or '__dict__' not in dir(ArrayNode)
class FixedSizeArrayNode(ContainerNode):
nodekind = 'array'
if USESLOTS:
__slots__ = ()
def getptrname(self):
if not isinstance(self.obj, _subarray): # XXX hackish
return self.name
return ContainerNode.getptrname(self)
def basename(self):
T = self.getTYPE()
return T._name
def enum_dependencies(self):
for i in range(self.obj.getlength()):
yield self.obj.getitem(i)
def getvarlength(self):
return None # not variable-sized!
def initializationexpr(self, decoration=''):
T = self.getTYPE()
assert self.typename == self.implementationtypename # not var-sized
yield '{'
# _names == ['item0', 'item1', ...]
for j, name in enumerate(T._names):
value = getattr(self.obj, name)
lines = generic_initializationexpr(self.db, value,
'%s[%d]' % (self.name, j),
'%s%d' % (decoration, j))
for line in lines:
yield '\t' + line
yield '}'
def generic_initializationexpr(db, value, access_expr, decoration):
if isinstance(typeOf(value), ContainerType):
node = db.getcontainernode(value)
lines = list(node.initializationexpr(decoration+'.'))
lines[-1] += ','
return lines
else:
comma = ','
if typeOf(value) == Float and not isfinite(value):
db.late_initializations.append(('%s' % access_expr, db.get(value)))
if isinf(value):
name = '-+'[value > 0] + 'inf'
else:
name = 'NaN'
expr = '0.0 /* patched later with %s */' % (name,)
else:
expr = db.get(value)
if typeOf(value) is Void:
comma = ''
expr += comma
i = expr.find('\n')
if i<0: i = len(expr)
expr = '%s\t/* %s */%s' % (expr[:i], decoration, expr[i:])
return expr.split('\n')
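# For illustration (assumption, not in the original file): a prebuilt non-finite
# float such as float('inf') is emitted by the branch above as
#   0.0 /* patched later with +inf */
# and the pair (access_expr, real value) is queued in db.late_initializations,
# so start-up code overwrites the field at runtime; the generated C source
# itself never has to spell an infinity or NaN literal.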
# ____________________________________________________________
class FuncNode(ContainerNode):
nodekind = 'func'
eci_name = 'compilation_info'
# there are not so many nodes of this kind, so slots should not
# be necessary
def __init__(self, db, T, obj, forcename=None):
Node.__init__(self, db)
self.globalcontainer = True
self.T = T
self.obj = obj
callable = getattr(obj, '_callable', None)
if (callable is not None and
getattr(callable, 'c_name', None) is not None):
self.name = forcename or obj._callable.c_name
elif getattr(obj, 'external', None) == 'C' and not db.need_sandboxing(obj):
self.name = forcename or self.basename()
else:
self.name = (forcename or
db.namespace.uniquename('g_' + self.basename()))
self.make_funcgens()
self.typename = db.gettype(T) #, who_asks=self)
def getptrname(self):
return self.name
def make_funcgens(self):
self.funcgens = select_function_code_generators(self.obj, self.db, self.name)
if self.funcgens:
argnames = self.funcgens[0].argnames() #Assume identical for all funcgens
self.implementationtypename = self.db.gettype(self.T, argnames=argnames)
self._funccodegen_owner = self.funcgens[0]
else:
self._funccodegen_owner = None
def basename(self):
return self.obj._name
def enum_dependencies(self):
if not self.funcgens:
return []
return self.funcgens[0].allconstantvalues() #Assume identical for all funcgens
def forward_declaration(self):
callable = getattr(self.obj, '_callable', None)
is_exported = getattr(callable, 'exported_symbol', False)
for funcgen in self.funcgens:
yield '%s;' % (
forward_cdecl(self.implementationtypename,
funcgen.name(self.name), self.db.standalone,
is_exported=is_exported))
def implementation(self):
for funcgen in self.funcgens:
for s in self.funcgen_implementation(funcgen):
yield s
def graphs_to_patch(self):
for funcgen in self.funcgens:
for i in funcgen.graphs_to_patch():
yield i
def funcgen_implementation(self, funcgen):
funcgen.implementation_begin()
# recompute implementationtypename as the argnames may have changed
argnames = funcgen.argnames()
implementationtypename = self.db.gettype(self.T, argnames=argnames)
yield '%s {' % cdecl(implementationtypename, funcgen.name(self.name))
#
# declare the local variables
#
localnames = list(funcgen.cfunction_declarations())
lengths = [len(a) for a in localnames]
lengths.append(9999)
start = 0
while start < len(localnames):
# pack the local declarations over as few lines as possible
total = lengths[start] + 8
end = start+1
while total + lengths[end] < 77:
total += lengths[end] + 1
end += 1
yield '\t' + ' '.join(localnames[start:end])
start = end
#
# generate the body itself
#
bodyiter = funcgen.cfunction_body()
for line in bodyiter:
# performs some formatting on the generated body:
# indent normal lines with tabs; indent labels less than the rest
if line.endswith(':'):
if line.startswith('err'):
try:
nextline = bodyiter.next()
except StopIteration:
nextline = ''
# merge this 'err:' label with the following line
line = '\t%s\t%s' % (line, nextline)
else:
line = ' ' + line
elif line:
line = '\t' + line
yield line
yield '}'
del bodyiter
funcgen.implementation_end()
def sandbox_stub(fnobj, db):
# unexpected external function for --sandbox translation: replace it
# with a "Not Implemented" stub. To support these functions, port them
# to the new style registry (e.g. rpython.module.ll_os.RegisterOs).
from rpython.translator.sandbox import rsandbox
graph = rsandbox.get_external_function_sandbox_graph(fnobj, db,
force_stub=True)
return [FunctionCodeGenerator(graph, db)]
def sandbox_transform(fnobj, db):
# for --sandbox: replace a function like os_open_llimpl() with
# code that communicates with the external process to ask it to
# perform the operation.
from rpython.translator.sandbox import rsandbox
graph = rsandbox.get_external_function_sandbox_graph(fnobj, db)
return [FunctionCodeGenerator(graph, db)]
def select_function_code_generators(fnobj, db, functionname):
sandbox = db.need_sandboxing(fnobj)
if hasattr(fnobj, 'graph'):
if sandbox and sandbox != "if_external":
# apply the sandbox transformation
return sandbox_transform(fnobj, db)
exception_policy = getattr(fnobj, 'exception_policy', None)
return [FunctionCodeGenerator(fnobj.graph, db, exception_policy,
functionname)]
elif getattr(fnobj, 'external', None) is not None:
if sandbox:
return sandbox_stub(fnobj, db)
elif fnobj.external == 'C':
return []
else:
assert fnobj.external == 'CPython'
return [CExternalFunctionCodeGenerator(fnobj, db)]
elif hasattr(fnobj._callable, "c_name"):
return [] # this case should only be used for entrypoints
else:
raise ValueError("don't know how to generate code for %r" % (fnobj,))
class ExtType_OpaqueNode(ContainerNode):
nodekind = 'rpyopaque'
def enum_dependencies(self):
return []
def initializationexpr(self, decoration=''):
T = self.getTYPE()
raise NotImplementedError(
'seeing an unexpected prebuilt object: %s' % (T.tag,))
def startupcode(self):
T = self.getTYPE()
args = [self.getptrname()]
# XXX how to make this code more generic?
if T.tag == 'ThreadLock':
lock = self.obj.externalobj
if lock.locked():
args.append('1')
else:
args.append('0')
yield 'RPyOpaque_SETUP_%s(%s);' % (T.tag, ', '.join(args))
def opaquenode_factory(db, T, obj):
if T == RuntimeTypeInfo:
return db.gcpolicy.rtti_node_factory()(db, T, obj)
if T.hints.get("render_structure", False):
return ExtType_OpaqueNode(db, T, obj)
raise Exception("don't know about %r" % (T,))
def weakrefnode_factory(db, T, obj):
assert isinstance(obj, llmemory._wref)
ptarget = obj._dereference()
wrapper = db.gcpolicy.convert_weakref_to(ptarget)
container = wrapper._obj
#obj._converted_weakref = container # hack for genllvm :-/
return db.getcontainernode(container, _dont_write_c_code=False)
class GroupNode(ContainerNode):
nodekind = 'group'
count_members = None
def __init__(self, *args):
ContainerNode.__init__(self, *args)
self.implementationtypename = 'struct group_%s_s @' % self.name
def basename(self):
return self.obj.name
def enum_dependencies(self):
# note: for the group used by the GC, it can grow during this phase,
# which means that we might not return all members yet. This is fixed
# by get_finish_tables() in rpython.memory.gctransform.framework.
for member in self.obj.members:
yield member._as_ptr()
def _fix_members(self):
if self.obj.outdated:
raise Exception(self.obj.outdated)
if self.count_members is None:
self.count_members = len(self.obj.members)
else:
# make sure no new member showed up, because it's too late
assert len(self.obj.members) == self.count_members
def forward_declaration(self):
self._fix_members()
yield ''
ctype = ['%s {' % cdecl(self.implementationtypename, '')]
for i, member in enumerate(self.obj.members):
structtypename = self.db.gettype(typeOf(member))
ctype.append('\t%s;' % cdecl(structtypename, 'member%d' % i))
ctype.append('} @')
ctype = '\n'.join(ctype)
yield '%s;' % (
forward_cdecl(ctype, self.name, self.db.standalone,
self.is_thread_local()))
yield '#include "src/llgroup.h"'
yield 'PYPY_GROUP_CHECK_SIZE(%s)' % (self.name,)
for i, member in enumerate(self.obj.members):
structnode = self.db.getcontainernode(member)
yield '#define %s %s.member%d' % (structnode.name,
self.name, i)
yield ''
def initializationexpr(self):
self._fix_members()
lines = ['{']
lasti = len(self.obj.members) - 1
for i, member in enumerate(self.obj.members):
structnode = self.db.getcontainernode(member)
lines1 = list(structnode.initializationexpr())
lines1[0] += '\t/* member%d: %s */' % (i, structnode.name)
if i != lasti:
lines1[-1] += ','
lines.extend(lines1)
lines.append('}')
return lines
ContainerNodeFactory = {
Struct: StructNode,
GcStruct: gcstructnode_factory,
Array: ArrayNode,
GcArray: ArrayNode,
FixedSizeArray: FixedSizeArrayNode,
FuncType: FuncNode,
OpaqueType: opaquenode_factory,
llmemory._WeakRefType: weakrefnode_factory,
llgroup.GroupType: GroupNode,
}
|
jptomo/rpython-lang-scheme
|
rpython/translator/c/node.py
|
Python
|
mit
| 38,642
|
[
"VisIt"
] |
7984dc11df831622fbc5d28abd37adf2c6db3e47490067da6639c794f24f01fb
|
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy import signals, model
from hyperspy.decorators import lazifyTestClass
@lazifyTestClass
class Test_Estimate_Elastic_Scattering_Threshold:
def setup_method(self, method):
# Create an empty spectrum
s = signals.EELSSpectrum(np.zeros((3, 2, 1024)))
energy_axis = s.axes_manager.signal_axes[0]
energy_axis.scale = 0.02
energy_axis.offset = -5
gauss = model.components1d.Gaussian()
gauss.centre.value = 0
gauss.A.value = 5000
gauss.sigma.value = 0.5
gauss2 = model.components1d.Gaussian()
gauss2.sigma.value = 0.5
# Inflexion point 1.5
gauss2.A.value = 5000
gauss2.centre.value = 5
s.data[:] = (gauss.function(energy_axis.axis) +
gauss2.function(energy_axis.axis))
self.signal = s
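# Note (added, not part of the original tests): the spectrum built above is the
# sum of two equal-amplitude Gaussians centred at 0 and 5, so by symmetry its
# minimum, which the threshold estimation below looks for, lies halfway at 2.5;
# without smoothing the 0.02-wide channels give the slightly offset value 2.49.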
def test_min_in_window_with_smoothing(self):
s = self.signal
thr = s.estimate_elastic_scattering_threshold(
window=5,
window_length=5,
tol=0.00001,
)
np.testing.assert_allclose(thr.data, 2.5, atol=10e-3)
assert thr.metadata.Signal.signal_type == ""
assert thr.axes_manager.signal_dimension == 0
def test_min_in_window_without_smoothing_single_spectrum(self):
s = self.signal.inav[0, 0]
thr = s.estimate_elastic_scattering_threshold(
window=5,
window_length=0,
tol=0.001,
)
np.testing.assert_allclose(thr.data, 2.49, atol=10e-3)
def test_min_in_window_without_smoothing(self):
s = self.signal
thr = s.estimate_elastic_scattering_threshold(
window=5,
window_length=0,
tol=0.001,
)
np.testing.assert_allclose(thr.data, 2.49, atol=10e-3)
def test_min_not_in_window(self):
# If I use a much lower window, this is the value that has to be
# returned as threshold.
s = self.signal
data = s.estimate_elastic_scattering_threshold(window=1.5,
tol=0.001,
).data
assert np.all(np.isnan(data))
def test_estimate_elastic_scattering_intensity(self):
s = self.signal
threshold = s.estimate_elastic_scattering_threshold(window=4.)
# Threshold is nd signal
t = s.estimate_elastic_scattering_intensity(threshold=threshold)
assert t.metadata.Signal.signal_type == ""
assert t.axes_manager.signal_dimension == 0
np.testing.assert_array_almost_equal(t.data, 249999.985133)
# Threshold is signal, 1 spectrum
s0 = s.inav[0]
t0 = s0.estimate_elastic_scattering_threshold(window=4.)
t = s0.estimate_elastic_scattering_intensity(threshold=t0)
np.testing.assert_array_almost_equal(t.data, 249999.985133)
# Threshold is value
t = s.estimate_elastic_scattering_intensity(threshold=2.5)
np.testing.assert_array_almost_equal(t.data, 249999.985133)
@lazifyTestClass
class TestEstimateZLPCentre:
def setup_method(self, method):
s = signals.EELSSpectrum(np.diag(np.arange(1, 11)))
s.axes_manager[-1].scale = 0.1
s.axes_manager[-1].offset = 100
self.signal = s
def test_estimate_zero_loss_peak_centre(self):
s = self.signal
zlpc = s.estimate_zero_loss_peak_centre()
np.testing.assert_allclose(zlpc.data,
np.arange(100,
101,
0.1))
assert zlpc.metadata.Signal.signal_type == ""
assert zlpc.axes_manager.signal_dimension == 0
@lazifyTestClass
class TestAlignZLP:
def setup_method(self, method):
s = signals.EELSSpectrum(np.zeros((10, 100)))
self.scale = 0.1
self.offset = -2
eaxis = s.axes_manager.signal_axes[0]
eaxis.scale = self.scale
eaxis.offset = self.offset
self.izlp = eaxis.value2index(0)
self.bg = 2
self.ishifts = np.array([0, 4, 2, -2, 5, -2, -5, -9, -9, -8])
self.new_offset = self.offset - self.ishifts.min() * self.scale
s.data[np.arange(10), self.ishifts + self.izlp] = 10
s.data += self.bg
s.axes_manager[-1].offset += 100
self.signal = s
def test_align_zero_loss_peak_calibrate_true(self):
s = self.signal
s.align_zero_loss_peak(
calibrate=True,
print_stats=False,
show_progressbar=None)
zlpc = s.estimate_zero_loss_peak_centre()
np.testing.assert_allclose(zlpc.data.mean(), 0)
np.testing.assert_allclose(zlpc.data.std(), 0)
def test_align_zero_loss_peak_calibrate_true_with_mask(self):
s = self.signal
mask = s._get_navigation_signal(dtype="bool").T
mask.data[[3, 5]] = (True, True)
s.align_zero_loss_peak(
calibrate=True,
print_stats=False,
show_progressbar=None,
mask=mask)
zlpc = s.estimate_zero_loss_peak_centre(mask=mask)
np.testing.assert_allclose(np.nanmean(zlpc.data), 0,
atol=np.finfo(float).eps)
np.testing.assert_allclose(np.nanstd(zlpc.data), 0,
atol=np.finfo(float).eps)
def test_align_zero_loss_peak_calibrate_false(self):
s = self.signal
s.align_zero_loss_peak(
calibrate=False,
print_stats=False,
show_progressbar=None)
zlpc = s.estimate_zero_loss_peak_centre()
np.testing.assert_allclose(zlpc.data.std(), 0, atol=10e-3)
def test_also_aligns(self):
s = self.signal
s2 = s.deepcopy()
s.align_zero_loss_peak(calibrate=True,
print_stats=False,
also_align=[s2],
show_progressbar=None)
zlpc = s2.estimate_zero_loss_peak_centre()
assert zlpc.data.mean() == 0
assert zlpc.data.std() == 0
def test_align_zero_loss_peak_with_spike_signal_range(self):
s = self.signal
spike = np.zeros((10, 100))
spike_amplitude = 20
spike[:, 75] = spike_amplitude
s.data += spike
s.align_zero_loss_peak(
print_stats=False, subpixel=False, signal_range=(98., 102.))
zlp_max = s.isig[-0.5:0.5].max(-1).data
# Max value in the original spectrum is 12, but due to the aligning
# the peak is split between two different channels. So 8 is the
# maximum value for the aligned spectrum
assert np.allclose(zlp_max, 8)
def test_align_zero_loss_peak_crop_false(self):
s = self.signal
original_size = s.axes_manager.signal_axes[0].size
s.align_zero_loss_peak(
crop=False,
print_stats=False,
show_progressbar=None)
assert original_size == s.axes_manager.signal_axes[0].size
@lazifyTestClass
class TestPowerLawExtrapolation:
def setup_method(self, method):
s = signals.EELSSpectrum(0.1 * np.arange(50, 250, 0.5) ** -3.)
s.metadata.Signal.binned = False
s.axes_manager[-1].offset = 50
s.axes_manager[-1].scale = 0.5
self.s = s
def test_unbinned(self):
sc = self.s.isig[:300]
s = sc.power_law_extrapolation(extrapolation_size=100)
np.testing.assert_allclose(s.data, self.s.data, atol=10e-3)
def test_binned(self):
self.s.data *= self.s.axes_manager[-1].scale
self.s.metadata.Signal.binned = True
sc = self.s.isig[:300]
s = sc.power_law_extrapolation(extrapolation_size=100)
np.testing.assert_allclose(s.data, self.s.data, atol=10e-3)
@lazifyTestClass
class TestFourierRatioDeconvolution:
@pytest.mark.parametrize(('extrapolate_lowloss'), [True, False])
def test_running(self, extrapolate_lowloss):
s = signals.EELSSpectrum(np.arange(200))
gaussian = model.components1d.Gaussian()
gaussian.A.value = 50
gaussian.sigma.value = 10
gaussian.centre.value = 20
s_ll = signals.EELSSpectrum(gaussian.function(np.arange(0, 200, 1)))
s_ll.axes_manager[0].offset = -50
s.fourier_ratio_deconvolution(s_ll,
extrapolate_lowloss=extrapolate_lowloss)
class TestRebin:
def setup_method(self, method):
# Create a spectrum filled with ones
s = signals.EELSSpectrum(np.ones((4, 2, 1024)))
self.signal = s
def test_rebin_without_dwell_time(self):
s = self.signal
s.rebin(scale=(2, 2, 1))
def test_rebin_dwell_time(self):
s = self.signal
s.metadata.add_node("Acquisition_instrument.TEM.Detector.EELS")
s_mdEELS = s.metadata.Acquisition_instrument.TEM.Detector.EELS
s_mdEELS.dwell_time = 0.1
s_mdEELS.exposure = 0.5
s2 = s.rebin(scale=(2, 2, 8))
s2_mdEELS = s2.metadata.Acquisition_instrument.TEM.Detector.EELS
assert s2_mdEELS.dwell_time == (0.1 * 2 * 2)
assert s2_mdEELS.exposure == (0.5 * 2 * 2)
def test_rebin_exposure(self):
s = self.signal
s.metadata.exposure = 10.2
s2 = s.rebin(scale=(2, 2, 8))
assert s2.metadata.exposure == (10.2 * 2 * 2)
def test_offset_after_rebin(self):
s = self.signal
s.axes_manager[0].offset = 1
s.axes_manager[1].offset = 2
s.axes_manager[2].offset = 3
s2 = s.rebin(scale=(2, 2, 1))
assert s2.axes_manager[0].offset == 1.5
assert s2.axes_manager[1].offset == 2.5
assert s2.axes_manager[2].offset == s.axes_manager[2].offset
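# Note (added, not part of the original tests): rebinning the navigation axes by
# a factor of 2 places the new first bin centre at the mean of the first two old
# centres, so with the default axis scale of 1 the offsets move from 1 to 1.5
# and from 2 to 2.5, while the untouched signal axis keeps its original offset.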
|
francisco-dlp/hyperspy
|
hyperspy/tests/signal/test_eels.py
|
Python
|
gpl-3.0
| 10,558
|
[
"Gaussian"
] |
4fd0cba058d49f29d4ac51b1dd3837d47e84347c1cda1f9e3d0037ab7536228f
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import espressomd
import numpy as np
class CellSystem(ut.TestCase):
system = espressomd.System(box_l=[5.0, 5.0, 5.0])
system.cell_system.skin = 0.0
n_nodes = system.cell_system.get_state()['n_nodes']
def test_cell_system(self):
self.system.cell_system.set_n_square(use_verlet_lists=False)
s = self.system.cell_system.get_state()
self.assertEqual([s['use_verlet_list'], s['type']], [0, "nsquare"])
self.system.cell_system.set_regular_decomposition(
use_verlet_lists=True)
s = self.system.cell_system.get_state()
self.assertEqual(
[s['use_verlet_list'], s['type']], [1, "regular_decomposition"])
@ut.skipIf(n_nodes == 1, "Skipping test: only runs for n_nodes >= 2")
def test_node_grid(self):
self.system.cell_system.set_regular_decomposition()
for i in range(3):
node_grid_ref = [1, 1, 1]
node_grid_ref[i] = self.n_nodes
self.system.cell_system.node_grid = node_grid_ref
node_grid = self.system.cell_system.get_state()['node_grid']
np.testing.assert_array_equal(node_grid, node_grid_ref)
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/python/cellsystem.py
|
Python
|
gpl-3.0
| 1,941
|
[
"ESPResSo"
] |
7f45545cd59fae170b5ce4ad29b8f78d5c7698bfc55f6c617e9aef324e327f7a
|
# -*- coding: utf-8 -*-
"""Single-dipole functions and classes."""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
import re
import numpy as np
from scipy import linalg
from .cov import read_cov, compute_whitener
from .io.constants import FIFF
from .io.pick import pick_types, channel_type
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .transforms import _print_coord_trans, _coord_frame_name, apply_trans
from .viz.evoked import _plot_evoked
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .surface import transform_surface_to, _compute_nearest
from .bem import _bem_find_surface, _surf_name
from .source_space import (_make_volume_source_space, SourceSpaces,
_points_outside_surface)
from .parallel import parallel_func
from .utils import (logger, verbose, _time_mask, warn, _check_fname,
check_fname, _pl, fill_doc, _check_option)
@fill_doc
class Dipole(object):
u"""Dipole class for sequential dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers. Note that dipole position vectors are given in
the head coordinate frame.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipoles positions (m) in head coordinates.
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (Am).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
conf : dict
Confidence limits in dipole orientation for "vol" in m^3 (volume),
"depth" in m (along the depth axis), "long" in m (longitudinal axis),
"trans" in m (transverse axis), "qlong" in Am, and "qtrans" in Am
(currents). The current confidence limit in the depth direction is
assumed to be zero (although it can be non-zero when a BEM is used).
.. versionadded:: 0.15
khi2 : array, shape (n_dipoles,)
The χ^2 values for the fits.
.. versionadded:: 0.15
nfree : array, shape (n_dipoles,)
The number of free parameters for each fit.
.. versionadded:: 0.15
%(verbose)s
See Also
--------
fit_dipole
DipoleFixed
read_dipole
Notes
-----
This class is for sequential dipole fits, where the position
changes as a function of time. For fixed dipole fits, where the
position is fixed as a function of time, use :class:`mne.DipoleFixed`.
"""
@verbose
def __init__(self, times, pos, amplitude, ori, gof,
name=None, conf=None, khi2=None, nfree=None,
verbose=None): # noqa: D102
self.times = np.array(times)
self.pos = np.array(pos)
self.amplitude = np.array(amplitude)
self.ori = np.array(ori)
self.gof = np.array(gof)
self.name = name
self.conf = deepcopy(conf) if conf is not None else dict()
self.khi2 = np.array(khi2) if khi2 is not None else None
self.nfree = np.array(nfree) if nfree is not None else None
self.verbose = verbose
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %0.3f" % np.min(self.times)
s += ", tmax : %0.3f" % np.max(self.times)
return "<Dipole | %s>" % s
def save(self, fname):
"""Save dipole in a .dip file.
Parameters
----------
fname : str
The name of the .dip file.
"""
# obligatory fields
fmt = ' %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f'
header = ('# begin end X (mm) Y (mm) Z (mm)'
' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%')
t = self.times[:, np.newaxis] * 1000.
gof = self.gof[:, np.newaxis]
amp = 1e9 * self.amplitude[:, np.newaxis]
out = (t, t, self.pos / 1e-3, amp, self.ori * amp, gof)
# optional fields
fmts = dict(khi2=(' khi^2', ' %8.1f', 1.),
nfree=(' free', ' %5d', 1),
vol=(' vol/mm^3', ' %9.3f', 1e9),
depth=(' depth/mm', ' %9.3f', 1e3),
long=(' long/mm', ' %8.3f', 1e3),
trans=(' trans/mm', ' %9.3f', 1e3),
qlong=(' Qlong/nAm', ' %10.3f', 1e9),
qtrans=(' Qtrans/nAm', ' %11.3f', 1e9),
)
for key in ('khi2', 'nfree'):
data = getattr(self, key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
for key in ('vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'):
data = self.conf.get(key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
out = np.concatenate(out, axis=-1)
# NB CoordinateSystem is hard-coded as Head here
with open(fname, 'wb') as fid:
fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write((header + '\n').encode('utf-8'))
np.savetxt(fid, out, fmt=fmt)
if self.name is not None:
fid.write(('## Name "%s dipoles" Style "Dipoles"'
% self.name).encode('utf-8'))
def crop(self, tmin=None, tmax=None):
"""Crop data to a given time interval.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
Returns
-------
self : instance of Dipole
The cropped instance.
"""
sfreq = None
if len(self.times) > 1:
sfreq = 1. / np.median(np.diff(self.times))
mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori',
'khi2', 'nfree'):
if getattr(self, attr) is not None:
setattr(self, attr, getattr(self, attr)[mask])
for key in self.conf.keys():
self.conf[key] = self.conf[key][mask]
return self
def copy(self):
"""Copy the Dipoles object.
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
mode='orthoview', coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False, show=True,
verbose=None):
"""Plot dipole locations in 3d.
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : None | str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
The default is None.
mode : str
Currently only ``'orthoview'`` is supported.
.. versionadded:: 0.14.0
coord_frame : str
Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
.. versionadded:: 0.14.0
idx : int | 'gof' | 'amplitude'
Index of the initially plotted dipole. Can also be 'gof' to plot
the dipole with highest goodness of fit value or 'amplitude' to
plot the dipole with the highest amplitude. The dipoles can also be
browsed through using up/down arrow keys or mouse scroll. Defaults
to 'gof'. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show_all : bool
Whether to always plot all the dipoles. If True (default), the
active dipole is plotted as a red dot and its location determines
the shown MRI slices. The non-active dipoles are plotted as
small blue dots. If False, only the active dipole is plotted.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
ax : instance of matplotlib Axes3D | None
Axes to plot into. If None (default), axes will be created.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show : bool
Show figure if True. Defaults to True.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
%(verbose_meth)s
Returns
-------
fig : instance of mayavi.mlab.Figure or matplotlib.figure.Figure
The mayavi figure or matplotlib Figure.
Notes
-----
.. versionadded:: 0.9.0
"""
from .viz import plot_dipole_locations
dipoles = self
if mode in [None, 'cone', 'sphere']: # support old behavior
dipoles = []
for t in self.times:
dipoles.append(self.copy())
dipoles[-1].crop(t, t)
elif mode != 'orthoview':
raise ValueError("mode must be 'cone', 'sphere' or 'orthoview'. "
"Got %s." % mode)
return plot_dipole_locations(
dipoles, trans, subject, subjects_dir, mode, coord_frame, idx,
show_all, ax, block, show)
def plot_amplitudes(self, color='k', show=True):
"""Plot the dipole amplitudes as a function of time.
Parameters
----------
color : matplotlib color
Color to use for the trace.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
"""
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, item):
"""Get a time slice.
Parameters
----------
item : array-like or slice
The slice of time points to use.
Returns
-------
dip : instance of Dipole
The sliced dipole.
"""
if isinstance(item, int): # make sure attributes stay 2d
item = [item]
selected_times = self.times[item].copy()
selected_pos = self.pos[item, :].copy()
selected_amplitude = self.amplitude[item].copy()
selected_ori = self.ori[item, :].copy()
selected_gof = self.gof[item].copy()
selected_name = self.name
selected_conf = dict()
for key in self.conf.keys():
selected_conf[key] = self.conf[key][item]
selected_khi2 = self.khi2[item] if self.khi2 is not None else None
selected_nfree = self.nfree[item] if self.nfree is not None else None
return Dipole(
selected_times, selected_pos, selected_amplitude, selected_ori,
selected_gof, selected_name, selected_conf, selected_khi2,
selected_nfree)
def __len__(self):
"""Return the number of dipoles.
Returns
-------
len : int
The number of dipoles.
Examples
--------
This can be used as::
>>> len(dipoles) # doctest: +SKIP
10
"""
return self.pos.shape[0]
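# A minimal usage sketch (added; the file name is hypothetical):
#
# >>> from mne import read_dipole  # doctest: +SKIP
# >>> dip = read_dipole('sample_dipoles.dip')  # doctest: +SKIP
# >>> dip = dip.crop(tmin=0.05, tmax=0.10)  # doctest: +SKIP
# >>> len(dip), float(dip.gof.max())  # doctest: +SKIP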
def _read_dipole_fixed(fname):
"""Read a fixed dipole FIF file."""
logger.info('Reading %s ...' % fname)
info, nave, aspect_kind, first, last, comment, times, data = \
_read_evoked(fname)
return DipoleFixed(info, data, times, nave, aspect_kind, first, last,
comment)
@fill_doc
class DipoleFixed(object):
"""Dipole class for fixed-position dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Parameters
----------
info : instance of Info
The measurement info.
data : array, shape (n_channels, n_times)
The dipole data.
times : array, shape (n_times,)
The time points.
nave : int
Number of averages.
aspect_kind : int
The kind of data.
first : int
First sample.
last : int
Last sample.
comment : str
The dipole comment.
%(verbose)s
See Also
--------
read_dipole
Dipole
fit_dipole
Notes
-----
This class is for fixed-position dipole fits, where the position
(and maybe orientation) is static over time. For sequential dipole fits,
where the position can change as a function of time, use :class:`mne.Dipole`.
.. versionadded:: 0.12
"""
@verbose
def __init__(self, info, data, times, nave, aspect_kind, first, last,
comment, verbose=None): # noqa: D102
self.info = info
self.nave = nave
self._aspect_kind = aspect_kind
self.kind = _aspect_rev.get(aspect_kind, 'unknown')
self.first = first
self.last = last
self.comment = comment
self.times = times
self.data = data
self.verbose = verbose
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<DipoleFixed | %s>" % s
def copy(self):
"""Copy the DipoleFixed object.
Returns
-------
inst : instance of DipoleFixed
The copy.
Notes
-----
.. versionadded:: 0.16
"""
return deepcopy(self)
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@verbose
def save(self, fname, verbose=None):
"""Save dipole in a .fif file.
Parameters
----------
fname : str
The name of the .fif file. Must end with ``'.fif'`` or
``'.fif.gz'`` to make it explicit that the file contains
dipole information in FIF format.
%(verbose_meth)s
"""
check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz',
'_dip.fif', '_dip.fif.gz',),
('.fif', '.fif.gz'))
_write_evokeds(fname, self, check=False)
def plot(self, show=True, time_unit='s'):
"""Plot dipole data.
Parameters
----------
show : bool
Call pyplot.show() at the end or not.
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure containing the time courses.
"""
return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,
ylim=None, xlim='tight', proj=False, hline=None,
units=None, scalings=None, titles=None, axes=None,
gfp=False, window_title=None, spatial_colors=False,
plot_type="butterfly", selectable=False,
time_unit=time_unit)
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
"""Read .dip file from Neuromag/xfit or MNE.
Parameters
----------
fname : str
The name of the .dip or .fif file.
%(verbose)s
Returns
-------
dipole : instance of Dipole or DipoleFixed
The dipole.
See Also
--------
Dipole
DipoleFixed
fit_dipole
"""
_check_fname(fname, overwrite='read', must_exist=True)
if fname.endswith('.fif') or fname.endswith('.fif.gz'):
return _read_dipole_fixed(fname)
else:
return _read_dipole_text(fname)
def _read_dipole_text(fname):
"""Read a dipole text file."""
# Figure out the special fields
need_header = True
def_line = name = None
# There is a bug in older np.loadtxt regarding skipping fields,
# so just read the data ourselves (need to get name and header anyway)
data = list()
with open(fname, 'r') as fid:
for line in fid:
if not (line.startswith('%') or line.startswith('#')):
need_header = False
data.append(line.strip().split())
else:
if need_header:
def_line = line
if line.startswith('##') or line.startswith('%%'):
m = re.search('Name "(.*) dipoles"', line)
if m:
name = m.group(1)
del line
data = np.atleast_2d(np.array(data, float))
if def_line is None:
raise IOError('Dipole text file is missing field definition '
'comment, cannot parse %s' % (fname,))
# actually parse the fields
def_line = def_line.lstrip('%').lstrip('#').strip()
# MNE writes it out differently than Elekta, let's standardize them...
fields = re.sub(r'([X|Y|Z] )\(mm\)', # "X (mm)", etc.
lambda match: match.group(1).strip() + '/mm', def_line)
fields = re.sub(r'\((.*?)\)', # "Q(nAm)", etc.
lambda match: '/' + match.group(1), fields)
fields = re.sub('(begin|end) ', # "begin" and "end" with no units
lambda match: match.group(1) + '/ms', fields)
fields = fields.lower().split()
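# For illustration (added, not in the original source): a standard header such as
#   "# begin     end   X (mm)   Y (mm)   Z (mm)   Q(nAm)  Qx(nAm)  Qy(nAm)  Qz(nAm)  g/%"
# is normalized by the three substitutions above into the tokens
#   ['begin/ms', 'end/ms', 'x/mm', 'y/mm', 'z/mm',
#    'q/nam', 'qx/nam', 'qy/nam', 'qz/nam', 'g/%']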
required_fields = ('begin/ms',
'x/mm', 'y/mm', 'z/mm',
'q/nam', 'qx/nam', 'qy/nam', 'qz/nam',
'g/%')
optional_fields = ('khi^2', 'free', # standard ones
# now the confidence fields (up to 5!)
'vol/mm^3', 'depth/mm', 'long/mm', 'trans/mm',
'qlong/nam', 'qtrans/nam')
conf_scales = [1e-9, 1e-3, 1e-3, 1e-3, 1e-9, 1e-9]
missing_fields = sorted(set(required_fields) - set(fields))
if len(missing_fields) > 0:
raise RuntimeError('Could not find necessary fields in header: %s'
% (missing_fields,))
handled_fields = set(required_fields) | set(optional_fields)
assert len(handled_fields) == len(required_fields) + len(optional_fields)
ignored_fields = sorted(set(fields) -
set(handled_fields) -
{'end/ms'})
if len(ignored_fields) > 0:
warn('Ignoring extra fields in dipole file: %s' % (ignored_fields,))
if len(fields) != data.shape[1]:
raise IOError('More data fields (%s) found than data columns (%s): %s'
% (len(fields), data.shape[1], fields))
logger.info("%d dipole(s) found" % len(data))
if 'end/ms' in fields:
if np.diff(data[:, [fields.index('begin/ms'),
fields.index('end/ms')]], 1, -1).any():
warn('begin and end fields differed, but only begin will be used '
'to store time values')
# Find the correct column in our data array, then scale to proper units
idx = [fields.index(field) for field in required_fields]
assert len(idx) >= 9
times = data[:, idx[0]] / 1000.
pos = 1e-3 * data[:, idx[1:4]] # put data in meters
amplitude = data[:, idx[4]]
norm = amplitude.copy()
amplitude /= 1e9
norm[norm == 0] = 1
ori = data[:, idx[5:8]] / norm[:, np.newaxis]
gof = data[:, idx[8]]
# Deal with optional fields
optional = [None] * 2
for fi, field in enumerate(optional_fields[:2]):
if field in fields:
optional[fi] = data[:, fields.index(field)]
khi2, nfree = optional
conf = dict()
for field, scale in zip(optional_fields[2:], conf_scales): # confidence
if field in fields:
conf[field.split('/')[0]] = scale * data[:, fields.index(field)]
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
"""Compute the forward solution and do other nice stuff."""
B = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
B = np.concatenate(B, axis=1)
assert np.isfinite(B).all()
B_orig = B.copy()
# Apply projection and whiten (cov has projections already)
B = np.dot(B, whitener.T)
# column normalization doesn't affect our fitting, so skip for now
# S = np.sum(B * B, axis=1) # across channels
# scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
# axis=1)), 3)
# B *= scales[:, np.newaxis]
scales = np.ones(3)
return B, B_orig, scales
def _make_guesses(surf, grid, exclude, mindist, n_jobs):
"""Make a guess space inside a sphere or BEM surface."""
if 'rr' in surf:
logger.info('Guess surface (%s) is in %s coordinates'
% (_surf_name[surf['id']],
_coord_frame_name(surf['coord_frame'])))
else:
logger.info('Making a spherical guess space with radius %7.1f mm...'
% (1000 * surf['R']))
logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
do_neighbors=False, n_jobs=n_jobs)
assert 'vertno' in src
# simplify the result to make things easier later
src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']))
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None):
"""Calculate the residual sum of squares."""
if fwd_svd is None:
fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
uu, sing, vv = linalg.svd(fwd, overwrite_a=True, full_matrices=False)
else:
uu, sing, vv = fwd_svd
gof = _dipole_gof(uu, sing, vv, B, B2)[0]
# mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
return 1. - gof
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD."""
ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) > 0.2 else 2
one = np.dot(vv[:ncomp], B)
Bm2 = np.sum(one * one)
gof = Bm2 / B2
return gof, one
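# Added note (intuition, not from the upstream sources): with fwd = U S V^T,
# the rows of ``vv[:ncomp]`` span the whitened field patterns a dipole at this
# location can produce, so ``Bm2 / B2`` is the fraction of the measured
# (whitened) field power explained by the dipole; ``ncomp`` drops to 2 when
# the third singular value is small relative to the first (a nearly silent
# dipole component).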
def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None):
"""Fit the dipole moment once the location is known."""
if 'fwd' in fwd_data:
# should be a single precomputed "guess" (i.e., fixed position)
assert rd is None
fwd = fwd_data['fwd']
assert fwd.shape[0] == 3
fwd_orig = fwd_data['fwd_orig']
assert fwd_orig.shape[0] == 3
scales = fwd_data['scales']
assert scales.shape == (3,)
fwd_svd = fwd_data['fwd_svd'][0]
else:
fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
rd[np.newaxis, :])
fwd_svd = None
if ori is None:
if fwd_svd is None:
fwd_svd = linalg.svd(fwd, full_matrices=False)
uu, sing, vv = fwd_svd
gof, one = _dipole_gof(uu, sing, vv, B, B2)
ncomp = len(one)
# Counteract the effect of column normalization
Q = scales[0] * np.sum(uu.T[:ncomp] *
(one / sing[:ncomp])[:, np.newaxis], axis=0)
else:
fwd = np.dot(ori[np.newaxis], fwd)
sing = np.linalg.norm(fwd)
one = np.dot(fwd / sing, B)
gof = (one * one)[0] / B2
Q = ori * (scales[0] * np.sum(one / sing))
ncomp = 3
B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q)
return Q, gof, B_residual_noproj, ncomp
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
guess_data, fwd_data, whitener, ori, n_jobs, rank):
"""Fit a single dipole to the given whitened, projected data."""
from scipy.optimize import fmin_cobyla
parallel, p_fun, _ = parallel_func(fun, n_jobs)
# parallel over time points
res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank)
for B, t in zip(data.T, times))
pos = np.array([r[0] for r in res])
amp = np.array([r[1] for r in res])
ori = np.array([r[2] for r in res])
gof = np.array([r[3] for r in res]) * 100 # convert to percentage
conf = None
if res[0][4] is not None:
conf = np.array([r[4] for r in res])
keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans']
conf = {key: conf[:, ki] for ki, key in enumerate(keys)}
khi2 = np.array([r[5] for r in res])
nfree = np.array([r[6] for r in res])
residual_noproj = np.array([r[7] for r in res]).T
return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _fit_confidence(rd, Q, ori, whitener, fwd_data):
    # As described in the Xfit manual, confidence intervals can be calculated
    # by examining a linearization of the model at the best-fitting location,
# i.e. taking the Jacobian and using the whitener:
#
# J = [∂b/∂x ∂b/∂y ∂b/∂z ∂b/∂Qx ∂b/∂Qy ∂b/∂Qz]
# C = (J.T C^-1 J)^-1
#
# And then the confidence interval is the diagonal of C, scaled by 1.96
# (for 95% confidence).
direction = np.empty((3, 3))
# The coordinate system has the x axis aligned with the dipole orientation,
direction[0] = ori
# the z axis through the origin of the sphere model
rvec = rd - fwd_data['inner_skull']['r0']
direction[2] = rvec - ori * np.dot(ori, rvec) # orthogonalize
direction[2] /= np.linalg.norm(direction[2])
    # and the y axis perpendicular to these, forming a right-handed system.
direction[1] = np.cross(direction[2], direction[0])
assert np.allclose(np.dot(direction, direction.T), np.eye(3))
# Get spatial deltas in dipole coordinate directions
deltas = (-1e-4, 1e-4)
J = np.empty((whitener.shape[0], 6))
for ii in range(3):
fwds = []
for delta in deltas:
this_r = rd[np.newaxis] + delta * direction[ii]
fwds.append(
np.dot(Q, _dipole_forwards(fwd_data, whitener, this_r)[0]))
J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# Get current (Q) deltas in the dipole directions
deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q)
this_fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis])[0]
for ii in range(3):
fwds = []
for delta in deltas:
fwds.append(np.dot(Q + delta * direction[ii], this_fwd))
J[:, ii + 3] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# J is already whitened, so we don't need to do np.dot(whitener, J).
# However, the units in the Jacobian are potentially quite different,
# so we need to do some normalization during inversion, then revert.
direction_norm = np.linalg.norm(J[:, :3])
Q_norm = np.linalg.norm(J[:, 3:5]) # omit possible zero Z
norm = np.array([direction_norm] * 3 + [Q_norm] * 3)
J /= norm
J = np.dot(J.T, J)
C = linalg.pinvh(J, rcond=1e-14)
C /= norm
C /= norm[:, np.newaxis]
conf = 1.96 * np.sqrt(np.diag(C))
    # The confidence volume of the dipole location is obtained by
# taking the eigenvalues of the upper left submatrix and computing
# v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or:
vol_conf = 4 * np.pi / 3. * np.sqrt(
476.379541 * np.prod(linalg.eigh(C[:3, :3], eigvals_only=True)))
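    # Added note: 1.96 is the standard normal multiplier for a two-sided 95%
    # interval used for the per-parameter confidences above, and
    # 476.379541 == 7.81 ** 3, where 7.81 is (approximately) the 95% quantile
    # of a chi-squared distribution with 3 degrees of freedom used for the
    # 3-D confidence volume.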
conf = np.concatenate([conf, [vol_conf]])
# Now we reorder and subselect the proper columns:
# vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero)
conf = conf[[6, 2, 0, 1, 3, 4]]
return conf
def _surface_constraint(rd, surf, min_dist_to_inner_skull):
"""Surface fitting constraint."""
dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
return_dists=True)[1][0]
if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
dist *= -1.
# Once we know the dipole is below the inner skull,
# let's check if its distance to the inner skull is at least
# min_dist_to_inner_skull. This can be enforced by adding a
# constrain proportional to its distance.
dist -= min_dist_to_inner_skull
return dist
def _sphere_constraint(rd, r0, R_adj):
"""Sphere fitting constraint."""
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
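# Sign convention sketch (illustrative numbers, added for clarity): with
# r0 = (0, 0, 0.04) m and R_adj = 0.07 m, a candidate rd = (0, 0, 0.05) gives
# 0.07 - 0.01 = 0.06 > 0 (allowed), whereas rd = (0, 0, 0.12) gives
# 0.07 - 0.08 = -0.01 < 0 (rejected). COBYLA treats a constraint as satisfied
# only when the constraint function evaluates to a non-negative value.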
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener, fmin_cobyla, ori, rank):
"""Fit a single bit of data."""
B = np.dot(whitener, B_orig)
# make constraint function to keep the solver within the inner skull
if 'rr' in fwd_data['inner_skull']: # bem
surf = fwd_data['inner_skull']
constraint = partial(_surface_constraint, surf=surf,
min_dist_to_inner_skull=min_dist_to_inner_skull)
else: # sphere
surf = None
constraint = partial(
_sphere_constraint, r0=fwd_data['inner_skull']['r0'],
R_adj=fwd_data['inner_skull']['R'] - min_dist_to_inner_skull)
# Find a good starting point (find_best_guess in C)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
        # return the full 8-tuple expected by _fit_dipoles so the downstream
        # unpacking does not fail on zero-field data
        return np.zeros(3), 0., np.zeros(3), 0., np.zeros(6), 0., 0., B_orig
idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])
x0 = guess_rrs[idx]
fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener)
# Tested minimizers:
# Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
# Several were similar, but COBYLA won for having a handy constraint
# function we can use to ensure we stay inside the inner skull /
# smallest sphere
rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
rhobeg=5e-2, rhoend=5e-5, disp=False)
# simplex = _make_tetra_simplex() + x0
# _simplex_minimize(simplex, 1e-4, 2e-4, fun)
# rd_final = simplex[0]
# Compute the dipole moment at the final point
Q, gof, residual_noproj, n_comp = _fit_Q(
fwd_data, whitener, B, B2, B_orig, rd_final, ori=ori)
khi2 = (1 - gof) * B2
nfree = rank - n_comp
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
msg = '---- Fitted : %7.1f ms' % (1000. * t)
if surf is not None:
dist_to_inner_skull = _compute_nearest(
surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]
msg += (", distance to inner skull : %2.4f mm"
% (dist_to_inner_skull * 1000.))
logger.info(msg)
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank):
"""Fit a data using a fixed position."""
B = np.dot(whitener, B_orig)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
        # return the full 8-tuple expected by _fit_dipoles; conf, khi2 and
        # nfree stay None, matching the non-degenerate path below
        return guess_rrs[0], 0., np.zeros(3), 0., None, None, None, B_orig
# Compute the dipole moment
Q, gof, residual_noproj = _fit_Q(guess_data, whitener, B, B2, B_orig,
rd=None, ori=ori)[:3]
if ori is None:
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
else:
amp = np.dot(Q, ori)
rd_final = guess_rrs[0]
# This will be slow, and we don't use it anyway, so omit it for now:
# conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
conf = khi2 = nfree = None
# No corresponding 'logger' message here because it should go *very* fast
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
pos=None, ori=None, verbose=None):
"""Fit a dipole.
Parameters
----------
evoked : instance of Evoked
The dataset to fit.
cov : str | instance of Covariance
The noise covariance.
bem : str | instance of ConductorModel
The BEM filename (str) or conductor model.
trans : str | None
The head<->MRI transform filename. Must be provided unless BEM
is a sphere model.
min_dist : float
Minimum distance (in millimeters) from the dipole to the inner skull.
Must be positive. Note that because this is a constraint passed to
a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
fits could be 4.9 mm from the inner skull.
n_jobs : int
Number of jobs to run in parallel (used in field computation
and fitting).
pos : ndarray, shape (3,) | None
Position of the dipole to use. If None (default), sequential
fitting (different position and orientation for each time instance)
is performed. If a position (in head coords) is given as an array,
the position is fixed during fitting.
.. versionadded:: 0.12
ori : ndarray, shape (3,) | None
Orientation of the dipole to use. If None (default), the
orientation is free to change as a function of time. If an
orientation (in head coordinates) is given as an array, ``pos``
must also be provided, and the routine computes the amplitude and
goodness of fit of the dipole at the given position and orientation
for each time instant.
.. versionadded:: 0.12
%(verbose)s
Returns
-------
dip : instance of Dipole or DipoleFixed
The dipole fits. A :class:`mne.DipoleFixed` is returned if
``pos`` and ``ori`` are both not None, otherwise a
:class:`mne.Dipole` is returned.
residual : instance of Evoked
The M-EEG data channels with the fitted dipolar activity removed.
See Also
--------
mne.beamformer.rap_music
Dipole
DipoleFixed
read_dipole
Notes
-----
.. versionadded:: 0.9.0
"""
# This could eventually be adapted to work with other inputs, these
# are what is needed:
evoked = evoked.copy()
# Determine if a list of projectors has an average EEG ref
if _needs_eeg_average_ref_proj(evoked.info):
raise ValueError('EEG average reference is mandatory for dipole '
'fitting.')
if min_dist < 0:
raise ValueError('min_dist should be positive. Got %s' % min_dist)
if ori is not None and pos is None:
raise ValueError('pos must be provided if ori is not None')
data = evoked.data
if not np.isfinite(data).all():
raise ValueError('Evoked data must be finite')
info = evoked.info
times = evoked.times.copy()
comment = evoked.comment
# Convert the min_dist to meters
min_dist_to_inner_skull = min_dist / 1000.
del min_dist
# Figure out our inputs
neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[]))
if isinstance(bem, str):
bem_extra = bem
else:
bem_extra = repr(bem)
logger.info('BEM : %s' % bem_extra)
mri_head_t, trans = _get_trans(trans)
logger.info('MRI transform : %s' % trans)
bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False)
if not bem['is_sphere']:
# Find the best-fitting sphere
inner_skull = _bem_find_surface(bem, 'inner_skull')
inner_skull = inner_skull.copy()
R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
# r0 back to head frame for logging
r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
inner_skull['r0'] = r0
logger.info('Head origin : '
'%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
del R, r0
else:
r0 = bem['r0']
if len(bem.get('layers', [])) > 0:
R = bem['layers'][0]['rad']
kind = 'rad'
else: # MEG-only
# Use the minimum distance to the MEG sensors as the radius then
R = np.dot(linalg.inv(info['dev_head_t']['trans']),
np.hstack([r0, [1.]]))[:3] # r0 -> device
R = R - [info['chs'][pick]['loc'][:3]
for pick in pick_types(info, meg=True, exclude=[])]
if len(R) == 0:
raise RuntimeError('No MEG channels found, but MEG-only '
'sphere model used')
R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors
kind = 'max_rad'
logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, '
'%s = %6.1f mm'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))
inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame
del R, r0
accurate = False # can be an option later (shouldn't make big diff)
# Deal with DipoleFixed cases here
if pos is not None:
fixed_position = True
pos = np.array(pos, float)
if pos.shape != (3,):
raise ValueError('pos must be None or a 3-element array-like,'
' got %s' % (pos,))
logger.info('Fixed position : %6.1f %6.1f %6.1f mm'
% tuple(1000 * pos))
if ori is not None:
ori = np.array(ori, float)
if ori.shape != (3,):
                raise ValueError('ori must be None or a 3-element array-like,'
' got %s' % (ori,))
norm = np.sqrt(np.sum(ori * ori))
if not np.isclose(norm, 1):
raise ValueError('ori must be a unit vector, got length %s'
% (norm,))
logger.info('Fixed orientation : %6.4f %6.4f %6.4f mm'
% tuple(ori))
else:
logger.info('Free orientation : <time-varying>')
fit_n_jobs = 1 # only use 1 job to do the guess fitting
else:
fixed_position = False
# Eventually these could be parameters, but they are just used for
# the initial grid anyway
guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
guess_mindist = max(0.005, min_dist_to_inner_skull)
guess_exclude = 0.02
logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
if guess_mindist > 0.0:
logger.info('Guess mindist : %6.1f mm'
% (1000 * guess_mindist,))
if guess_exclude > 0:
logger.info('Guess exclude : %6.1f mm'
% (1000 * guess_exclude,))
logger.info('Using %s MEG coil definitions.'
% ("accurate" if accurate else "standard"))
fit_n_jobs = n_jobs
if isinstance(cov, str):
logger.info('Noise covariance : %s' % (cov,))
cov = read_cov(cov, verbose=False)
logger.info('')
_print_coord_trans(mri_head_t)
_print_coord_trans(info['dev_head_t'])
logger.info('%d bad channels total' % len(info['bads']))
# Forward model setup (setup_forward_model from setup.c)
ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]
megcoils, compcoils, megnames, meg_info = [], [], [], None
eegels, eegnames = [], []
if 'grad' in ch_types or 'mag' in ch_types:
megcoils, compcoils, megnames, meg_info = \
_prep_meg_channels(info, exclude='bads',
accurate=accurate, verbose=verbose)
if 'eeg' in ch_types:
eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
verbose=verbose)
# Ensure that MEG and/or EEG channels are present
if len(megcoils + eegels) == 0:
raise RuntimeError('No MEG or EEG channels found.')
# Whitener for the data
logger.info('Decomposing the sensor noise covariance matrix...')
picks = pick_types(info, meg=True, eeg=True, ref_meg=False)
# In case we want to more closely match MNE-C for debugging:
# from .io.pick import pick_info
# from .cov import prepare_noise_cov
# info_nb = pick_info(info, picks)
# cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
# nzero = (cov['eig'] > 0)
# n_chan = len(info_nb['ch_names'])
# whitener = np.zeros((n_chan, n_chan), dtype=np.float)
# whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
# whitener = np.dot(whitener, cov['eigvec'])
whitener, _, rank = compute_whitener(cov, info, picks=picks,
return_rank=True)
# Proceed to computing the fits (make_guess_data)
if fixed_position:
guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))
logger.info('Compute forward for dipole location...')
else:
logger.info('\n---- Computing the forward solution for the guesses...')
guess_src = _make_guesses(inner_skull, guess_grid, guess_exclude,
guess_mindist, n_jobs=n_jobs)[0]
# grid coordinates go from mri to head frame
transform_surface_to(guess_src, 'head', mri_head_t)
logger.info('Go through all guess source locations...')
# inner_skull goes from mri to head frame
if 'rr' in inner_skull:
transform_surface_to(inner_skull, 'head', mri_head_t)
if fixed_position:
if 'rr' in inner_skull:
check = _surface_constraint(pos, inner_skull,
min_dist_to_inner_skull)
else:
check = _sphere_constraint(
pos, inner_skull['r0'],
R_adj=inner_skull['R'] - min_dist_to_inner_skull)
if check <= 0:
raise ValueError('fixed position is %0.1fmm outside the inner '
'skull boundary' % (-1000 * check,))
# C code computes guesses w/sphere model for speed, don't bother here
fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
inner_skull=inner_skull)
# fwd_data['inner_skull'] in head frame, bem in mri, confusing...
_prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
verbose=False)
guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(
fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)
# decompose ahead of time
guess_fwd_svd = [linalg.svd(fwd, overwrite_a=False, full_matrices=False)
for fwd in np.array_split(guess_fwd,
len(guess_src['rr']))]
guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,
fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)
del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # destroyed
logger.info('[done %d source%s]' % (guess_src['nuse'],
_pl(guess_src['nuse'])))
# Do actual fits
data = data[picks]
ch_names = [info['ch_names'][p] for p in picks]
proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
fun = _fit_dipole_fixed if fixed_position else _fit_dipole
out = _fit_dipoles(
fun, min_dist_to_inner_skull, data, times, guess_src['rr'],
guess_data, fwd_data, whitener, ori, n_jobs, rank)
assert len(out) == 8
if fixed_position and ori is not None:
# DipoleFixed
data = np.array([out[1], out[3]])
out_info = deepcopy(info)
loc = np.concatenate([pos, ori, np.zeros(6)])
out_info['chs'] = [
dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,
coil_type=FIFF.FIFFV_COIL_DIPOLE,
unit_mul=0, range=1, cal=1., scanno=1, logno=1),
dict(ch_name='goodness', loc=np.full(12, np.nan),
kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
coil_type=FIFF.FIFFV_COIL_NONE,
unit_mul=0, range=1., cal=1., scanno=2, logno=100)]
for key in ['hpi_meas', 'hpi_results', 'projs']:
out_info[key] = list()
for key in ['acq_pars', 'acq_stim', 'description', 'dig',
'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',
'subject_info']:
out_info[key] = None
out_info['bads'] = []
out_info._update_redundant()
out_info._check_consistency()
dipoles = DipoleFixed(out_info, data, times, evoked.nave,
evoked._aspect_kind, evoked.first, evoked.last,
comment)
else:
dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment,
out[4], out[5], out[6])
residual = evoked.copy().apply_proj() # set the projs active
residual.data[picks] = np.dot(proj_op, out[-1])
logger.info('%d time points fitted' % len(dipoles.times))
return dipoles, residual
def get_phantom_dipoles(kind='vectorview'):
"""Get standard phantom dipole locations and orientations.
Parameters
----------
kind : str
Get the information for the given system:
``vectorview`` (default)
The Neuromag VectorView phantom.
``otaniemi``
The older Neuromag phantom used at Otaniemi.
Returns
-------
pos : ndarray, shape (n_dipoles, 3)
The dipole positions.
ori : ndarray, shape (n_dipoles, 3)
The dipole orientations.
Notes
-----
The Elekta phantoms have a radius of 79.5mm, and HPI coil locations
in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...).
"""
_check_option('kind', kind, ['vectorview', 'otaniemi'])
if kind == 'vectorview':
# these values were pulled from a scanned image provided by
# Elekta folks
a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])
b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3])
x = np.concatenate((a, [0] * 8, -b, [0] * 8))
y = np.concatenate(([0] * 8, -a, [0] * 8, b))
c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0]
d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]
z = np.concatenate((c, c, d, d))
signs = ([1, -1] * 4 + [-1, 1] * 4) * 2
elif kind == 'otaniemi':
        # these values were pulled from a Neuromag manual
# (NM20456A, 13.7.1999, p.65)
a = np.array([56.3, 47.6, 39.0, 30.3])
b = np.array([32.5, 27.5, 22.5, 17.5])
c = np.zeros(4)
x = np.concatenate((a, b, c, c, -a, -b, c, c))
y = np.concatenate((c, c, -a, -b, c, c, b, a))
z = np.concatenate((b, a, b, a, b, a, a, b))
signs = [-1] * 8 + [1] * 16 + [-1] * 8
pos = np.vstack((x, y, z)).T / 1000.
# Locs are always in XZ or YZ, and so are the oris. The oris are
# also in the same plane and tangential, so it's easy to determine
# the orientation.
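    # Added note: for a position like (x, 0, z) the loop below produces an
    # orientation proportional to (z, 0, -x), i.e. perpendicular to the
    # position vector within its plane, with the sign taken from ``signs``.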
ori = list()
for pi, this_pos in enumerate(pos):
this_ori = np.zeros(3)
idx = np.where(this_pos == 0)[0]
# assert len(idx) == 1
idx = np.setdiff1d(np.arange(3), idx[0])
this_ori[idx] = (this_pos[idx][::-1] /
np.linalg.norm(this_pos[idx])) * [1, -1]
this_ori *= signs[pi]
# Now we have this quality, which we could uncomment to
# double-check:
# np.testing.assert_allclose(np.dot(this_ori, this_pos) /
# np.linalg.norm(this_pos), 0,
# atol=1e-15)
ori.append(this_ori)
ori = np.array(ori)
return pos, ori
def _concatenate_dipoles(dipoles):
"""Concatenate a list of dipoles."""
times, pos, amplitude, ori, gof = [], [], [], [], []
for dipole in dipoles:
times.append(dipole.times)
pos.append(dipole.pos)
amplitude.append(dipole.amplitude)
ori.append(dipole.ori)
gof.append(dipole.gof)
return Dipole(np.concatenate(times), np.concatenate(pos),
np.concatenate(amplitude), np.concatenate(ori),
np.concatenate(gof), name=None)
| adykstra/mne-python | mne/dipole.py | Python | bsd-3-clause | 52898 | ["Mayavi"] | b1fc1c2f003eb97d49a2bc7568fb87001d0038b3e9e20bbd9fda08d365cf1652 |
import os
from zombie.compat import TestCase, PY3
if PY3:
from socketserver import UnixStreamServer, StreamRequestHandler
else:
from SocketServer import UnixStreamServer, StreamRequestHandler
import threading
try:
from json import loads, dumps
except ImportError:
from simplejson import loads, dumps # noqa
import fudge
from zombie.proxy.client import (
encode,
encode_args,
decode,
Element,
NodeError,
ZombieServerConnection,
ZombieProxyClient)
from zombie.proxy.server import ZombieProxyServer
from zombie.tests.webserver import WebServerTestCase
class EncodeTests(TestCase):
def test_json(self):
obj = lambda: 1
obj.json = "myjson"
self.assertEqual("myjson", encode(obj))
def test_json_method(self):
obj = lambda: 1
obj.__json__ = lambda: "anotherjson"
self.assertEqual("anotherjson", encode(obj))
def test_asis(self):
obj = [1, 2]
self.assertEqual("[1, 2]", encode(obj))
class EncodeArgsTests(TestCase):
def test_none(self):
self.assertEqual('', encode_args(None))
def test_empty(self):
self.assertEqual('', encode_args([]))
def test_arguments(self):
self.assertEqual('"one", "two"', encode_args(['one', 'two']))
def test_arguments_extra(self):
self.assertEqual('"one", ', encode_args(['one'], True))
class DecodeTests(TestCase):
def test_none(self):
self.assertEqual(None, decode(None))
def test_something(self):
self.assertEqual([1], decode("[1]"))
class ElementTests(TestCase):
def test_index(self):
self.assertEqual(15, Element(15).index)
def test_json(self):
self.assertEqual("ELEMENTS[15]", Element(15).json)
def test_str(self):
self.assertEqual("ELEMENTS[15]", str(Element(15)))
class EchoHandler(StreamRequestHandler):
def handle(self):
self.wfile.write(self.rfile.readline())
class EchoServer(threading.Thread):
def __init__(self, address):
super(EchoServer, self).__init__()
self.daemon = True
self.server = UnixStreamServer(address, EchoHandler)
def run(self):
self.server.handle_request()
def stop(self):
self.server.shutdown()
class ZombieServerConnectionTests(TestCase):
address = '/tmp/testing-unix-server'
def cleanup(self):
if os.path.exists(self.address):
os.remove(self.address)
def setUp(self):
self.cleanup()
self.server = EchoServer(self.address)
self.server.start()
self.connection = ZombieServerConnection(self.address)
def tearDown(self):
self.cleanup()
def test_send(self):
res = self.connection.send('Hello world!\n')
self.assertEqual('Hello world!\n', res)
class ZombieProxyClientTests(WebServerTestCase):
def setUp(self):
super(ZombieProxyClientTests, self).setUp()
        # Note: the proxy server acts as a singleton, so it is only created
        # once rather than once per test.
self.server = ZombieProxyServer()
self.client = ZombieProxyClient(self.server.socket)
def tearDown(self):
super(ZombieProxyClientTests, self).tearDown()
def test_simple_json(self):
obj = {
'foo': 'bar',
'test': 500
}
self.assertEqual(obj, self.client.json(obj))
def test_malformed_command(self):
with self.assertRaises(NodeError):
self.client.json("banana")
def test_nowait(self):
self.assertEqual('Test', self.client.nowait("result = 'Test';"))
def test_wait(self):
self.client.wait('browser.visit', self.base_url)
def test_wait_error(self):
with self.assertRaises(NodeError):
self.client.wait('browser.visit', self.base_url + 'notfound')
def test_ping(self):
self.assertEqual("pong", self.client.ping())
def test_cleanup(self):
client = self.client
self.assertEqual(1, client.json('browser.testing = 1'))
client.cleanup()
self.assertFalse(client.json('"testing" in browser'))
def test_create_element(self):
client = self.client
client.wait('browser.visit', self.base_url)
self.assertEqual(
0,
client.create_element('browser.query', ('form',)).index)
self.assertEqual(
1,
client.create_element('browser.query', ('form',)).index)
def test_create_element_attribute(self):
client = self.client
client.wait('browser.visit', self.base_url)
self.assertEqual(
0, client.create_element('browser.html').index)
def test_create_elements(self):
client = self.client
client.wait('browser.visit', self.base_url)
res = client.create_elements('browser.queryAll', ('input', ))
self.assertEqual(list(range(6)), [x.index for x in res])
| ryanpetrello/python-zombie | zombie/tests/test_client.py | Python | mit | 4899 | ["VisIt"] | 6c96dc371589a9a91ace524710452d1901d6b1a388e32c67b3e7fca035394c00 |
import os, re, time, shutil, json
import dendropy
from seq_util import *
from date_util import *
from virus_stability import virus_stability
import rethinkdb as r
import hashlib
class tree_stability(object):
'''
    Goes back through all virus objects and looks up their calculated ddG from the outgroup;
    virus_stability determines all of their other metadata. Prints all of this information to
    stability-data/ddg_output.txt and assigns the ddG to the 'ep' attribute of all tree nodes.
'''
def __init__(self, **kwargs):
self.stability_output = "stability-data/"
self.output_file = open(self.stability_output + "ddg_output.txt", 'w')
self.sequence_to_stability = {}
if 'RETHINK_AUTH_KEY' in os.environ:
self.auth_key = os.environ['RETHINK_AUTH_KEY']
if self.auth_key is None:
raise Exception("Missing auth_key")
self.database = 'test'
self.table = 'stability'
self.connect_rethink()
self.structure_seqs = {}
self.structure_seqs['1HA0'] = "MKTIIALSYILCLVFAQKLPGNDNSTATLCLGHHAVPNGTLVKTITDDQIEVTNATELVQSSSTGKICNNPHRILDGIDCTLIDALLGDPHCDVFQNETWDLFVERSKAFSNCYPYDVPDYASLRSLVASSGTLEFITEGFTWTGVTQNGGSNACKRGPGSGFFSRLNWLTKSGSTYPVLNVTMPNNDNFDKLYIWGIHHPSTNQEQTSLYVQASGRVTVSTRRSQQTIIPNIGSRPWVRGLSSRISIYWTIVKPGDVLVINSNGNLIAPRGYFKMRTGKSSIMRSDAPIDTCISECITPNGSIPNDKPFQNVNKITYGACPKYVKQNTLKLATGMRNVPEKQTQGLFGAIAGFIENGWEGMIDGWYGFRHQNSEGTGQAADLKSTQAAIDQINGKLNRVIEKTNEKFHQIEKEFSEVEGRIQDLEKYVEDTKIDLWSYNAELLVALENQHTIDLTDSEMNKLFEKTRRQLRENAEEMGNGCFKIYHKCDNACIESIRNGTYDHDVYRNEALNNRFQI"
self.structure_seqs['2YP7'] = "MKTIIALSYILCLVFAQKLPGNDNSTATLCLGHHAVPNGTIVKTITNDQIEVTNATELVQSSSTGGICDSPHQILDGENCTLIDALLGDPQCDGFQNKKWDLFVERSKAYSNCYPYDVPDYASLRSLVASSGTLEFNNESFNWTGVTQNGTSSACKRKSNNSFFSRLNWLTHLKFKYPALNVTMPNNEKFDKLYIWGVHHPGTDNDQIFLYAQASGRITVSTKRSQQTVIPNIGSRPRVRNIPSRISIYWTIVKPGDILLINSTGNLIAPRGYFKIRSGKSSIMRSDAPIGKCNSECITPNGSIPNDKPFQNVNRITYGACPRYVKQNTLKLATGMRNVPEKQTRGIFGAIAGFIENGWEGMVDGWYGFRHQNSEGIGQAADLKSTQAAINQINGKLNRLIGKTNEKFHQIEKEFSEVEGRIQDLEKYVEDTKIDLWSYNAELLVALENQHTIDLTDSEMNKLFERTKKQLRENAEDMGNGCFKIYHKCDNACIGSIRNGTYDHDVYRDEALNNRFQIKGVELKSGYKDWILWISFAISCFLLCVALLGFIMWACQKGNIRCNICI"
self.structure_seqs['2YP2'] = "MKTIIALSYILCLVFAQKLPGNDNSTATLCLGHHAVPNGTIVKTITNDQIEVTNATELVQSSSTGGICDSPHQILDGENCTLIDALLGDPQCDGFQNKKWDLFVERSKAYSNCYPYDVPDYASLRSLVASSGTLEFNNESFNWTGVTQNGTSSACKRRSNNSFFSRLNWLTHLKFKYPALNVTMPNNEKFDKLYIWGVHHPGTDNDQISLYAQASGRITVSTKRSQQTVIPNIGSRPRVRDIPSRISIYWTIVKPGDILLINSTGNLIAPRGYFKIRSGKSSIMRSDAPIGKCNSECITPNGSIPNDKPFQNVNRITYGACPRYVKQNTLKLATGMRNVPEKQTRGIFGAIAGFIENGWEGMVDGWYGFRHQNSEGIGQAADLKSTQAAINQINGKLNRLIGKTNEKFHQIEKEFSEVEGRIQDLEKYVEDTKIDLWSYNAELLVALENQHTIDLTDSEMNKLFERTKKQLRENAEDMGNGCFKIYHKCDNACIGSIRNGTYDHDVYRDEALNNRFQIKGVELKSGYKDWILWISFAISCFLLCVALLGFIMWACQKGNIRCNICI"
def connect_rethink(self):
'''
        Connect to the rethink database.
'''
try:
r.connect(host="ec2-52-90-204-136.compute-1.amazonaws.com", port=28015, db=self.database, auth_key=self.auth_key).repl()
print("Connected to the \"" + self.database + "\" database")
except:
print("Failed to connect to the database, " + self.database)
raise Exception
def calculate_stability(self):
print("Reading in new calculated stabilities for sequences")
self.sum_ddg()
self.classify()
#self.epistasis_ddG()
self.print_viruses()
self.assign_node_ddG()
def classify(self):
for virus in self.hash_to_virus.values():
if virus['trunk'] == True:
classification = 'trunk'
elif virus['tip'] == True:
classification = 'tip'
else:
classification = 'branch'
virus['classification'] = classification
def print_viruses(self):
print("Printing Viruses")
json.dump(self.hash_to_virus, self.output_file, indent=1)
self.output_file.close()
def epistasis_ddG(self):
'''
        Go through each virus object and determine the list of FoldX-formatted mutations for each
        structure, and calculate the ddG from the outgroup to the current virus for each structure.
'''
for virus in self.hash_to_virus.values():
self.get_stability(virus)
def get_stability(self, virus):
'''
        Checks the stability table to see whether stability has already been calculated for this sequence.
        If found, assigns the per-structure stability ('epistasis_ddg') to the virus; otherwise prints a message.
'''
sequence = virus['seq']
hash_function = hashlib.md5()
hash_function.update(sequence)
hash_sequence = hash_function.hexdigest()
document = r.table(self.table).get(hash_sequence).run()
if document is not None:
if all(structure in document for structure in self.structures):
for structure in self.structures:
virus[structure]['epistasis_ddg'] = document[structure]
else:
print("Couldn't find " + hash_sequence + " in rethinkdb")
def sum_ddg(self):
'''
sum up individual ddg mutation effects compared to each structure
'''
print("Determining stability change by summing individual mutation effects on each structure")
self.open_mutator()
for virus in self.hash_to_virus.values():
for structure in self.structures:
virus[structure] = self.align_to_structure(virus, structure)
ddg = 0
for mut in virus[structure]['mutations']:
if '*' not in mut:
ddg += self.mutator_ddg[structure][mut]
virus[structure]['sum_ddg'] = ddg
def open_mutator(self):
self.mutator_ddg = {}
for structure in self.structures:
file = open("source-data/" + structure + "_mutator_ddg.txt", 'r')
mut_ddg = {}
for line in file:
info = line.split()
mut_ddg[info[1]] = float(info[0])
self.mutator_ddg[structure] = mut_ddg
def align_to_structure(self, virus, structure):
'''
        Aligns the structure sequence to the virus sequence.
        :return: dict containing the mutations from the structure sequence to the virus sequence
'''
mutations = []
structure_align_seq = self.structure_seqs[structure][24:]
virus_align_seq = virus['seq'][24:]
        if len(structure_align_seq) > len(virus_align_seq):
            print("Structure sequence is longer than the virus sequence")
raise Exception
for index in range(len(structure_align_seq)):
site = index + 9 # for both 1HA0 and 2YP7, start at site number 9 in structure ("STAT...")
if structure_align_seq[index] != virus_align_seq[index]:
mutation = structure_align_seq[index] + str(site) + virus_align_seq[index]
mutations.append(mutation)
        mutations = list(filter(lambda mut: self.site_range_valid(mut), mutations))
if structure == '1HA0':
virus['predtime'] = len(mutations)
return {'mutations': mutations}
#self.structure_muts[structure] = list(mutations_set)
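        # Added note (illustrative): a produced mutation string looks like
        # 'N9K' -- structure residue, site number (index + 9), virus residue;
        # sum_ddg() later looks these strings up in the per-structure
        # *_mutator_ddg.txt tables.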
def site_range_valid(self, mutation):
'''
        The protein structures (1HA0, 2YP7) are missing certain amino acid sites; this method checks that the mutation site is present in the structure.
:param mutation: mutation in standard format
:return: true if site is in structure, false if site range is not in structure
'''
lowerRange = 9
upperRange = 502
missing_lower = 328
missing_upper = 333
site = int(mutation[1:len(mutation) - 1])
if missing_lower <= site <= missing_upper: # in missing middle section
return False
elif lowerRange <= site <= upperRange: # in range of protein structure besides middle section
return True
else:
return False
'''
def viruses_parent_ddG(self):
go through each virus object and calculate ddG from the parent to the current virus, also get mutations from parent to new virus
print("Calculating parent to virus ddg")
print(len(self.virus_and_parent))
for pair in self.virus_and_parent:
virus = pair[0]
parent = pair[1]
try:
virus_ddg = self.sequence_to_stability[virus.seq]
except:
virus_ddg = ['0.0', '0.0']
print("Couldn't find in dictionary")
print(virus.seq)
try:
parent_ddg = self.sequence_to_stability[parent.seq]
except:
parent_ddg = ['0.0', '0.0']
print("Couldn't find in dictionary")
print(parent.seq)
virus.parent_strain = parent.strain
virus.calculate_ddg_parent(virus_ddg, parent_ddg)
virus.get_parent_mutations(virus.mutations_from_outgroup, parent.mutations_from_outgroup)
'''
def assign_node_ddG(self):
print("assigning ddg attribute to nodes")
for node in self.tree.postorder_node_iter():
hash = str(node)
virus = self.hash_to_virus[hash]
ddg = ((virus['2YP7']['sum_ddg'] + virus['2YP2']['sum_ddg']) / 2)
try:
setattr(node, 'yvalue', ddg)
setattr(node, 'ep', ddg)
except:
print("couldn't assign ddg attribute to current node")
| blab/stability | augur/src/tree_stability.py | Python | agpl-3.0 | 9594 | ["FoldX"] | 84d17a0778b46ef46814c02e3222a246bf8199549cbb5585119da79f8b754966 |
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
import logging
allcg = AllCoordinationGeometries()
lgf = LocalGeometryFinder()
logging.basicConfig(format="%(levelname)s:%(module)s:%(funcName)s:%(message)s", level="DEBUG")
mp_symbol = "DD:20"
coordination = 20
myindices = [8, 12, 11, 0, 14, 10, 13, 6, 18, 1, 9, 17, 3, 19, 5, 7, 15, 2, 16, 4]
cg = allcg.get_geometry_from_mp_symbol(mp_symbol=mp_symbol)
lgf.allcg = AllCoordinationGeometries(only_symbols=[mp_symbol])
lgf.setup_test_perfect_environment(
mp_symbol,
randomness=False,
indices=myindices,
random_translation="NONE",
random_rotation="NONE",
random_scale="NONE",
)
se = lgf.compute_structure_environments(
only_indices=[0],
maximum_distance_factor=1.01 * cg.distfactor_max,
min_cn=cg.coordination_number,
max_cn=cg.coordination_number,
only_symbols=[mp_symbol],
)
print(se.ce_list[0][20][0])
| vorwerkc/pymatgen | dev_scripts/chemenv/check_new_coordination_geometry.py | Python | mit | 1080 | ["pymatgen"] | 61324ec0868dc3327548e00ff91917d2c825a1606235ca217fb8b4554450edef |
# iCraft is Copyright 2010 both
#
# The Archives team:
# <Adam Guy> adam@adam-guy.com AKA "Adam01"
# <Andrew Godwin> andrew@aeracode.org AKA "Aera"
# <Dylan Lukes> lukes.dylan@gmail.com AKA "revenant"
# <Gareth Coles> colesgareth2@hotmail.com AKA "gdude2002"
#
# And,
#
# The iCraft team:
# <Andrew Caluzzi> tehcid@gmail.com AKA "tehcid"
# <Andrew Dolgov> fox@bah.org.ru AKA "gothfox"
# <Andrew Horn> Andrew@GJOCommunity.com AKA "AndrewPH"
# <Brad Reardon> brad@bradness.co.cc AKA "PixelEater"
# <Clay Sweetser> CDBKJmom@aol.com AKA "Varriount"
# <James Kirslis> james@helplarge.com AKA "iKJames"
# <Jason Sayre> admin@erronjason.com AKA "erronjason"
# <Jonathon Dunford> sk8rjwd@yahoo.com AKA "sk8rjwd"
# <Joseph Connor> destroyerx100@gmail.com AKA "destroyerx1"
# <Joshua Connor> fooblock@live.com AKA "Fooblock"
# <Kamyla Silva> supdawgyo@hotmail.com AKA "NotMeh"
# <Kristjan Gunnarsson> kristjang@ffsn.is AKA "eugo"
# <Nathan Coulombe> NathanCoulombe@hotmail.com AKA "Saanix"
# <Nick Tolrud> ntolrud@yahoo.com AKA "ntfwc"
# <Noel Benzinger> ronnygmod@gmail.com AKA "Dwarfy"
# <Randy Lyne> qcksilverdragon@gmail.com AKA "goober"
# <Willem van der Ploeg> willempieeploeg@live.nl AKA "willempiee"
#
#
# And,
#
# The iCraftBlah Single-Man modifier!
#
# <Brian Hansen> jabawakijadehawk@gmail.com AKA "blahblahbal"
#
#
# Disclaimer: Parts of this code may have been contributed by the end-users.
#
# iCraft is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# Or, send a letter to Creative Commons, 171 2nd Street,
# Suite 300, San Francisco, California, 94105, USA.
var_cango = True
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(x,y-1,z)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = (x,y-1,z)
x,y,z = var_position
block = chr(21)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(3)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
else:
closestposition = (0,0)
closestclient = None
closestdistance = None
for entry in userpositionlist:
client = entry[0]
var_pos = entry[1]
i,j,k = var_pos
distance = ((i-x)**2+(j-y)**2+(k-z)**2)**0.5
            if closestdistance is None:
closestdistance = distance
closestclient = client
closestposition = (var_pos[0],var_pos[2])
else:
if distance < closestdistance:
closestdistance = distance
closestclient = client
closestposition = (var_pos[0],var_pos[2])
if closestdistance < 2:
closestclient.sendServerMessage("I pity the fool.")
i,k = closestposition
distance = ((i-x)**2+(k-z)**2)**0.5
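            # Added explanatory note: the target computed below steps roughly
            # 1.75 blocks from the current (x, z) position towards the closest
            # player in the horizontal plane, keeping integer block coordinates.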
if distance != 0 and distance > 2:
target = [int((i-x)/(distance/1.75)) + x,y,int((k-z)/(distance/1.75)) + z]
i,j,k = target
var_cango = True
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j,k)])
if blocktocheck != 0:
var_cango = False
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j+1,k)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = target
x,y,z = var_position
block = chr(21)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(3)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
else:
var_cango = True
target[1] = target[1] + 1
j = target[1]
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j,k)])
if blocktocheck != 0:
var_cango = False
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j+1,k)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = target
x,y,z = var_position
block = chr(21)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(3)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
| TheArchives/Nexus | core/entities/mrt.py | Python | bsd-2-clause | 6657 | ["Brian", "VisIt"] | 3b1bba3c13e793c6e5563b2005d000f13fc058ac8b5649fc4a1dca7d10a6636e |
import logging
import numpy as np
import pytest
import nengo
from neurons import AdaptiveLIF
from nengo.utils.numpy import rms
logger = logging.getLogger(__name__)
def test_alif_neuron(Simulator, plt):
"""Test that the adaptive LIF dynamic model matches the predicted rates
Tests a single neuron across multiple input currents
"""
tau_n = .1
inc_n = 10.
max_rates = np.array([8.095])
intercepts = np.array([.059])
# tau_n = .1
# inc_n = .1
# max_rates = np.array([200.])
# intercepts = np.array([.2])
alif_neuron = AdaptiveLIF(tau_rc=.05, tau_ref=.002,
tau_n=tau_n, inc_n=inc_n)
u_vals = np.array([
0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.])
Ts = [
.1, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0]
sim_rates = np.zeros_like(u_vals)
num_rates = np.zeros_like(u_vals)
for idx, (u, T) in enumerate(zip(u_vals, Ts)):
net = nengo.Network()
with net:
stim = nengo.Node(u)
net.ens = nengo.Ensemble(
1, 1, neuron_type=alif_neuron,
max_rates=max_rates, intercepts=intercepts)
nengo.Connection(stim, net.ens.neurons, transform=np.array([[1.]]),
synapse=None)
net.ps = nengo.Probe(net.ens.neurons, 'spikes')
sim = Simulator(net)
est_rate = net.ens.neuron_type.rates(
u, sim.data[net.ens].gain, sim.data[net.ens].bias)
sim.run(T)
num_rates[idx] = est_rate
spks = sim.data[net.ps]
spk_times = np.nonzero(spks)[0]*sim.dt
if len(spk_times) > 1:
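            # Added note: only the most recent inter-spike intervals are used
            # below so the adaptation variable has (approximately) reached
            # steady state before the firing rate is estimated.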
isi = np.diff(spk_times)
sim_rates[idx] = 1. / np.mean(isi[-10:])
if est_rate > 0.:
rel_diff = abs(est_rate - sim_rates[idx]) / est_rate
assert rel_diff < .01, (
'Estimated rate differs from rate extracted from simulation' +
' by more than 1%% for u=%f' % (u))
else:
assert sim_rates[idx] == 0.
plt.plot(u_vals, sim_rates, 'bo', ms=6, label='simulated rate')
plt.plot(u_vals, num_rates, 'ro', ms=6, label='numerically estimated rate')
plt.legend(loc='upper left')
plt.xlabel('input')
plt.ylabel('rate')
def test_alif_neurons(Simulator, plt, rng):
"""Test that the adaptive LIF dynamic model matches the predicted rates
Tests an Ensemble of neurons at a single input value
"""
dt = 0.001
n = 100
x = .5
encoders = np.ones((n, 1))
max_rates = rng.uniform(low=10, high=200, size=n)
intercepts = rng.uniform(low=-1, high=1, size=n)
net = nengo.Network()
with net:
ins = nengo.Node(x)
ens = nengo.Ensemble(
n, dimensions=1, encoders=encoders,
max_rates=max_rates, intercepts=intercepts,
neuron_type=AdaptiveLIF(tau_n=.1, inc_n=.1))
nengo.Connection(ins, ens.neurons, transform=np.ones((n, 1)),
synapse=None)
spike_probe = nengo.Probe(ens.neurons)
voltage_probe = nengo.Probe(ens.neurons, 'voltage')
adaptation_probe = nengo.Probe(ens.neurons, 'adaptation')
ref_probe = nengo.Probe(ens.neurons, 'refractory_time')
sim = Simulator(net, dt=dt)
t_final = 3.0
t_ss = 1.0 # time to consider neurons at steady state
sim.run(t_final)
n_select = rng.choice(n) # pick a random neuron
t = sim.trange()
idx = t < t_ss
plt.figure(figsize=(10, 6))
plt.subplot(411)
plt.plot(t[idx], sim.data[spike_probe][idx, n_select])
plt.ylabel('spikes')
plt.subplot(412)
plt.plot(t[idx], sim.data[voltage_probe][idx, n_select])
plt.ylabel('voltage')
plt.subplot(413)
plt.plot(t[idx], sim.data[adaptation_probe][idx, n_select])
plt.ylabel('adaptation')
plt.subplot(414)
plt.plot(t[idx], sim.data[ref_probe][idx, n_select])
plt.ylim([-dt, ens.neuron_type.tau_ref + dt])
plt.xlabel('time')
plt.ylabel('ref time')
# check rates against analytic rates
math_rates = ens.neuron_type.rates(
x, *ens.neuron_type.gain_bias(max_rates, intercepts))
idx = t >= t_ss
spikes = sim.data[spike_probe][idx, :]
sim_rates = (spikes > 0).sum(0) / (t_final - t_ss)
logger.debug("ME = %f", (sim_rates - math_rates).mean())
logger.debug("RMSE = %f",
rms(sim_rates - math_rates) / (rms(math_rates) + 1e-20))
assert np.sum(math_rates > 0) > 0.5 * n, (
"At least 50% of neurons must fire")
assert np.allclose(sim_rates, math_rates, atol=1, rtol=0.001)
if __name__ == "__main__":
nengo.log(debug=True)
pytest.main([__file__, '-v'])
| fragapanagos/adaptive_lif | test_neurons.py | Python | mit | 4689 | ["NEURON"] | 61fd7ea29d30cd592336e88f1e5a28fb634af9925224b17dad1037c93ebab373 |
'''
Utilities for tests.
'''
from six.moves import zip
import numpy as np
import astropy.units as u
from astropy.io import fits
from astropy.utils import NumpyRNGContext
from ..spectral_cube import SpectralCube
def generate_header(pixel_scale, spec_scale, beamfwhm, imshape, v0):
header = {'CDELT1': -(pixel_scale).to(u.deg).value,
'CDELT2': (pixel_scale).to(u.deg).value,
'BMAJ': beamfwhm.to(u.deg).value,
'BMIN': beamfwhm.to(u.deg).value,
'BPA': 0.0,
'CRPIX1': imshape[0] / 2.,
'CRPIX2': imshape[1] / 2.,
'CRVAL1': 0.0,
'CRVAL2': 0.0,
'CTYPE1': 'GLON-CAR',
'CTYPE2': 'GLAT-CAR',
'CUNIT1': 'deg',
'CUNIT2': 'deg',
'CRVAL3': v0,
'CUNIT3': spec_scale.unit.to_string(),
'CDELT3': spec_scale.value,
'CRPIX3': 1,
'CTYPE3': 'VRAD',
'BUNIT': 'K',
}
return fits.Header(header)
def generate_hdu(data, pixel_scale, spec_scale, beamfwhm, v0):
imshape = data.shape[1:]
header = generate_header(pixel_scale, spec_scale, beamfwhm, imshape, v0)
return fits.PrimaryHDU(data, header)
def gaussian(x, amp, mean, sigma):
return amp * np.exp(- (x - mean)**2 / (2 * sigma**2))
def generate_gaussian_cube(shape=(100, 25, 25), sigma=8., amp=1.,
noise=None, spec_scale=1 * u.km / u.s,
pixel_scale=1 * u.arcsec,
beamfwhm=3 * u.arcsec,
v0=None,
vel_surface=None,
seed=247825498):
'''
Generate a SpectralCube with Gaussian profiles.
The peak velocity positions can be given with `vel_surface`. Otherwise,
the peaks of the profiles are randomly assigned positions in the cubes.
    This is primarily to test shuffling and stacking of spectra, rather than
    trying to be physically meaningful.
Returns
-------
spec_cube : SpectralCube
The generated cube.
mean_positions : array
The peak positions in the cube.
'''
test_cube = np.empty(shape)
mean_positions = np.empty(shape[1:])
spec_middle = int(shape[0] / 2)
spec_quarter = int(shape[0] / 4)
if v0 is None:
v0 = 0
with NumpyRNGContext(seed):
spec_inds = np.mgrid[-spec_middle:spec_middle] * spec_scale.value
if len(spec_inds) == 0:
spec_inds = np.array([0])
spat_inds = np.indices(shape[1:])
for y, x in zip(spat_inds[0].flatten(), spat_inds[1].flatten()):
# Lock the mean to within 25% from the centre
if vel_surface is not None:
mean_pos = vel_surface[y,x]
else:
mean_pos = \
np.random.uniform(low=spec_inds[spec_quarter],
high=spec_inds[spec_quarter + spec_middle])
test_cube[:, y, x] = gaussian(spec_inds, amp, mean_pos, sigma)
mean_positions[y, x] = mean_pos + v0
if noise is not None:
test_cube[:, y, x] += np.random.normal(0, noise, shape[0])
test_hdu = generate_hdu(test_cube, pixel_scale, spec_scale, beamfwhm,
spec_inds[0] + v0)
spec_cube = SpectralCube.read(test_hdu)
mean_positions = mean_positions * spec_scale.unit
return spec_cube, mean_positions
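# Minimal usage sketch (added; parameter values are illustrative only):
#
#     cube, peaks = generate_gaussian_cube(shape=(50, 8, 8), sigma=5.,
#                                          noise=0.05, seed=42)
#     # ``cube`` is a SpectralCube and ``peaks`` holds the per-pixel peak
#     # velocities (same unit as ``spec_scale``) used to build the profiles.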
| jzuhone/spectral-cube | spectral_cube/tests/utilities.py | Python | bsd-3-clause | 3524 | ["Gaussian"] | 6d09cbf3e4ba26802982a45ca57165d030031faa217f1f064f2f63a94123b8f9 |
# -*- coding: utf-8 -*-
import sys
import math
from pylab import *
try:
import moose
except ImportError:
print("ERROR: Could not import moose. Please add the directory containing moose.py in your PYTHONPATH")
import sys
sys.exit(1)
from moose.utils import * # for BSplineFill
class GluSyn_STG(moose.SynChan):
"""Glutamate graded synapse"""
def __init__(self, *args):
moose.SynChan.__init__(self,*args)
self.Ek = -70e-3 # V
# For event based synapses, I had a strength of 5e-6 S
# to compensate for event-based,
# but for the original graded synapses, 5e-9 S is correct.
self.Gbar = 5e-9 # S # set weight on connecting the network
self.tau1 = 40e-3 # s # this is Vpre dependent (see below)
self.tau2 = 0.0 # single first order equation
Vth = -35e-3 # V
Delta = 5e-3 # V
######## Graded synapse activation
inhsyntable = moose.Interpol(self.path+"/graded_table")
graded = moose.Mstring(self.path+'/graded')
graded.value = 'True'
mgblock = moose.Mstring(self.path+'/mgblockStr')
mgblock.value = 'False'
# also needs a synhandler
moosesynhandler = moose.SimpleSynHandler(self.path+'/handler')
# connect the SimpleSynHandler to the SynChan (double exp)
moose.connect( moosesynhandler, 'activationOut', self, 'activation' )
# ds/dt = s_inf/tau - s/tau = A - Bs
# where A=s_inf/tau is activation, B is 1/tau
# Fill up the activation and tau tables
# Graded synapse tau
inhtautable = moose.Interpol(self.path+"/tau_table")
inhtautable.xmin = -70e-3 # V
inhtautable.xmax = 0e-3 # V
tau = [self.tau1] # at -70 mV
tau.extend( [self.tau1*(1. - 1./(1+math.exp((Vth-vm)/Delta))) \
for vm in arange(-70e-3,0.00001e-3,70e-3/1000.)] )
inhtautable.vector = array(tau)
inhtautable.connect("lookupOut",self,"setTau1")
# Graded synapse activation
inhsyntable.xmin = -70e-3 # V
inhsyntable.xmax = 0e-3 # V
act = [0.0] # at -70 mV
act.extend( [1/(1+math.exp((Vth-vm)/Delta)) \
for vm in arange(-70e-3,0.00001e-3,70e-3/1000.)] )
act = array(act) / array(tau) # element-wise division # NOTE: A = s_inf/tau
inhsyntable.vector = array(act)
inhsyntable.connect("lookupOut",self,"activation")
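        # Added sanity check (not in the original source): at Vpre == Vth
        # (-35 mV), s_inf = 0.5 and tau = tau1 / 2 = 20 ms, so the activation
        # table entry A = s_inf / tau evaluates to 25 s^-1 at that voltage.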
| BhallaLab/moose-examples | neuroml/lobster_pyloric/synapses/GluSyn_STG.py | Python | gpl-2.0 | 2442 | ["MOOSE"] | ff0527035c1c8792577135dbc240a535b1e9ca1caf42b3688ca332d57931ec20 |
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from functools import reduce
from docker.errors import APIError
from docker.errors import NotFound
from .config import ConfigurationError
from .config import get_service_name_from_net
from .const import DEFAULT_TIMEOUT
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .container import Container
from .legacy import check_for_legacy_containers
from .service import ContainerNet
from .service import ConvergenceStrategy
from .service import Net
from .service import parse_volume_from_spec
from .service import Service
from .service import ServiceNet
from .service import VolumeFromSpec
from .utils import parallel_execute
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_names_from_volumes_from(volumes_from):
return [
parse_volume_from_spec(volume_from).source
for volume_from in volumes_from
]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
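# Illustrative example (added; the service dicts below are hypothetical):
#
#     web = {'name': 'web', 'links': ['db:database']}
#     db = {'name': 'db'}
#     sort_service_dicts([web, db])  # -> [db, web]
#
# Dependencies (via links, volumes_from or net) always sort before the
# services that rely on them.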
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client, use_networking=False, network_driver=None):
self.name = name
self.services = services
self.client = client
self.use_networking = use_networking
self.network_driver = network_driver
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client, use_networking=False, network_driver=None):
"""
        Construct a Project from a list of dicts representing services.
"""
project = cls(name, [], client, use_networking=use_networking, network_driver=network_driver)
if use_networking:
remove_links(service_dicts)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(
Service(
client=client,
project=name,
use_networking=use_networking,
links=links,
net=net,
volumes_from=volumes_from,
**service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
            # Preserve order while dropping duplicates
            uniques = []
            for s in services:
                if s not in uniques:
                    uniques.append(s)
            return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError(
'Service "%s" has a link to service "%s" which does not '
'exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_from_config in service_dict.get('volumes_from', []):
volume_from_spec = parse_volume_from_spec(volume_from_config)
# Get service
try:
service_name = self.get_service(volume_from_spec.source)
volume_from_spec = VolumeFromSpec(service_name, volume_from_spec.mode)
except NoSuchService:
try:
container_name = Container.from_id(self.client, volume_from_spec.source)
volume_from_spec = VolumeFromSpec(container_name, volume_from_spec.mode)
except APIError:
raise ConfigurationError(
'Service "%s" mounts volumes from "%s", which is '
'not the name of a service or container.' % (
service_dict['name'],
volume_from_spec.source))
volumes_from.append(volume_from_spec)
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
net = service_dict.pop('net', None)
if not net:
if self.use_networking:
return Net(self.name)
return Net(None)
net_name = get_service_name_from_net(net)
if not net_name:
return Net(net)
try:
return ServiceNet(self.get_service(net_name))
except NoSuchService:
pass
try:
return ContainerNet(Container.from_id(self.client, net_name))
except APIError:
raise ConfigurationError(
'Service "%s" is trying to use the network of "%s", '
'which is not the name of a service or container.' % (
service_dict['name'],
net_name))
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.stop(**options),
msg_index=lambda c: c.name,
msg="Stopping"
)
def pause(self, service_names=None, **options):
for service in reversed(self.get_services(service_names)):
service.pause(**options)
def unpause(self, service_names=None, **options):
for service in self.get_services(service_names):
service.unpause(**options)
def kill(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.kill(**options),
msg_index=lambda c: c.name,
msg="Killing"
)
def remove_stopped(self, service_names=None, **options):
all_containers = self.containers(service_names, stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
parallel_execute(
objects=stopped_containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False, pull=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache, pull)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
strategy=ConvergenceStrategy.changed,
do_build=True,
timeout=DEFAULT_TIMEOUT,
detached=False):
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(services, strategy)
if self.use_networking:
self.ensure_network_exists()
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
do_build=do_build,
timeout=timeout,
detached=detached
)
]
def _get_convergence_plans(self, services, strategy):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies and strategy.allows_recreate:
log.debug('%s has upstream changes (%s)',
service.name,
", ".join(updated_dependencies))
plan = service.convergence_plan(ConvergenceStrategy.always)
else:
plan = service.convergence_plan(strategy)
plans[service.name] = plan
return plans
def pull(self, service_names=None, ignore_pull_failures=False):
for service in self.get_services(service_names, include_deps=False):
service.pull(ignore_pull_failures)
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]))
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
)
return [c for c in containers if matches_service_names(c)]
def get_network(self):
try:
return self.client.inspect_network(self.name)
except NotFound:
return None
def ensure_network_exists(self):
# TODO: recreate network if driver has changed?
if self.get_network() is None:
log.info(
'Creating network "{}" with driver "{}"'
.format(self.name, self.network_driver)
)
self.client.create_network(self.name, driver=self.network_driver)
def remove_network(self):
network = self.get_network()
if network:
self.client.remove_network(network['id'])
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
def remove_links(service_dicts):
services_with_links = [s for s in service_dicts if 'links' in s]
if not services_with_links:
return
if len(services_with_links) == 1:
prefix = '"{}" defines'.format(services_with_links[0]['name'])
else:
prefix = 'Some services ({}) define'.format(
", ".join('"{}"'.format(s['name']) for s in services_with_links))
log.warn(
'\n{} links, which are not compatible with Docker networking and will be ignored.\n'
'Future versions of Docker will not support links - you should remove them for '
'forwards-compatibility.\n'.format(prefix))
for s in services_with_links:
del s['links']
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass
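# Hedged usage sketch (added; names below are assumptions, not part of this module):
# a Project is normally built from parsed compose config plus a docker client, e.g.
#
#   client = docker.Client()   # docker-py client assumed
#   project = Project.from_dicts('myapp', service_dicts, client)
#   project.up(service_names=['web'], detached=True)
#
# 'service_dicts' stands for the list produced by the config loader.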
|
KevinGreene/compose
|
compose/project.py
|
Python
|
apache-2.0
| 15,116
|
[
"VisIt"
] |
0006bdd5fdd8e867a83f2775a64d0c2a5d27d3f71e621972605b2428f7cf4e90
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contact "Bingbing Suo" <bsuo@nwu.edu.cn> to download and install Xian-CI
# program.
#
'''
Generate Xian-CI input file and integral file
'''
from functools import reduce
import numpy
import h5py
from pyscf import ao2mo
from pyscf import symm
def write_integrals(xci, orb):
mol = xci.mol
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, orb)
h1e = reduce(numpy.dot, (orb.T, xci.get_hcore(), orb))
norb = orb.shape[1]
if xci._eri is not None:
h2e = ao2mo.restore(1, ao2mo.full(xci._eri, orb), norb)
else:
h2e = ao2mo.restore(1, ao2mo.full(mol, orb), norb)
with h5py.File(xci.integralfile, 'w') as f:
f['h1e'] = h1e
f['h2e'] = h2e
f['norb' ] = numpy.array(norb, dtype=numpy.int32)
f['group' ] = mol.groupname
f['orbsym'] = numpy.asarray(orbsym, dtype=numpy.int32)
f['ecore' ] = mol.energy_nuc()
def write_input(xci, orb):
mol = xci.mol
with open(xci.inputfile, 'w') as f:
f.write('%d 1 %d %g %g ! nroots; default; IC modes(0: UC, 1: WK, 2: CW, 3: VD, 4: FC, 8: DS, 9: SS, 10: SD); PLP cut; Ref cut \n' %
(xci.nroots, xci.ic_mode, xci.plpcut, xci.refcut))
f.write('%d %2.1f %d %d %f ! CI electrons; spin value; total irrep; irrep index; CI coeff print criterion \n' %
(xci.nelec, mol.spin*.5, len(mol.irrep_id), xci.wfnsym,
xci.print_thr))
f.write('%d %d %d %d ! norb_frz,norb_dz,norb_act,next_frz\n' %
(xci.frozen, xci.ndocc, xci.ncas, xci.next_frozen))
f.write('%d %d ! RUNPT2 modes(1: MS-SR-MRPT2; 2: MS-MR-MRPT2; 3: Dyall-MRPT2) ; RUNCI modes(0: ICMRCI; 1: ICMRPT2)\n' %
(xci.pt2_mode, xci.ic_mode))
f.write('0 0 0 0 0 0\n')
f.write('4 0 ! default\n')
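# Illustrative note (added; derived from the format strings above for the water/cc-pVDZ
# case in __main__ below, with default XianCiInpHandler settings): drt.inp would contain
# lines of roughly this shape:
#
#   1 1 0 0.001 0.001 ! nroots; default; IC modes(...); PLP cut; Ref cut
#   10 0.0 4 1 0.050000 ! CI electrons; spin value; total irrep; irrep index; ...
#   0 0 24 0 ! norb_frz,norb_dz,norb_act,next_frz
#   0 0 ! RUNPT2 modes(...) ; RUNCI modes(...)
#   0 0 0 0 0 0
#   4 0 ! default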
class XianCiInpHandler(object):
def __init__(self, method, inputfile, integralfile):
        self.mol = mol = method.mol  # keep a local alias; mol is referenced again below
        assert(mol.symmetry)
if getattr(method, '_eri', None) is not None:
self._eri = method._eri
elif (getattr(method, '_scf', None) and
getattr(method._scf, '_eri', None) is not None):
self._eri = method._scf._eri
else:
self._eri = None
self.get_hcore = method.get_hcore
self.mo_coeff = method.mo_coeff
#self.exe = settings.XIANCIEXE
self.inputfile = inputfile
self.integralfile = integralfile
self.nroots = 1
self.nelec = mol.nelectron
self.ic_mode = 0
self.plpcut = 1e-3
self.refcut = 1e-3
self.print_thr = 0.05
self.wfnsym = 1
self.frozen = 0
self.ndocc = 0
self.ncas = self.mo_coeff.shape[1]
self.next_frozen = 0
self.pt2_mode = 0
self.ic_mode = 0
def gen_input(self):
write_input(self, self.mo_coeff)
write_integrals(self, self.mo_coeff)
def kernel(self):
self.gen_input()
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(
atom = [['O',(0, 0, 0)],
['H',(0.790689766, 0, 0.612217330)],
['H',(-0.790689766, 0, 0.612217330)]],
basis = 'ccpvdz',
symmetry = 1)
mf = scf.RHF(mol).run()
XianCiInpHandler(mf, 'drt.inp', 'eri.h5').kernel()
|
gkc1000/pyscf
|
pyscf/xianci/xianci.py
|
Python
|
apache-2.0
| 4,002
|
[
"PySCF"
] |
091062fc180987ef85ef47efd59ab97233a2d70c8439f5f2f12bbfbfc1c8b062
|
#!/usr/bin/env python
"""
roms.interp
Methods to interpolate ROMS fields onto other grids
Written by Brian Powell on 11/02/13
Copyright (c)2017 University of Hawaii under the MIT-License.
"""
import numpy as np
import netCDF4
import os
import seapy
from seapy.timeout import timeout, TimeoutError
from joblib import Parallel, delayed
from warnings import warn
_up_scaling = {"zeta": 1.0, "u": 1.0, "v": 1.0, "temp": 1.0, "salt": 1.0}
_down_scaling = {"zeta": 1.0, "u": 0.99,
"v": 0.99, "temp": 0.99, "salt": 1.001}
_ksize_range = (7, 15)
# Limit amount of memory in bytes to process in a single read. This determines how to
# divide up the time-records in interpolation
_max_memory = 768 * 1024 * 1024 # 768 MBytes
def __mask_z_grid(z_data, src_depth, z_depth):
"""
When interpolating to z-grid, we need to apply depth dependent masking
based on the original ROMS depths
"""
for k in np.arange(0, z_depth.shape[0]):
idx = np.nonzero(z_depth[k, :, :] < src_depth)
if z_data.ndim == 4:
z_data.mask[:, k, idx[0], idx[1]] = True
elif z_data.ndim == 3:
z_data.mask[k, idx[0], idx[1]] = True
def __interp2_thread(rx, ry, data, zx, zy, pmap, weight, nx, ny, mask):
"""
internal routine: 2D interpolation thread for parallel interpolation
"""
data = np.ma.fix_invalid(data, copy=False)
# Convolve the water over the land
ksize = 2 * np.round(np.sqrt((nx / np.median(np.diff(rx)))**2 +
(ny / np.median(np.diff(ry.T)))**2)) + 1
if ksize < _ksize_range[0]:
warn("nx or ny values are too small for stable OA, {:f}".format(ksize))
ksize = _ksize_range[0]
elif ksize > _ksize_range[1]:
warn("nx or ny values are too large for stable OA, {:f}".format(ksize))
ksize = _ksize_range[1]
data = seapy.convolve_mask(data, ksize=ksize, copy=False)
# Interpolate the field and return the result
with timeout(minutes=30):
res, pm = seapy.oasurf(rx, ry, data, zx, zy, pmap, weight, nx, ny)
return np.ma.masked_where(np.logical_or(mask == 0, np.abs(res) > 9e4), res,
copy=False)
def __interp3_thread(rx, ry, rz, data, zx, zy, zz, pmap,
weight, nx, ny, mask, up_factor=1.0, down_factor=1.0):
"""
internal routine: 3D interpolation thread for parallel interpolation
"""
# Make the mask 3D
mask = seapy.adddim(mask, zz.shape[0])
data = np.ma.fix_invalid(data, copy=False)
# To avoid extrapolation, we are going to convolve ocean over the land
# and add a new top and bottom layer that replicates the data of the
# existing current and top. 1) iteratively convolve until we have
# filled most of the points, 2) Determine which way the
# depth goes and add/subtract new layers, and 3) fill in masked values
# from the layer above/below.
gradsrc = (rz[0, 1, 1] - rz[-1, 1, 1]) > 0
# Convolve the water over the land
ksize = 2 * np.round(np.sqrt((nx / np.median(np.diff(rx)))**2 +
(ny / np.median(np.diff(ry.T)))**2)) + 1
if ksize < _ksize_range[0]:
warn("nx or ny values are too small for stable OA, {:f}".format(ksize))
ksize = _ksize_range[0]
elif ksize > _ksize_range[1]:
warn("nx or ny values are too large for stable OA, {:f}".format(ksize))
ksize = _ksize_range[1]
# Iterate at most 5 times, but we will hopefully break out before that by
# checking if we have filled at least 40% of the bottom to be like
# the surface
bot = -1 if gradsrc else 0
top = 0 if gradsrc else -1
topmask = np.maximum(1, np.ma.count_masked(data[top, :, :]))
if np.ma.count_masked(data[bot, :, :]) > 0:
for iter in range(5):
# Check if we have most everything by checking the bottom
data = seapy.convolve_mask(data, ksize=ksize + iter, copy=False)
if topmask / np.maximum(1, np.ma.count_masked(data[bot, :, :])) > 0.4:
break
# Now fill vertically
nrz = np.zeros((data.shape[0] + 2, data.shape[1], data.shape[2]))
nrz[1:-1, :, :] = rz
nrz[bot, :, :] = rz[bot, :, :] - 5000
nrz[top, :, :] = 1
if not gradsrc:
# The first level is the bottom
# factor = down_factor
levs = np.arange(data.shape[0], 0, -1) - 1
else:
# The first level is the top
# factor = up_factor
levs = np.arange(0, data.shape[0])
# Fill in missing values where we have them from the shallower layer
for k in levs[1:]:
if np.ma.count_masked(data[k, :, :]) == 0:
continue
idx = np.nonzero(np.logical_xor(data.mask[k, :, :],
data.mask[k - 1, :, :]))
data.mask[k, idx[0], idx[1]] = data.mask[k - 1, idx[0], idx[1]]
data[k, idx[0], idx[1]] = data[k - 1, idx[0], idx[1]] * down_factor
# Add upper and lower boundaries
ndat = np.zeros((data.shape[0] + 2, data.shape[1], data.shape[2]))
ndat[bot, :, :] = data[bot, :, :].filled(np.nan) * down_factor
ndat[1:-1, :, :] = data.filled(np.nan)
ndat[top, :, :] = data[top, :, :].filled(np.nan) * up_factor
# Interpolate the field and return the result
with timeout(minutes=30):
if gradsrc:
res, pm = seapy.oavol(rx, ry, nrz[::-1, :, :], ndat[::-1, :, :],
zx, zy, zz, pmap, weight, nx, ny)
else:
res, pm = seapy.oavol(rx, ry, nrz, ndat, zx, zy, zz,
pmap, weight, nx, ny)
return np.ma.masked_where(np.logical_or(mask == 0, np.abs(res) > 9e4), res,
copy=False)
def __interp3_vel_thread(rx, ry, rz, ra, u, v, zx, zy, zz, za, pmap,
weight, nx, ny, mask):
"""
internal routine: 3D velocity interpolation thread for parallel interpolation
"""
# Put on the same grid
if u.shape != v.shape:
u = seapy.model.u2rho(u, fill=True)
v = seapy.model.v2rho(v, fill=True)
# Rotate the fields (NOTE: ROMS angle is negative relative to "true")
if ra is not None:
u, v = seapy.rotate(u, v, ra)
# Interpolate
u = __interp3_thread(rx, ry, rz, u, zx, zy, zz, pmap,
weight, nx, ny, mask, _up_scaling["u"],
_down_scaling["u"])
v = __interp3_thread(rx, ry, rz, v, zx, zy, zz, pmap,
weight, nx, ny, mask, _up_scaling["v"],
_down_scaling["v"])
# Rotate to destination (NOTE: ROMS angle is negative relative to "true")
if za is not None:
u, v = seapy.rotate(u, v, -za)
# Return the masked data
return u, v
def __interp_grids(src_grid, child_grid, ncout, records=None,
threads=2, nx=0, ny=0, weight=10, vmap=None, z_mask=False,
pmap=None):
"""
internal method: Given a model file (average, history, etc.),
interpolate the fields onto another gridded file.
Parameters
----------
src_grid : seapy.model.grid data source (History, Average, etc. file)
child_grid : seapy.model.grid output data grid
ncout : netcdf output file
[records] : array of the record indices to interpolate
[threads] : number of processing threads
[nx] : decorrelation length in grid-cells for x
[ny] : decorrelation length in grid-cells for y
[vmap] : variable name mapping
[z_mask] : mask out depths in z-grids
[pmap] : use the specified pmap rather than compute it
Returns
-------
None
"""
# If we don't have a variable map, then do a one-to-one mapping
if vmap is None:
vmap = dict()
for k in seapy.roms.fields:
vmap[k] = k
# Generate a file to store the pmap information
sname = getattr(src_grid, 'name', None)
cname = getattr(child_grid, 'name', None)
pmap_file = None if any(v is None for v in (sname, cname)) else \
sname + "_" + cname + "_pmap.npz"
# Create or load the pmaps depending on if they exist
if nx == 0:
if hasattr(src_grid, "dm") and hasattr(child_grid, "dm"):
nx = np.ceil(np.mean(src_grid.dm) / np.mean(child_grid.dm))
else:
nx = 5
if ny == 0:
if hasattr(src_grid, "dn") and hasattr(child_grid, "dn"):
ny = np.ceil(np.mean(src_grid.dn) / np.mean(child_grid.dn))
else:
ny = 5
if pmap is None:
if pmap_file is not None and os.path.isfile(pmap_file):
pmap = np.load(pmap_file)
else:
tmp = np.ma.masked_equal(src_grid.mask_rho, 0)
tmp, pmaprho = seapy.oasurf(src_grid.lon_rho, src_grid.lat_rho,
tmp, child_grid.lon_rho, child_grid.lat_rho,
weight=weight, nx=nx, ny=ny)
tmp = np.ma.masked_equal(src_grid.mask_u, 0)
tmp, pmapu = seapy.oasurf(src_grid.lon_u, src_grid.lat_u,
tmp, child_grid.lon_rho, child_grid.lat_rho,
weight=weight, nx=nx, ny=ny)
tmp = np.ma.masked_equal(src_grid.mask_v, 0)
tmp, pmapv = seapy.oasurf(src_grid.lon_v, src_grid.lat_v,
tmp, child_grid.lon_rho, child_grid.lat_rho,
weight=weight, nx=nx, ny=ny)
if pmap_file is not None:
np.savez(pmap_file, pmaprho=pmaprho, pmapu=pmapu, pmapv=pmapv)
pmap = {"pmaprho": pmaprho, "pmapu": pmapu, "pmapv": pmapv}
# Get the time field
ncsrc = seapy.netcdf(src_grid.filename)
time = seapy.roms.get_timevar(ncsrc)
# Interpolate the depths from the source to final grid
src_depth = np.min(src_grid.depth_rho, 0)
dst_depth = __interp2_thread(src_grid.lon_rho, src_grid.lat_rho, src_depth,
child_grid.lon_rho, child_grid.lat_rho, pmap[
"pmaprho"],
weight, nx, ny, child_grid.mask_rho)
# Interpolate the scalar fields
records = np.arange(0, ncsrc.variables[time].shape[0]) \
if records is None else np.atleast_1d(records)
for src in vmap:
dest = vmap[src]
# Extra fields will probably be user tracers (biogeochemical)
fld = seapy.roms.fields.get(dest, {"dims": 3})
# Only interpolate the fields we want in the destination
if (dest not in ncout.variables) or ("rotate" in fld):
continue
if fld["dims"] == 2:
            # Compute the max number of records to hold in memory
maxrecs = np.maximum(1, np.minimum(len(records),
np.int(_max_memory / (child_grid.lon_rho.nbytes +
src_grid.lon_rho.nbytes))))
for rn, recs in enumerate(seapy.chunker(records, maxrecs)):
outr = np.s_[
rn * maxrecs:np.minimum((rn + 1) * maxrecs, len(records))]
ndata = np.ma.array(Parallel(n_jobs=threads, verbose=2, max_nbytes=_max_memory)
(delayed(__interp2_thread)(
src_grid.lon_rho, src_grid.lat_rho,
ncsrc.variables[src][i, :, :],
child_grid.lon_rho, child_grid.lat_rho,
pmap["pmaprho"], weight,
nx, ny, child_grid.mask_rho)
for i in recs), copy=False)
ncout.variables[dest][outr, :, :] = ndata
ncout.sync()
else:
maxrecs = np.maximum(1, np.minimum(
len(records), np.int(_max_memory /
(child_grid.lon_rho.nbytes *
child_grid.n +
src_grid.lon_rho.nbytes *
src_grid.n))))
for rn, recs in enumerate(seapy.chunker(records, maxrecs)):
outr = np.s_[
rn * maxrecs:np.minimum((rn + 1) * maxrecs, len(records))]
ndata = np.ma.array(Parallel(n_jobs=threads, verbose=2, max_nbytes=_max_memory)
(delayed(__interp3_thread)(
src_grid.lon_rho, src_grid.lat_rho,
src_grid.depth_rho,
ncsrc.variables[src][i, :, :, :],
child_grid.lon_rho, child_grid.lat_rho,
child_grid.depth_rho,
pmap["pmaprho"], weight,
nx, ny, child_grid.mask_rho,
up_factor=_up_scaling.get(dest, 1.0),
down_factor=_down_scaling.get(dest, 1.0))
for i in recs), copy=False)
if z_mask:
__mask_z_grid(ndata, dst_depth, child_grid.depth_rho)
ncout.variables[dest][outr, :, :, :] = ndata
ncout.sync()
# Rotate and Interpolate the vector fields. First, determine which
# are the "u" and the "v" vmap fields
try:
velmap = {
"u": list(vmap.keys())[list(vmap.values()).index("u")],
"v": list(vmap.keys())[list(vmap.values()).index("v")]}
    except ValueError:
warn("velocity not present in source file")
return
srcangle = src_grid.angle if src_grid.cgrid else None
dstangle = child_grid.angle if child_grid.cgrid else None
maxrecs = np.minimum(len(records),
np.int(_max_memory /
(2 * (child_grid.lon_rho.nbytes *
child_grid.n +
src_grid.lon_rho.nbytes *
src_grid.n))))
for nr, recs in enumerate(seapy.chunker(records, maxrecs)):
vel = Parallel(n_jobs=threads, verbose=2, max_nbytes=_max_memory)(delayed(__interp3_vel_thread)(
src_grid.lon_rho, src_grid.lat_rho,
src_grid.depth_rho, srcangle,
ncsrc.variables[velmap["u"]][i, :, :, :],
ncsrc.variables[velmap["v"]][i, :, :, :],
child_grid.lon_rho, child_grid.lat_rho,
child_grid.depth_rho, dstangle,
pmap["pmaprho"], weight, nx, ny,
child_grid.mask_rho) for i in recs)
for j in range(len(vel)):
vel_u = np.ma.array(vel[j][0], copy=False)
vel_v = np.ma.array(vel[j][1], copy=False)
if z_mask:
__mask_z_grid(vel_u, dst_depth, child_grid.depth_rho)
__mask_z_grid(vel_v, dst_depth, child_grid.depth_rho)
if child_grid.cgrid:
vel_u = seapy.model.rho2u(vel_u)
vel_v = seapy.model.rho2v(vel_v)
ncout.variables["u"][nr * maxrecs + j, :] = vel_u
ncout.variables["v"][nr * maxrecs + j, :] = vel_v
if "ubar" in ncout.variables:
# Create ubar and vbar
# depth = seapy.adddim(child_grid.depth_u, vel_u.shape[0])
ncout.variables["ubar"][nr * maxrecs + j, :] = \
np.sum(vel_u * child_grid.depth_u, axis=0) / \
np.sum(child_grid.depth_u, axis=0)
if "vbar" in ncout.variables:
# depth = seapy.adddim(child_grid.depth_v, vel_v.shape[0])
ncout.variables["vbar"][nr * maxrecs + j, :] = \
np.sum(vel_v * child_grid.depth_v, axis=0) / \
np.sum(child_grid.depth_v, axis=0)
ncout.sync()
# Return the pmap that was used
return pmap
def field2d(src_lon, src_lat, src_field, dest_lon, dest_lat, dest_mask=None,
nx=0, ny=0, weight=10, threads=2, pmap=None):
"""
Given a 2D field with time (dimensions [time, lat, lon]), interpolate
onto a new grid and return the new field. This is a helper function
when needing to interpolate data within files, etc.
Parameters
----------
src_lon: numpy.ndarray
longitude that field is on
src_lat: numpy.ndarray
latitude that field is on
src_field: numpy.ndarray
field to interpolate
dest_lon: numpy.ndarray
output longitudes to interpolate to
dest_lat: numpy.ndarray
output latitudes to interpolate to
dest_mask: numpy.ndarray, optional
mask to apply to interpolated data
nx : float, optional:
decorrelation length-scale for OA (same units as source data)
ny : float, optional:
decorrelation length-scale for OA (same units as source data)
weight : int, optional:
number of points to use in weighting matrix
threads : int, optional:
number of processing threads
pmap : numpy.ndarray, optional:
use the specified pmap rather than compute it
Output
------
ndarray:
interpolated field on the destination grid
pmap:
        the pmap used in the interpolation
"""
if pmap is None:
tmp, pmap = seapy.oasurf(src_lon, src_lat, src_lat,
dest_lon, dest_lat, weight=weight, nx=nx, ny=ny)
if dest_mask is None:
dest_mask = np.ones(dest_lat.shape)
records = np.arange(0, src_field.shape[0])
maxrecs = np.maximum(1,
np.minimum(records.size,
np.int(_max_memory /
(dest_lon.nbytes + src_lon.nbytes))))
for rn, recs in enumerate(seapy.chunker(records, maxrecs)):
nfield = np.ma.array(Parallel(n_jobs=threads, verbose=2)
(delayed(__interp2_thread)(
src_lon, src_lat, src_field[i, :, :],
dest_lon, dest_lat,
pmap, weight,
nx, ny, dest_mask)
for i in recs), copy=False)
return nfield, pmap
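# Hedged usage sketch (added; array names are assumptions): regrid a [time, lat, lon]
# field onto a new grid and reuse the returned pmap for further variables:
#
#   sst_new, pmap = field2d(lon_src, lat_src, sst, lon_dst, lat_dst,
#                           dest_mask=mask_dst, nx=2, ny=2)
#   ssh_new, _ = field2d(lon_src, lat_src, ssh, lon_dst, lat_dst,
#                        dest_mask=mask_dst, nx=2, ny=2, pmap=pmap)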
def field3d(src_lon, src_lat, src_depth, src_field, dest_lon, dest_lat,
dest_depth, dest_mask=None, nx=0, ny=0, weight=10,
threads=2, pmap=None):
"""
Given a 3D field with time (dimensions [time, z, lat, lon]), interpolate
onto a new grid and return the new field. This is a helper function
when needing to interpolate data within files, etc.
Parameters
----------
src_lon: numpy.ndarray
longitude that field is on
src_lat: numpy.ndarray
latitude that field is on
    src_depth: numpy.ndarray
depths of the field
src_field: numpy.ndarray
field to interpolate
dest_lon: numpy.ndarray
output longitudes to interpolate to
dest_lat: numpy.ndarray
output latitudes to interpolate to
dest_depth: numpy.ndarray
output depths to interpolate to
dest_mask: numpy.ndarray, optional
mask to apply to interpolated data
nx : float, optional:
decorrelation length-scale for OA (same units as source data)
ny : float, optional:
decorrelation length-scale for OA (same units as source data)
weight : int, optional:
number of points to use in weighting matrix
threads : int, optional:
number of processing threads
pmap : numpy.ndarray, optional:
use the specified pmap rather than compute it
Output
------
ndarray:
interpolated field on the destination grid
pmap:
        the pmap used in the interpolation
"""
if pmap is None:
tmp, pmap = seapy.oasurf(src_lon, src_lat, src_lat,
dest_lon, dest_lat, weight=weight, nx=nx, ny=ny)
if dest_mask is None:
dest_mask = np.ones(dest_lat.shape)
records = np.arange(0, src_field.shape[0])
maxrecs = np.maximum(1,
np.minimum(records.size,
np.int(_max_memory /
(dest_lon.nbytes *
dest_depth.shape[0] +
src_lon.nbytes *
src_depth.shape[0]))))
for rn, recs in enumerate(seapy.chunker(records, maxrecs)):
nfield = np.ma.array(Parallel(n_jobs=threads, verbose=2)
(delayed(__interp3_thread)(
src_lon, src_lat, src_depth,
src_field[i, :, :],
dest_lon, dest_lat, dest_depth,
pmap, weight, nx, ny, dest_mask,
up_factor=1, down_factor=1)
for i in recs), copy=False)
return nfield, pmap
def to_zgrid(roms_file, z_file, z_grid=None, depth=None, records=None,
threads=2, reftime=None, nx=0, ny=0, weight=10, vmap=None,
cdl=None, dims=2, pmap=None):
"""
    Given an existing ROMS history or average file, create (if it does not exist)
a new z-grid file. Use the given z_grid or otherwise build one with the
same horizontal extent and the specified depths and interpolate the
ROMS fields onto the z-grid.
Parameters
----------
roms_file : string,
File name of src file to interpolate from
z_file : string,
        Name of destination file to write to
z_grid: (string or seapy.model.grid), optional:
Name or instance of output definition
depth: numpy.ndarray, optional:
array of depths to use for z-level
records : numpy.ndarray, optional:
Record indices to interpolate
threads : int, optional:
number of processing threads
reftime: datetime, optional:
Reference time as the epoch for z-grid file
nx : float, optional:
decorrelation length-scale for OA (same units as source data)
ny : float, optional:
decorrelation length-scale for OA (same units as source data)
weight : int, optional:
number of points to use in weighting matrix
vmap : dictionary, optional
mapping source and destination variables
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
dims : int, optional
number of dimensions to use for lat/lon arrays (default 2)
pmap : numpy.ndarray, optional:
use the specified pmap rather than compute it
Returns
-------
pmap : ndarray
the weighting matrix computed during the interpolation
"""
roms_grid = seapy.model.asgrid(roms_file)
ncroms = seapy.netcdf(roms_file)
src_ref, time = seapy.roms.get_reftime(ncroms)
if reftime is not None:
src_ref = reftime
records = np.arange(0, ncroms.variables[time].shape[0]) \
if records is None else np.atleast_1d(records)
# Load the grid
if z_grid is not None:
z_grid = seapy.model.asgrid(z_grid)
elif os.path.isfile(z_file):
z_grid = seapy.model.asgrid(z_file)
if not os.path.isfile(z_file):
if z_grid is None:
lat = roms_grid.lat_rho.shape[0]
lon = roms_grid.lat_rho.shape[1]
if depth is None:
raise ValueError("depth must be specified")
ncout = seapy.roms.ncgen.create_zlevel(z_file, lat, lon,
len(depth), src_ref, "ROMS z-level",
cdl=cdl, dims=dims)
if dims == 1:
ncout.variables["lat"][:] = roms_grid.lat_rho[:, 0]
ncout.variables["lon"][:] = roms_grid.lon_rho[0, :]
else:
ncout.variables["lat"][:] = roms_grid.lat_rho
ncout.variables["lon"][:] = roms_grid.lon_rho
ncout.variables["depth"][:] = depth
ncout.variables["mask"][:] = roms_grid.mask_rho
ncout.sync()
z_grid = seapy.model.grid(z_file)
else:
lat = z_grid.lat_rho.shape[0]
lon = z_grid.lat_rho.shape[1]
dims = z_grid.spatial_dims
ncout = seapy.roms.ncgen.create_zlevel(z_file, lat, lon,
len(z_grid.z), src_ref, "ROMS z-level",
cdl=cdl, dims=dims)
if dims == 1:
ncout.variables["lat"][:] = z_grid.lat_rho[:, 0]
ncout.variables["lon"][:] = z_grid.lon_rho[0, :]
else:
ncout.variables["lat"][:] = z_grid.lat_rho
ncout.variables["lon"][:] = z_grid.lon_rho
ncout.variables["depth"][:] = z_grid.z
ncout.variables["mask"][:] = z_grid.mask_rho
else:
ncout = netCDF4.Dataset(z_file, "a")
ncout.variables["time"][:] = netCDF4.date2num(
netCDF4.num2date(ncroms.variables[time][records],
ncroms.variables[time].units),
ncout.variables["time"].units)
ncroms.close()
# Call the interpolation
try:
roms_grid.set_east(z_grid.east())
pmap = __interp_grids(roms_grid, z_grid, ncout, records=records,
threads=threads, nx=nx, ny=ny, vmap=vmap, weight=weight,
z_mask=True, pmap=pmap)
except TimeoutError:
print("Timeout: process is hung, deleting output.")
# Delete the output file
os.remove(z_file)
finally:
# Clean up
ncout.close()
return pmap
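# Hedged usage sketch (added; file names and depths are assumptions): interpolate a
# ROMS history file onto fixed z-levels, then reuse the returned pmap:
#
#   depths = np.array([5., 50., 200., 1000.])  # target z-levels; sign convention
#                                               # follows the z-level definition
#   pmap = to_zgrid('ocean_his_0001.nc', 'ocean_zlev_0001.nc', depth=depths, threads=4)
#   to_zgrid('ocean_his_0002.nc', 'ocean_zlev_0002.nc', depth=depths, pmap=pmap)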
def to_grid(src_file, dest_file, dest_grid=None, records=None, threads=2,
reftime=None, nx=0, ny=0, weight=10, vmap=None, pmap=None):
"""
    Given an existing model file, create (if it does not exist) a
    new ROMS history file using the given ROMS destination grid and
    interpolate the ROMS fields onto the new grid. If an existing destination
    file is given, the source fields are interpolated onto the specified grid.
Parameters
----------
src_file : string,
Filename of src file to interpolate from
dest_file : string,
        Name of destination file to write to
dest_grid: (string or seapy.model.grid), optional:
Name or instance of output definition
records : numpy.ndarray, optional:
Record indices to interpolate
threads : int, optional:
number of processing threads
reftime: datetime, optional:
Reference time as the epoch for ROMS file
nx : float, optional:
decorrelation length-scale for OA (same units as source data)
ny : float, optional:
decorrelation length-scale for OA (same units as source data)
weight : int, optional:
number of points to use in weighting matrix
vmap : dictionary, optional
mapping source and destination variables
pmap : numpy.ndarray, optional:
use the specified pmap rather than compute it
Returns
-------
pmap : ndarray
the weighting matrix computed during the interpolation
"""
src_grid = seapy.model.asgrid(src_file)
if dest_grid is not None:
destg = seapy.model.asgrid(dest_grid)
if not os.path.isfile(dest_file):
ncsrc = seapy.netcdf(src_file)
src_ref, time = seapy.roms.get_reftime(ncsrc)
if reftime is not None:
src_ref = reftime
records = np.arange(0, ncsrc.variables[time].shape[0]) \
if records is None else np.atleast_1d(records)
ncout = seapy.roms.ncgen.create_ini(dest_file,
eta_rho=destg.eta_rho,
xi_rho=destg.xi_rho,
s_rho=destg.n,
reftime=src_ref,
title="interpolated from " + src_file)
destg.to_netcdf(ncout)
ncout.variables["ocean_time"][:] = netCDF4.date2num(
netCDF4.num2date(ncsrc.variables[time][records],
ncsrc.variables[time].units),
ncout.variables["ocean_time"].units)
ncsrc.close()
if os.path.isfile(dest_file):
ncout = netCDF4.Dataset(dest_file, "a")
if dest_grid is None:
destg = seapy.model.asgrid(dest_file)
# Call the interpolation
try:
src_grid.set_east(destg.east())
pmap = __interp_grids(src_grid, destg, ncout, records=records,
threads=threads, nx=nx, ny=ny, weight=weight,
vmap=vmap, pmap=pmap)
except TimeoutError:
print("Timeout: process is hung, deleting output.")
# Delete the output file
os.remove(dest_file)
finally:
# Clean up
ncout.close()
return pmap
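# Hedged usage sketch (added; file names are assumptions): regrid one ROMS solution
# onto another ROMS grid, letting the routine create the destination file:
#
#   pmap = to_grid('parent_his.nc', 'child_ini.nc',
#                  dest_grid='child_grid.nc', records=[0], threads=4)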
def to_clim(src_file, dest_file, dest_grid=None, records=None,
clobber=False, cdl=None, threads=2, reftime=None,
nx=0, ny=0, weight=10, vmap=None, pmap=None):
"""
    Given a model output file, create (if it does not exist) a
    new ROMS climatology file using the given ROMS destination grid and
    interpolate the ROMS fields onto the new grid. If an existing destination
    file is given, the source fields are interpolated onto the specified grid.
Parameters
----------
src_file : string,
Filename of src file to interpolate from
dest_file : string,
        Name of destination file to write to
dest_grid: (string or seapy.model.grid), optional:
Name or instance of output definition
records : numpy.ndarray, optional:
Record indices to interpolate
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
threads : int, optional:
number of processing threads
reftime: datetime, optional:
Reference time as the epoch for climatology file
nx : float, optional:
decorrelation length-scale for OA (same units as source data)
ny : float, optional:
decorrelation length-scale for OA (same units as source data)
weight : int, optional:
number of points to use in weighting matrix
vmap : dictionary, optional
mapping source and destination variables
pmap : numpy.ndarray, optional:
use the specified pmap rather than compute it
Returns
-------
pmap : ndarray
the weighting matrix computed during the interpolation
"""
if dest_grid is not None:
destg = seapy.model.asgrid(dest_grid)
src_grid = seapy.model.asgrid(src_file)
ncsrc = seapy.netcdf(src_file)
src_ref, time = seapy.roms.get_reftime(ncsrc)
if reftime is not None:
src_ref = reftime
records = np.arange(0, ncsrc.variables[time].shape[0]) \
if records is None else np.atleast_1d(records)
ncout = seapy.roms.ncgen.create_clim(dest_file,
eta_rho=destg.ln,
xi_rho=destg.lm,
s_rho=destg.n,
reftime=src_ref,
clobber=clobber,
cdl=cdl,
title="interpolated from " + src_file)
src_time = netCDF4.num2date(ncsrc.variables[time][records],
ncsrc.variables[time].units)
ncout.variables["clim_time"][:] = netCDF4.date2num(
src_time, ncout.variables["clim_time"].units)
ncsrc.close()
else:
raise AttributeError(
"you must supply a destination file or a grid to make the file")
# Call the interpolation
try:
src_grid.set_east(destg.east())
pmap = __interp_grids(src_grid, destg, ncout, records=records, threads=threads,
nx=nx, ny=ny, vmap=vmap, weight=weight, pmap=pmap)
except TimeoutError:
print("Timeout: process is hung, deleting output.")
# Delete the output file
os.remove(dest_file)
finally:
# Clean up
ncout.close()
return pmap
pass
|
ocefpaf/seapy
|
seapy/roms/interp.py
|
Python
|
mit
| 33,035
|
[
"Brian",
"NetCDF"
] |
faafebeaa7636f81d7470c62753635fb0dfc1df26189ada9c4cc4785a0f302eb
|
#!/usr/bin/env python
import os
import urllib2
import gzip
import time
import numpy as np
import pandas as pd
from itertools import izip
from StringIO import StringIO
from collections import defaultdict
from biom import Table
from lxml import etree
from skbio.parse.sequences import parse_fastq, parse_fasta
import americangut as ag
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["Daniel McDonald", "Adam Robbins-Pianka", "Jamie Morton",
"Justine Debelius"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
def get_path(path):
"""Get a relative path to the working directory
Parameters
----------
path : str
A path
Returns
-------
str
The filepath
Notes
-----
This method does not care if the path exists or not
"""
return os.path.join(ag.WORKING_DIR, path)
def get_new_path(path):
"""Get a new relative path to the working directory
Parameters
----------
path : str
A path that does not exist
Returns
-------
str
The filepath
Notes
-----
It is only assured that the path does not exist at the time of function
evaluation.
Raises
------
IOError
If the path exists
"""
path = get_path(path)
if os.path.exists(path):
raise IOError('%s already exists.' % path)
return path
def get_existing_path(path):
"""Get an existing relative path to the working directory
Parameters
----------
path : str
A path that exists
Returns
-------
str
The filepath
Notes
-----
It is only assured that the path exists at the time of function evaluation
Raises
------
IOError
If the path does not exist
"""
path = get_path(path)
if not os.path.exists(path):
raise IOError('%s does not exist.' % path)
return path
def parse_mapping_file(open_file):
"""return (header, [(sample_id, all_other_fields)])
"""
header = open_file.readline().strip()
res = []
for l in open_file:
res.append(l.strip().split('\t', 1))
return (header, res)
def verify_subset(table, mapping):
"""Returns True/False if the table is a subset"""
ids = set([i[0] for i in mapping])
t_ids = set(table.ids())
return t_ids.issubset(ids)
def slice_mapping_file(table, mapping):
"""Returns a new mapping corresponding to just the ids in the table"""
t_ids = set(table.ids())
res = []
for id_, l in mapping:
if id_ in t_ids:
res.append('\t'.join([id_, l]))
return res
def check_file(f, e=IOError):
"""Verify a file (or directory) exists"""
if not os.path.exists(f):
raise e("Cannot continue! The file %s does not exist!" % f)
def trim_fasta(input_fasta, output_fasta, length):
"""Trim FASTA sequences to a given length
input_fasta: should be an open file. Every two lines should compose a
complete FASTA record (header, sequence)
output_fasta: should be an open file ready for writing
length: what length to trim the sequences to. Sequences shorter than
length will not be modified.
"""
    # reads the FASTA file two lines at a time
# Assumptions: 1) each FASTA record is two lines
# 2) There are no incomplete FASTA records
for header, sequence in izip(input_fasta, input_fasta):
header = header.strip()
sequence = sequence.strip()[:length]
output_fasta.write("%s\n%s\n" % (header, sequence))
def concatenate_files(input_files, output_file, read_chunk=10000):
"""Concatenate all input files and produce an output file
input_fps is a list of open files
output_fp is an open file ready for writing
"""
for infile in input_files:
chunk = infile.read(read_chunk)
while chunk:
output_file.write(chunk)
chunk = infile.read(read_chunk)
def fetch_study_details(accession):
"""Fetch secondary accession and FASTQ details
yields [(secondary_accession, fastq_url)]
"""
url_fmt = "http://www.ebi.ac.uk/ena/data/warehouse/" \
"filereport?accession=%(accession)s&result=read_run&" \
"fields=secondary_sample_accession,submitted_ftp"
res = fetch_url(url_fmt % {'accession': accession})
for line in res.readlines()[1:]:
if 'ERA371447' in line:
# Corrupt sequence files were uploaded to EBI for one of the AG
# rounds. Ignoring entries associated with this accession works
# around the corruption
continue
parts = line.strip().split('\t')
if len(parts) != 2:
continue
else:
yield tuple(parts)
def fetch_url(url):
"""Return an open file handle"""
# really should use requests instead of urllib2
attempts = 0
res = None
while attempts < 5:
attempts += 1
try:
res = urllib2.urlopen(url)
except urllib2.HTTPError as e:
if e.code == 500:
time.sleep(5)
continue
else:
raise
if res is None:
raise ValueError("Failed at fetching %s" % url)
return StringIO(res.read())
def fetch_seqs_fastq(url):
"""Fetch a FTP item"""
# not using a url_fmt here as the directory structure has potential to
# be different between studies
if not url.startswith('ftp://'):
url = "ftp://%s" % url
res = fetch_url(url)
return gzip.GzipFile(fileobj=res)
def fetch_metadata_xml(accession):
"""Fetch sample metadata"""
url_fmt = "http://www.ebi.ac.uk/ena/data/view/%(accession)s&display=xml"
res = fetch_url(url_fmt % {'accession': accession})
metadata = xml_to_dict(res)
return metadata
def xml_to_dict(xml_fp):
""" Converts xml string to a dictionary
Parameters
----------
xml_fp : str
        xml file path
Returns
-------
metadata : dict
dictionary where the metadata headers are keys
and the values correspond participant survey results
"""
root = etree.parse(xml_fp).getroot()
sample = root.getchildren()[0]
metadata = {}
identifiers = sample.find('IDENTIFIERS')
barcode = identifiers.getchildren()[2].text.split(':')[-1]
attributes = sample.find('SAMPLE_ATTRIBUTES')
for node in attributes.iterfind('SAMPLE_ATTRIBUTE'):
tag, value = node.getchildren()
if value.text is None:
metadata[tag.text.strip('" ').upper()] = 'no_data'
else:
metadata[tag.text.strip('" ').upper()] = value.text.strip('" ')
description = sample.find('DESCRIPTION')
metadata['Description'] = description.text.strip('" ')
return barcode, metadata
def from_xmls_to_mapping_file(xml_paths, mapping_fp):
""" Create a mapping file from multiple xml strings
Accepts a list of xml paths, reads them and
converts them to a mapping file
Parameters
----------
xml_paths : list, file_paths
List of file paths for xml files
mapping_fp : str
File path for the resulting mapping file
"""
all_md = {}
all_cols = set(['BarcodeSequence', 'LinkerPrimerSequence'])
for xml_fp in xml_paths:
bc, md = xml_to_dict(xml_fp)
all_md[bc] = md
all_cols.update(md)
with open(mapping_fp, 'w') as md_f:
header = list(all_cols)
md_f.write('#SampleID\t')
md_f.write('\t'.join(header))
md_f.write('\n')
for sampleid, values in all_md.iteritems():
to_write = [values.get(k, "no_data").encode('utf-8')
for k in header]
to_write.insert(0, sampleid)
md_f.write('\t'.join(to_write))
md_f.write('\n')
def fetch_study(study_accession, base_dir):
"""Fetch and dump a study
Grab and dump a study. If sample_accessions
are specified, then only those specified samples
will be fetched and dumped
Parameters
----------
study_accession : str
Accession ID for the study
base_dir : str
Path of base directory to save the fetched results
Note
----
If sample_accession is None, then the entire study will be fetched
"""
if ag.is_test_env():
return 0
study_dir = os.path.join(base_dir, study_accession)
if ag.staged_raw_data() is not None:
os.symlink(ag.staged_raw_data(), study_dir)
elif not os.path.exists(study_dir):
os.mkdir(study_dir)
new_samples = 0
for sample, fastq_url in fetch_study_details(study_accession):
sample_dir = os.path.join(study_dir, sample)
if not os.path.exists(sample_dir):
# fetch files if it isn't already present
os.mkdir(sample_dir)
metadata_path = os.path.join(sample_dir,
'%s.txt' % sample)
fasta_path = os.path.join(sample_dir,
'%s.fna' % sample)
# write out fasta
with open(fasta_path, 'w') as fasta_out:
for id_, seq, qual in parse_fastq(fetch_seqs_fastq(fastq_url)):
fasta_out.write(">%s\n%s\n" % (id_, seq))
# write mapping xml
url_fmt = "http://www.ebi.ac.uk/ena/data/view/" + \
"%(accession)s&display=xml"
res = fetch_url(url_fmt % {'accession': sample})
with open(metadata_path, 'w') as md_f:
md_f.write(res.read())
new_samples += 1
return new_samples
def count_seqs(seqs_fp, subset=None):
"""Could the number of FASTA records"""
if subset is None:
return sum(1 for line in seqs_fp if line.startswith(">"))
else:
subset = set(subset)
count = 0
for id_, seq in parse_fasta(seqs_fp):
parts = id_.split()
# check if the ID is there, and handle the qiimedb suffix case
if parts[0] in subset:
count += 1
elif parts[0].split('.')[0] in subset:
count += 1
return count
def count_unique_participants(metadata_fp, criteria=None):
"""Count the number of unique participants"""
if criteria is None:
criteria = {}
header = {k: i for i, k in enumerate(
metadata_fp.next().strip().split('\t'))}
count = set()
for line in metadata_fp:
line = line.strip().split('\t')
keep = True
for crit, val in criteria.items():
if line[header[crit]] != val:
keep = False
if keep:
count.add(line[header['HOST_SUBJECT_ID']])
return len(count)
def count_samples(metadata_fp, criteria=None):
"""Count the number of samples
criteria : dict
Header keys and values to restrict by
"""
if criteria is None:
criteria = {}
header = {k: i for i, k in enumerate(
metadata_fp.next().strip().split('\t'))}
count = 0
for line in metadata_fp:
line = line.strip().split('\t')
keep = True
for crit, val in criteria.items():
if line[header[crit]] != val:
keep = False
if keep:
count += 1
return count
simple_matter_map = {
'feces': 'FECAL',
'sebum': 'SKIN',
'tongue': 'ORAL',
'skin': 'SKIN',
'mouth': 'ORAL',
'gingiva': 'ORAL',
'gingival epithelium': 'ORAL',
'nares': 'SKIN',
'skin of hand': 'SKIN',
'hand': 'SKIN',
'skin of head': 'SKIN',
'hand skin': 'SKIN',
'throat': 'ORAL',
'auricular region zone of skin': 'SKIN',
'mucosa of tongue': 'ORAL',
'mucosa of vagina': 'SKIN',
'palatine tonsil': 'ORAL',
'hard palate': 'ORAL',
'saliva': 'ORAL',
'stool': 'FECAL',
'vagina': 'SKIN',
'fossa': 'SKIN',
'buccal mucosa': 'ORAL',
'vaginal fornix': 'SKIN',
'hair follicle': 'SKIN',
'nostril': 'SKIN'
}
def clean_and_reformat_mapping(in_fp, out_fp, body_site_column_name,
exp_acronym):
"""Simplify the mapping file for use in figures
in_fp : input file-like object
out_fp : output file-like object
body_site_column_name : specify the column name for body
exp_acronym : short name for the study
Returns a dict containing a description of any unprocessed samples.
"""
errors = defaultdict(list)
mapping_lines = [l.strip('\n').split('\t') for l in in_fp]
header = mapping_lines[0]
header_low = [x.lower() for x in header]
bodysite_idx = header_low.index(body_site_column_name.lower())
country_idx = header_low.index('country')
new_mapping_lines = [header[:]]
new_mapping_lines[0].append('SIMPLE_BODY_SITE')
new_mapping_lines[0].append('TITLE_ACRONYM')
new_mapping_lines[0].append('TITLE_BODY_SITE')
new_mapping_lines[0].append('HMP_SITE')
for l in mapping_lines[1:]:
new_line = l[:]
sample_id = new_line[0]
body_site = new_line[bodysite_idx]
country = new_line[country_idx]
# grab the body site
if body_site.startswith('UBERON_'):
body_site = body_site.split('_', 1)[-1].replace("_", " ")
elif body_site.startswith('UBERON:'):
body_site = body_site.split(':', 1)[-1]
elif body_site in ['NA', 'unknown', '', 'no_data', 'None', 'Unknown',
'Unspecified']:
errors[('unspecified_bodysite', body_site)].append(sample_id)
continue
else:
raise ValueError("Cannot process: %s, %s" % (sample_id, body_site))
# remap the body site
if body_site.lower() not in simple_matter_map:
errors[('unknown_bodysite', body_site)].append(sample_id)
continue
else:
body_site = simple_matter_map[body_site.lower()]
if exp_acronym == 'HMP':
hmp_site = 'HMP-%s' % body_site
else:
hmp_site = body_site
# simplify the country
if country.startswith('GAZ:'):
new_line[country_idx] = country.split(':', 1)[-1]
new_line.append(body_site)
new_line.append(exp_acronym)
new_line.append("%s-%s" % (exp_acronym, body_site))
new_line.append(hmp_site)
new_mapping_lines.append(new_line)
out_fp.write('\n'.join(['\t'.join(l) for l in new_mapping_lines]))
out_fp.write('\n')
return errors
def add_alpha_diversity(map_, alphas):
"""Adds alpha diversity to the metadata
Parameters
----------
map_ : DataFrame
The metadata for all samples
alphas : dict
A dictionary keying the column name to a dataframe of the alpha
diversity information loaded from the `collate_alpha.py` alpha
diversity files.
Returns
-------
alpha_map : DataFrame
        A pandas data frame with the alpha diversity map attached
"""
alpha_means = pd.DataFrame.from_dict({
'%s' % metric: adf.astype(float).mean(1)
for metric, adf in alphas.iteritems()
})
metric = sorted(alpha_means.keys())[-1]
alpha_map_ = map_.join(alpha_means)
keep = alpha_map_[metric].apply(lambda x: not pd.isnull(x))
alpha_map_ = alpha_map_.loc[keep]
return alpha_map_
def get_single_id_lists(map_, depths):
"""Identifies a single sample per individual
Single samples are identified based on host subject id, and then by
randomly selecting samples from available samples at the highest
rarefaction depth. If an individual has multiple samples, but only one
above the rarefaction threshold, the sample with the higher sequencing
depth is selected, and then inherited across lists. If there are two
samples above the same threshold, the sample is selected randomly.
Parameters
----------
map_: DataFrame
A pandas dataframe where the index designates the sample ID, and
columns include 'HOST_SUBJECT_ID' for the unique individual identifier,
        and 'depth', containing the sequencing depth.
depths: iterable
The rarefaction depths used for analysis. Depths may be an iterable
of floats or castable strings.
Returns
-------
single_ids : dict
A dictionary keyed by rarefaction depth, where each value is a list
of samples representing a single sample from each subject.
"""
    # Determines the number of depths and sorts the list
num_depths = len(depths)
depths = sorted(depths)
# Sets up the output
single_ids = {depth: [] for depth in depths}
single_ids['unrare'] = []
    # Casts the depths column explicitly to a float
map_['depth'] = map_['depth'].astype(float)
for hsi, subject in map_.groupby('HOST_SUBJECT_ID'):
        # For each depth, we check to see if there are one or more samples that
        # have sufficient sequences for that depth. If there is a sample
        # which meets the depth requirement, one is chosen at random from
        # the available set of samples, and no additional depths are considered
for depth_id, depth in enumerate(depths[::-1]):
if (subject['depth'] >= float(depth)).any():
# Chooses one sample at random
id_ = np.random.choice(
subject.loc[subject['depth'] >= float(depth)].index,
replace=False
)
single_ids[depth].append(id_)
break
        # If a subject does not have sufficient sequences to meet the
# subsampling requirements for the lowest rarefaction depth,
# a sample is selected at random and included in the unrarefied list.
else:
# A sample does not exist at the lowest depth. However, it falls
# into the full list
single_ids['unrare'].append(
np.random.choice(subject.index, replace=False)
)
    # Updates the list so lower rarefaction depths inherit the ids at higher
# depths
for idx, lowest in zip(*(np.arange(num_depths, 0, -1) - 1, depths[::-1])):
if idx == 0:
            single_ids['unrare'].extend(single_ids[lowest])
else:
single_ids[depths[idx - 1]].extend(single_ids[depths[idx]])
return single_ids
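# Illustrative sketch (added; the tiny frame below is hypothetical): with two samples
# from one subject and rarefaction depths of 1000 and 10000 reads,
#
#   map_ = pd.DataFrame({'HOST_SUBJECT_ID': ['A', 'A'], 'depth': [1500, 12000]},
#                       index=['A.1', 'A.2'])
#   get_single_id_lists(map_, [1000, 10000])
#
# places 'A.2' in the 10000 list, and the 1000 and 'unrare' lists inherit it.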
def collapse_taxonomy(_bt, level=5):
"""Collapses OTUs by taxonomy
Parameters
----------
_bt : biom table
Table to collapse
level : int, optional
Level to collapse to. 0=kingdom, 1=phylum,...,5=genus, 6=species
Default 5
Returns
-------
biom table
Collapsed biom table
Citations
---------
[1] http://biom-format.org/documentation/table_objects.html
"""
def collapse_f(id_, md):
return '; '.join(md['taxonomy'][:level + 1])
collapsed = _bt.collapse(collapse_f, axis='observation', norm=False)
return collapsed
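# Hedged usage sketch (added; assumes each observation carries a 'taxonomy' metadata
# list such as ['k__Bacteria', 'p__Firmicutes', ...]):
#
#   genus_bt = collapse_taxonomy(bt)             # default level=5 -> genus
#   phylum_bt = collapse_taxonomy(bt, level=1)   # kingdom;phylum strings become ids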
def collapse_full(_bt):
"""Collapses full biom table to median of each OTU
Parameters
----------
_bt : biom table
Table to collapse
Returns
-------
biom table
Collapsed biom table, one sample containing median of each OTU,
normalized.
"""
num_obs = len(_bt.ids(axis='observation'))
table = Table(np.array(
[np.median(v) for v in _bt.iter_data(axis='observation')]).reshape(
(num_obs, 1)),
_bt.ids(axis='observation'), ['average'],
observation_metadata=_bt.metadata(axis='observation'))
table.norm(inplace=True)
return table
|
biocore/American-Gut
|
americangut/util.py
|
Python
|
bsd-3-clause
| 19,771
|
[
"ADF"
] |
8f43ed4341aeab17ee4c53826fd3966a9031df9568e91ed8b6a5c33db120b58e
|
"""Tests of tools for setting up interactive IPython sessions."""
import ast
from diofant.interactive.session import IntegerDivisionWrapper
__all__ = ()
def test_IntegerDivisionWrapper():
tree = ast.parse('1/3')
tree2 = ast.parse('Rational(1, 3)')
dump = ast.dump(tree2)
tree_new = IntegerDivisionWrapper().visit(tree)
assert ast.dump(tree_new) == dump
tree = ast.parse('1 + 3')
tree_new = IntegerDivisionWrapper().visit(tree)
assert ast.dump(tree_new) == ast.dump(tree)
tree = ast.parse('-1/3')
tree2 = ast.parse('Rational(-1, 3)')
dump = ast.dump(tree2)
tree_new = IntegerDivisionWrapper().visit(tree)
assert ast.dump(tree_new) == dump
tree = ast.parse('2**3/7')
tree2 = ast.parse('Rational(2**3, 7)')
dump = ast.dump(tree2)
tree_new = IntegerDivisionWrapper().visit(tree)
assert ast.dump(tree_new) == ast.dump(tree)
tree = ast.parse('(3 + 5)/7')
tree2 = ast.parse('Rational(3 + 5, 7)')
dump = ast.dump(tree2)
tree_new = IntegerDivisionWrapper().visit(tree)
assert ast.dump(tree_new) == ast.dump(tree)
tree = ast.parse('2**x/3')
tree_new = IntegerDivisionWrapper().visit(tree)
assert ast.dump(tree_new) == ast.dump(tree)
|
skirpichev/omg
|
diofant/tests/interactive/test_ipython.py
|
Python
|
bsd-3-clause
| 1,238
|
[
"VisIt"
] |
9e66c0d3a58953e499b61adbce6228534d8f4470c51e2b23ea2540c3c8955500
|
"""
@name: PyHouse/src/Modules/Computer/Communication/_test/test_communication.py
@author: D. Brian Kimmel
@contact: d.briankimmel@gmail.com
@copyright: 2016-2017 by D. Brian Kimmel
@date: Created on May 30, 2016
@license: MIT License
@summary:
Passed all 5 tests - DBK - 2017-01-19
"""
__updated__ = '2018-02-12'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
# Import PyMh files and modules.
from test.xml_data import XML_LONG, TESTING_PYHOUSE
from test.testing_mixin import SetupPyHouseObj
from Modules.Computer.test.xml_computer import \
TESTING_COMPUTER_DIVISION
from Modules.Computer.Communication.test.xml_communications import \
XML_COMMUNICATION, \
TESTING_COMMUNICATION_SECTION, \
TESTING_EMAIL_SECTION
# from Modules.Core.Utilities.debug_tools import PrettyFormatAny
class SetupMixin(object):
"""
"""
def setUp(self, p_root):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
self.m_xml = SetupPyHouseObj().BuildXml(p_root)
class A0(unittest.TestCase):
def setUp(self):
pass
def test_00_Print(self):
print('Id: test_communications')
class A1_Setup(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self, ET.fromstring(XML_LONG))
def test_01_PyHouse(self):
""" Be sure that the XML contains the right stuff.
"""
self.assertEqual(self.m_pyhouse_obj.Computer.Communication, {})
def test_02_FindXML(self):
""" Be sure that the XML contains the right stuff.
"""
self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)
self.assertEqual(self.m_xml.computer_div.tag, TESTING_COMPUTER_DIVISION)
self.assertEqual(self.m_xml.email_sect.tag, TESTING_EMAIL_SECTION)
class A2_Xml(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self, ET.fromstring('<x />'))
pass
def test_01_Raw(self):
l_raw = XML_COMMUNICATION
# print(l_raw)
self.assertEqual(l_raw[:22], '<CommunicationSection>')
def test_02_Parsed(self):
l_xml = ET.fromstring(XML_COMMUNICATION)
# print(l_xml)
self.assertEqual(l_xml.tag, TESTING_COMMUNICATION_SECTION)
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/Communication/_test/test_communication.py
|
Python
|
mit
| 2,303
|
[
"Brian"
] |
5b0b05bc5329e0372bede08633bff5321077805b40761e19c3c3f0a299ad5c2c
|
from future import standard_library
standard_library.install_aliases()
import sst
import sst.actions
from urllib.parse import urlparse
sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT)
# haven't visited a url yet, so all assert_url* fail
sst.actions.fails(
sst.actions.assert_url, 'http://localhost:%s/' % sst.DEVSERVER_PORT)
sst.actions.fails(
sst.actions.assert_url_contains, 'localhost')
sst.actions.fails(
sst.actions.assert_url_network_location, 'localhost')
# now visit a url and test assertions
sst.actions.go_to('%sbegin' % sst.actions.get_base_url())
sst.actions.assert_url('/begin')
sst.actions.assert_url('/begin/')
sst.actions.assert_url('begin/')
sst.actions.assert_url('http://localhost:%s/begin' % sst.DEVSERVER_PORT)
sst.actions.assert_url('http://localhost:%s/begin/' % sst.DEVSERVER_PORT)
sst.actions.assert_url(urlparse('http://wrongurl/begin').path)
sst.actions.fails(sst.actions.assert_url, 'http://wrongurl/begin')
sst.actions.assert_url_contains('http://localhost:%s/begin'
% sst.DEVSERVER_PORT)
sst.actions.assert_url_contains('localhost:%s' % sst.DEVSERVER_PORT)
sst.actions.assert_url_contains('.*/begin', regex=True)
sst.actions.assert_url_contains('http://.*/begin', regex=True)
sst.actions.assert_url_contains('.*//localhost', regex=True)
sst.actions.assert_url_contains('lo[C|c]a.*host', regex=True)
sst.actions.fails(sst.actions.assert_url_contains, 'foobar')
sst.actions.fails(sst.actions.assert_url_contains, 'foobar', regex=True)
sst.actions.assert_url_network_location('localhost:%s' % sst.DEVSERVER_PORT)
sst.actions.fails(sst.actions.assert_url_network_location, 'localhost')
sst.actions.fails(sst.actions.assert_url_network_location, '')
# visit url with query strings and fragments, then test assertions
sst.actions.go_to('/begin?query_string#fragment_id')
sst.actions.assert_url(
'http://localhost:%s/begin?query_string#fragment_id' % sst.DEVSERVER_PORT)
sst.actions.assert_url('/begin?query_string#fragment_id')
sst.actions.fails(sst.actions.assert_url, '/begin')
sst.actions.fails(
sst.actions.assert_url, 'http://localhost:%s/begin' % sst.DEVSERVER_PORT)
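# Hedged aside, not part of sst: assert_url_network_location presumably
# compares the network-location component of the current URL, so the
# expectations above can be reproduced with urllib.parse alone. The helper
# name _netloc_matches is invented for illustration.
from urllib.parse import urlparse as _urlparse
def _netloc_matches(current_url, expected_netloc):
    return _urlparse(current_url).netloc == expected_netloc
assert _netloc_matches('http://localhost:8000/begin', 'localhost:8000')
assert not _netloc_matches('http://localhost:8000/begin', 'localhost')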
|
DramaFever/sst
|
src/sst/selftests/assert_urls.py
|
Python
|
apache-2.0
| 2,179
|
[
"VisIt"
] |
a8181dfc3128bd8b450c9bfbf7a3e66d7bcac49b8bfd36f062b3215116e07b75
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PatientInformation.notes'
db.add_column('patient_patientinformation', 'notes',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PatientInformation.notes'
db.delete_column('patient_patientinformation', 'notes')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_injection': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.GynaecologicalHistory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PresentMedicalHistory']"})
},
'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '2'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"})
},
'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'patient.prescription': {
'Meta': {'object_name': 'Prescription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_prescription': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"})
},
'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'maternal_complication': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'perinatal_complication': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'patient.ultrasoundscanningimage': {
'Meta': {'object_name': 'UltrasoundScanningImage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'us': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['patient.UltrasoundScanning']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient']
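# For context only, not part of this South migration: with Django's built-in
# migration framework the same schema change would look roughly like the
# operation below. The dependency name '0031_previous' is a placeholder, not
# taken from the repository.
#
# from django.db import migrations, models
#
# class Migration(migrations.Migration):
#     dependencies = [('patient', '0031_previous')]
#     operations = [
#         migrations.AddField(
#             model_name='patientinformation',
#             name='notes',
#             field=models.TextField(default=''),
#         ),
#     ]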
|
aazhbd/medical_info01
|
patient/migrations/0032_auto__add_field_patientinformation_notes.py
|
Python
|
bsd-3-clause
| 30,204
|
[
"VisIt"
] |
d6d64808a6e74ba7e0c0c6dc2e4d424afd295eaa1ee9cf3381cfd85acbfb348b
|
# list Map scans from saved 1mm data
from scipy.io import netcdf
from glob import glob
from datetime import datetime, timedelta
import numpy as np
files = glob('/data_lmt/vlbi1mm/vlbi1mm_2016-*.nc')
for fname in sorted(files):
try:
nc = netcdf.netcdf_file(fname)
v = nc.variables
pgm = ''.join(v['Header.Dcs.ObsPgm'].data).strip()
if pgm == 'Map':
rate = v['Header.Map.ScanRate'].data * (180/np.pi) * (3600.)
source = ''.join(v['Header.Source.SourceName'].data).strip()
elif pgm == 'On':
rate = 0
source = 'On'
else:
continue
onum = v['Header.Dcs.ObsNum'].data
time = v['Data.Sky.Time'].data
duration = time[-1] - time[0]
date = str(v['Header.TimePlace.UTDate'].data)
year = datetime.strptime(date[:4], "%Y")
fyear = float(date[4:])
dt = timedelta(days = fyear * 365)
day = year + dt
daystr = datetime.strftime(day, "%m/%d %H:%M UT")
print "%5d %s %10s %4.0f %4.0f" % (onum, daystr, source, duration, rate)
except:
None
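# Worked example (illustrative, not from the original script) of the UTDate
# decoding used above: a header value such as 2016.25 is split into the year
# part '2016' plus a fractional part interpreted as 0.25 * 365 days.
from datetime import datetime, timedelta
_val = '2016.25'
_base = datetime.strptime(_val[:4], "%Y")
_day = _base + timedelta(days=float(_val[4:]) * 365)
print(datetime.strftime(_day, "%m/%d %H:%M UT"))  # 04/01 06:00 UT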
|
sao-eht/lmtscripts
|
2017/list2016maps.py
|
Python
|
mit
| 976
|
[
"NetCDF"
] |
29abe5c191076c3514de4a30ffba27e8dc52a647ccb3f78d4f13d2dd8bbfae06
|
#!/usr/bin/env python
# Copyright 2017-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: James D. McClain
# Timothy Berkelbach <tim.berkelbach@gmail.com>
#
import numpy
from functools import reduce
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc import scf
from pyscf.cc import gccsd
from pyscf.cc import ccsd
from pyscf.pbc.mp.kmp2 import (get_frozen_mask, get_nmo, get_nocc,
padded_mo_coeff, padding_k_idx) # noqa
from pyscf.pbc.cc import kintermediates as imdk
from pyscf.lib.parameters import LOOSE_ZERO_TOL, LARGE_DENOM # noqa
from pyscf.pbc.lib import kpts_helper
DEBUG = False
#
# FIXME: When linear dependence is found in KHF and handled by function
# pyscf.scf.addons.remove_linear_dep_, different k-point may have different
# number of orbitals.
#
#einsum = numpy.einsum
einsum = lib.einsum
def energy(cc, t1, t2, eris):
nkpts, nocc, nvir = t1.shape
fock = eris.fock
eris_oovv = eris.oovv.copy()
e = 0.0 + 0j
for ki in range(nkpts):
e += einsum('ia,ia', fock[ki, :nocc, nocc:], t1[ki, :, :])
t1t1 = numpy.zeros(shape=t2.shape, dtype=t2.dtype)
for ki in range(nkpts):
ka = ki
for kj in range(nkpts):
#kb = kj
t1t1[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1[ki, :, :], t1[kj, :, :])
tau = t2 + 2 * t1t1
e += 0.25 * numpy.dot(tau.flatten(), eris_oovv.flatten())
e /= nkpts
if abs(e.imag) > 1e-4:
logger.warn(cc, 'Non-zero imaginary part found in KCCSD energy %s', e)
return e.real
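# For reference (not in the original source): the loop above evaluates the
# standard CCSD correlation energy
#   E_corr = sum_{ia} f_{ia} t_i^a
#          + (1/4) sum_{ijab} <ij||ab> (t_{ij}^{ab} + 2 t_i^a t_j^b),
# with tau = t2 + 2 * t1t1 supplying the parenthesised factor and the final
# division by nkpts giving the per-cell energy.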
def update_amps(cc, t1, t2, eris):
time0 = logger.process_clock(), logger.perf_counter()
log = logger.Logger(cc.stdout, cc.verbose)
nkpts, nocc, nvir = t1.shape
fock = eris.fock
mo_e_o = [e[:nocc] for e in eris.mo_energy]
mo_e_v = [e[nocc:] + cc.level_shift for e in eris.mo_energy]
# Get location of padded elements in occupied and virtual space
nonzero_opadding, nonzero_vpadding = padding_k_idx(cc, kind="split")
fov = fock[:, :nocc, nocc:].copy()
# Get the momentum conservation array
# Note: chemist's notation for momentum conserving t2(ki,kj,ka,kb), even though
# integrals are in physics notation
kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
tau = imdk.make_tau(cc, t2, t1, t1, kconserv)
Fvv = imdk.cc_Fvv(cc, t1, t2, eris, kconserv)
Foo = imdk.cc_Foo(cc, t1, t2, eris, kconserv)
Fov = imdk.cc_Fov(cc, t1, t2, eris, kconserv)
Woooo = imdk.cc_Woooo(cc, t1, t2, eris, kconserv)
Wvvvv = imdk.cc_Wvvvv(cc, t1, t2, eris, kconserv)
Wovvo = imdk.cc_Wovvo(cc, t1, t2, eris, kconserv)
# Move energy terms to the other side
for k in range(nkpts):
Foo[k][numpy.diag_indices(nocc)] -= mo_e_o[k]
Fvv[k][numpy.diag_indices(nvir)] -= mo_e_v[k]
eris_ovvo = numpy.zeros(shape=(nkpts, nkpts, nkpts, nocc, nvir, nvir, nocc), dtype=t2.dtype)
eris_oovo = numpy.zeros(shape=(nkpts, nkpts, nkpts, nocc, nocc, nvir, nocc), dtype=t2.dtype)
eris_vvvo = numpy.zeros(shape=(nkpts, nkpts, nkpts, nvir, nvir, nvir, nocc), dtype=t2.dtype)
for km, kb, ke in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ke, kb]
# <mb||je> -> -<mb||ej>
eris_ovvo[km, kb, ke] = -eris.ovov[km, kb, kj].transpose(0, 1, 3, 2)
# <mn||je> -> -<mn||ej>
# let kb = kn as a dummy variable
eris_oovo[km, kb, ke] = -eris.ooov[km, kb, kj].transpose(0, 1, 3, 2)
# <ma||be> -> - <be||am>*
# let kj = ka as a dummy variable
kj = kconserv[km, ke, kb]
eris_vvvo[ke, kj, kb] = -eris.ovvv[km, kb, ke].transpose(2, 3, 1, 0).conj()
# T1 equation
t1new = numpy.zeros(shape=t1.shape, dtype=t1.dtype)
for ka in range(nkpts):
ki = ka
t1new[ka] += numpy.array(fov[ka, :, :]).conj()
t1new[ka] += einsum('ie,ae->ia', t1[ka], Fvv[ka])
t1new[ka] += -einsum('ma,mi->ia', t1[ka], Foo[ka])
for km in range(nkpts):
t1new[ka] += einsum('imae,me->ia', t2[ka, km, ka], Fov[km])
t1new[ka] += -einsum('nf,naif->ia', t1[km], eris.ovov[km, ka, ki])
for kn in range(nkpts):
ke = kconserv[km, ki, kn]
t1new[ka] += -0.5 * einsum('imef,maef->ia', t2[ki, km, ke], eris.ovvv[km, ka, ke])
t1new[ka] += -0.5 * einsum('mnae,nmei->ia', t2[km, kn, ka], eris_oovo[kn, km, ke])
# T2 equation
t2new = numpy.array(eris.oovv).conj()
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
# Chemist's notation for momentum conserving t2(ki,kj,ka,kb)
kb = kconserv[ki, ka, kj]
Ftmp = Fvv[kb] - 0.5 * einsum('mb,me->be', t1[kb], Fov[kb])
tmp = einsum('ijae,be->ijab', t2[ki, kj, ka], Ftmp)
t2new[ki, kj, ka] += tmp
#t2new[ki,kj,kb] -= tmp.transpose(0,1,3,2)
Ftmp = Fvv[ka] - 0.5 * einsum('ma,me->ae', t1[ka], Fov[ka])
tmp = einsum('ijbe,ae->ijab', t2[ki, kj, kb], Ftmp)
t2new[ki, kj, ka] -= tmp
Ftmp = Foo[kj] + 0.5 * einsum('je,me->mj', t1[kj], Fov[kj])
tmp = einsum('imab,mj->ijab', t2[ki, kj, ka], Ftmp)
t2new[ki, kj, ka] -= tmp
#t2new[kj,ki,ka] += tmp.transpose(1,0,2,3)
Ftmp = Foo[ki] + 0.5 * einsum('ie,me->mi', t1[ki], Fov[ki])
tmp = einsum('jmab,mi->ijab', t2[kj, ki, ka], Ftmp)
t2new[ki, kj, ka] += tmp
for km in range(nkpts):
# Wminj
# - km - kn + ka + kb = 0
# => kn = ka - km + kb
kn = kconserv[ka, km, kb]
t2new[ki, kj, ka] += 0.5 * einsum('mnab,mnij->ijab', tau[km, kn, ka], Woooo[km, kn, ki])
ke = km
t2new[ki, kj, ka] += 0.5 * einsum('ijef,abef->ijab', tau[ki, kj, ke], Wvvvv[ka, kb, ke])
# Wmbej
# - km - kb + ke + kj = 0
# => ke = km - kj + kb
ke = kconserv[km, kj, kb]
tmp = einsum('imae,mbej->ijab', t2[ki, km, ka], Wovvo[km, kb, ke])
# - km - kb + ke + kj = 0
# => ke = km - kj + kb
#
# t[i,e] => ki = ke
# t[m,a] => km = ka
if km == ka and ke == ki:
tmp -= einsum('ie,ma,mbej->ijab', t1[ki], t1[km], eris_ovvo[km, kb, ke])
t2new[ki, kj, ka] += tmp
t2new[ki, kj, kb] -= tmp.transpose(0, 1, 3, 2)
t2new[kj, ki, ka] -= tmp.transpose(1, 0, 2, 3)
t2new[kj, ki, kb] += tmp.transpose(1, 0, 3, 2)
ke = ki
tmp = einsum('ie,abej->ijab', t1[ki], eris_vvvo[ka, kb, ke])
t2new[ki, kj, ka] += tmp
# P(ij) term
ke = kj
tmp = einsum('je,abei->ijab', t1[kj], eris_vvvo[ka, kb, ke])
t2new[ki, kj, ka] -= tmp
km = ka
tmp = einsum('ma,mbij->ijab', t1[ka], eris.ovoo[km, kb, ki])
t2new[ki, kj, ka] -= tmp
# P(ab) term
km = kb
tmp = einsum('mb,maij->ijab', t1[kb], eris.ovoo[km, ka, ki])
t2new[ki, kj, ka] += tmp
for ki in range(nkpts):
ka = ki
# Remove zero/padded elements from denominator
eia = LARGE_DENOM * numpy.ones((nocc, nvir), dtype=eris.mo_energy[0].dtype)
n0_ovp_ia = numpy.ix_(nonzero_opadding[ki], nonzero_vpadding[ka])
eia[n0_ovp_ia] = (mo_e_o[ki][:,None] - mo_e_v[ka])[n0_ovp_ia]
t1new[ki] /= eia
kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki, ka, kj]
# For LARGE_DENOM, see t1new update above
eia = LARGE_DENOM * numpy.ones((nocc, nvir), dtype=eris.mo_energy[0].dtype)
n0_ovp_ia = numpy.ix_(nonzero_opadding[ki], nonzero_vpadding[ka])
eia[n0_ovp_ia] = (mo_e_o[ki][:,None] - mo_e_v[ka])[n0_ovp_ia]
ejb = LARGE_DENOM * numpy.ones((nocc, nvir), dtype=eris.mo_energy[0].dtype)
n0_ovp_jb = numpy.ix_(nonzero_opadding[kj], nonzero_vpadding[kb])
ejb[n0_ovp_jb] = (mo_e_o[kj][:,None] - mo_e_v[kb])[n0_ovp_jb]
eijab = eia[:, None, :, None] + ejb[:, None, :]
t2new[ki, kj, ka] /= eijab
time0 = log.timer_debug1('update t1 t2', *time0)
return t1new, t2new
def spatial2spin(tx, orbspin, kconserv):
'''Convert T1/T2 of spatial orbital representation to T1/T2 of
spin-orbital representation
'''
if isinstance(tx, numpy.ndarray) and tx.ndim == 3:
# KRCCSD t1 amplitudes
return spatial2spin((tx,tx), orbspin, kconserv)
elif isinstance(tx, numpy.ndarray) and tx.ndim == 7:
# KRCCSD t2 amplitudes
t2aa = numpy.zeros_like(tx)
nkpts = t2aa.shape[2]
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki,ka,kj]
t2aa[ki,kj,ka] = tx[ki,kj,ka] - tx[ki,kj,kb].transpose(0,1,3,2)
return spatial2spin((t2aa,tx,t2aa), orbspin, kconserv)
elif len(tx) == 2: # KUCCSD t1
t1a, t1b = tx
nocc_a, nvir_a = t1a.shape[1:]
nocc_b, nvir_b = t1b.shape[1:]
else: # KUCCSD t2
t2aa, t2ab, t2bb = tx
nocc_a, nocc_b, nvir_a, nvir_b = t2ab.shape[3:]
nkpts = len(orbspin)
nocc = nocc_a + nocc_b
nvir = nvir_a + nvir_b
idxoa = [numpy.where(orbspin[k][:nocc] == 0)[0] for k in range(nkpts)]
idxob = [numpy.where(orbspin[k][:nocc] == 1)[0] for k in range(nkpts)]
idxva = [numpy.where(orbspin[k][nocc:] == 0)[0] for k in range(nkpts)]
idxvb = [numpy.where(orbspin[k][nocc:] == 1)[0] for k in range(nkpts)]
if len(tx) == 2: # t1
t1 = numpy.zeros((nkpts,nocc,nvir), dtype=t1a.dtype)
for k in range(nkpts):
lib.takebak_2d(t1[k], t1a[k], idxoa[k], idxva[k])
lib.takebak_2d(t1[k], t1b[k], idxob[k], idxvb[k])
t1 = lib.tag_array(t1, orbspin=orbspin)
return t1
else:
t2 = numpy.zeros((nkpts,nkpts,nkpts,nocc**2,nvir**2), dtype=t2aa.dtype)
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki,ka,kj]
idxoaa = idxoa[ki][:,None] * nocc + idxoa[kj]
idxoab = idxoa[ki][:,None] * nocc + idxob[kj]
idxoba = idxob[kj][:,None] * nocc + idxoa[ki]
idxobb = idxob[ki][:,None] * nocc + idxob[kj]
idxvaa = idxva[ka][:,None] * nvir + idxva[kb]
idxvab = idxva[ka][:,None] * nvir + idxvb[kb]
idxvba = idxvb[kb][:,None] * nvir + idxva[ka]
idxvbb = idxvb[ka][:,None] * nvir + idxvb[kb]
tmp2aa = t2aa[ki,kj,ka].reshape(nocc_a*nocc_a,nvir_a*nvir_a)
tmp2bb = t2bb[ki,kj,ka].reshape(nocc_b*nocc_b,nvir_b*nvir_b)
tmp2ab = t2ab[ki,kj,ka].reshape(nocc_a*nocc_b,nvir_a*nvir_b)
lib.takebak_2d(t2[ki,kj,ka], tmp2aa, idxoaa.ravel() , idxvaa.ravel() )
lib.takebak_2d(t2[ki,kj,ka], tmp2bb, idxobb.ravel() , idxvbb.ravel() )
lib.takebak_2d(t2[ki,kj,ka], tmp2ab, idxoab.ravel() , idxvab.ravel() )
lib.takebak_2d(t2[kj,ki,kb], tmp2ab, idxoba.T.ravel(), idxvba.T.ravel())
abba = -tmp2ab
lib.takebak_2d(t2[ki,kj,kb], abba, idxoab.ravel() , idxvba.T.ravel())
lib.takebak_2d(t2[kj,ki,ka], abba, idxoba.T.ravel(), idxvab.ravel() )
t2 = t2.reshape(nkpts,nkpts,nkpts,nocc,nocc,nvir,nvir)
t2 = lib.tag_array(t2, orbspin=orbspin)
return t2
def spin2spatial(tx, orbspin, kconserv):
if tx.ndim == 3: # t1
nocc, nvir = tx.shape[1:]
else:
nocc, nvir = tx.shape[4:6]
nkpts = len(tx)
idxoa = [numpy.where(orbspin[k][:nocc] == 0)[0] for k in range(nkpts)]
idxob = [numpy.where(orbspin[k][:nocc] == 1)[0] for k in range(nkpts)]
idxva = [numpy.where(orbspin[k][nocc:] == 0)[0] for k in range(nkpts)]
idxvb = [numpy.where(orbspin[k][nocc:] == 1)[0] for k in range(nkpts)]
nocc_a = len(idxoa[0])
nocc_b = len(idxob[0])
nvir_a = len(idxva[0])
nvir_b = len(idxvb[0])
if tx.ndim == 3: # t1
t1a = numpy.zeros((nkpts,nocc_a,nvir_a), dtype=tx.dtype)
t1b = numpy.zeros((nkpts,nocc_b,nvir_b), dtype=tx.dtype)
for k in range(nkpts):
lib.take_2d(tx[k], idxoa[k], idxva[k], out=t1a[k])
lib.take_2d(tx[k], idxob[k], idxvb[k], out=t1b[k])
return t1a, t1b
else:
t2aa = numpy.zeros((nkpts,nkpts,nkpts,nocc_a,nocc_a,nvir_a,nvir_a), dtype=tx.dtype)
t2ab = numpy.zeros((nkpts,nkpts,nkpts,nocc_a,nocc_b,nvir_a,nvir_b), dtype=tx.dtype)
t2bb = numpy.zeros((nkpts,nkpts,nkpts,nocc_b,nocc_b,nvir_b,nvir_b), dtype=tx.dtype)
t2 = tx.reshape(nkpts,nkpts,nkpts,nocc**2,nvir**2)
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki,ka,kj]
idxoaa = idxoa[ki][:,None] * nocc + idxoa[kj]
idxoab = idxoa[ki][:,None] * nocc + idxob[kj]
idxobb = idxob[ki][:,None] * nocc + idxob[kj]
idxvaa = idxva[ka][:,None] * nvir + idxva[kb]
idxvab = idxva[ka][:,None] * nvir + idxvb[kb]
idxvbb = idxvb[ka][:,None] * nvir + idxvb[kb]
lib.take_2d(t2[ki,kj,ka], idxoaa.ravel(), idxvaa.ravel(), out=t2aa[ki,kj,ka])
lib.take_2d(t2[ki,kj,ka], idxobb.ravel(), idxvbb.ravel(), out=t2bb[ki,kj,ka])
lib.take_2d(t2[ki,kj,ka], idxoab.ravel(), idxvab.ravel(), out=t2ab[ki,kj,ka])
return t2aa, t2ab, t2bb
class GCCSD(gccsd.GCCSD):
def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
assert (isinstance(mf, scf.khf.KSCF))
if not isinstance(mf, scf.kghf.KGHF):
mf = scf.addons.convert_to_ghf(mf)
self.kpts = mf.kpts
self.khelper = kpts_helper.KptsHelper(mf.cell, mf.kpts)
gccsd.GCCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
@property
def nkpts(self):
return len(self.kpts)
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
def dump_flags(self, verbose=None):
logger.info(self, '\n')
logger.info(self, '******** PBC CC flags ********')
gccsd.GCCSD.dump_flags(self, verbose)
return self
def init_amps(self, eris):
time0 = logger.process_clock(), logger.perf_counter()
nocc = self.nocc
nvir = self.nmo - nocc
nkpts = self.nkpts
mo_e_o = [eris.mo_energy[k][:nocc] for k in range(nkpts)]
mo_e_v = [eris.mo_energy[k][nocc:] for k in range(nkpts)]
t1 = numpy.zeros((nkpts, nocc, nvir), dtype=numpy.complex128)
t2 = numpy.zeros((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), dtype=numpy.complex128)
self.emp2 = 0
eris_oovv = eris.oovv.copy()
# Get location of padded elements in occupied and virtual space
nonzero_opadding, nonzero_vpadding = padding_k_idx(self, kind="split")
kconserv = kpts_helper.get_kconserv(self._scf.cell, self.kpts)
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki, ka, kj]
# For LARGE_DENOM, see t1new update above
eia = LARGE_DENOM * numpy.ones((nocc, nvir), dtype=eris.mo_energy[0].dtype)
n0_ovp_ia = numpy.ix_(nonzero_opadding[ki], nonzero_vpadding[ka])
eia[n0_ovp_ia] = (mo_e_o[ki][:,None] - mo_e_v[ka])[n0_ovp_ia]
ejb = LARGE_DENOM * numpy.ones((nocc, nvir), dtype=eris.mo_energy[0].dtype)
n0_ovp_jb = numpy.ix_(nonzero_opadding[kj], nonzero_vpadding[kb])
ejb[n0_ovp_jb] = (mo_e_o[kj][:,None] - mo_e_v[kb])[n0_ovp_jb]
eijab = eia[:, None, :, None] + ejb[:, None, :]
t2[ki, kj, ka] = eris_oovv[ki, kj, ka] / eijab
t2 = numpy.conj(t2)
self.emp2 = 0.25 * numpy.einsum('pqrijab,pqrijab', t2, eris_oovv).real
self.emp2 /= nkpts
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2.real)
logger.timer(self, 'init mp2', *time0)
return self.emp2, t1, t2
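# For reference (not in the original source): the guess built above is the
# standard MP2 amplitude and energy,
#   t2_{ij}^{ab} = <ij||ab>* / (e_i + e_j - e_a - e_b),
#   E_MP2 = (1/4) sum_{ijab} <ij||ab> t2_{ij}^{ab} / nkpts,
# with padded (frozen or linearly dependent) orbitals suppressed through the
# LARGE_DENOM entries in the denominators.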
def ccsd(self, t1=None, t2=None, eris=None, **kwargs):
if eris is None: eris = self.ao2mo(self.mo_coeff)
e_corr, self.t1, self.t2 = ccsd.CCSD.ccsd(self, t1, t2, eris)
if getattr(eris, 'orbspin', None) is not None:
self.t1 = lib.tag_array(self.t1, orbspin=eris.orbspin)
self.t2 = lib.tag_array(self.t2, orbspin=eris.orbspin)
return e_corr, self.t1, self.t2
update_amps = update_amps
energy = energy
def ao2mo(self, mo_coeff=None):
nkpts = self.nkpts
nmo = self.nmo
mem_incore = nkpts**3 * nmo**4 * 8 / 1e6
mem_now = lib.current_memory()[0]
if (mem_incore + mem_now < self.max_memory) or self.mol.incore_anyway:
return _make_eris_incore(self, mo_coeff)
else:
raise NotImplementedError
def ccsd_t(self, t1=None, t2=None, eris=None):
from pyscf.pbc.cc import kccsd_t
if t1 is None: t1 = self.t1
if t2 is None: t2 = self.t2
if eris is None: eris = self.ao2mo(self.mo_coeff)
return kccsd_t.kernel(self, eris, t1, t2, self.verbose)
def amplitudes_to_vector(self, t1, t2):
return numpy.hstack((t1.ravel(), t2.ravel()))
def vector_to_amplitudes(self, vec, nmo=None, nocc=None):
if nocc is None: nocc = self.nocc
if nmo is None: nmo = self.nmo
nvir = nmo - nocc
nkpts = self.nkpts
nov = nkpts * nocc * nvir
t1 = vec[:nov].reshape(nkpts, nocc, nvir)
t2 = vec[nov:].reshape(nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir)
return t1, t2
def spatial2spin(self, tx, orbspin=None, kconserv=None):
if orbspin is None:
if getattr(self.mo_coeff[0], 'orbspin', None) is not None:
orbspin = [self.mo_coeff[k].orbspin[idx]
for k, idx in enumerate(self.get_frozen_mask())]
else:
orbspin = numpy.zeros((self.nkpts,self.nmo), dtype=int)
orbspin[:,1::2] = 1
if kconserv is None:
kconserv = kpts_helper.get_kconserv(self._scf.cell, self.kpts)
return spatial2spin(tx, orbspin, kconserv)
def spin2spatial(self, tx, orbspin=None, kconserv=None):
if orbspin is None:
if getattr(self.mo_coeff[0], 'orbspin', None) is not None:
orbspin = [self.mo_coeff[k].orbspin[idx]
for k, idx in enumerate(self.get_frozen_mask())]
else:
orbspin = numpy.zeros((self.nkpts,self.nmo), dtype=int)
orbspin[:,1::2] = 1
if kconserv is None:
kconserv = kpts_helper.get_kconserv(self._scf.cell, self.kpts)
return spin2spatial(tx, orbspin, kconserv)
def from_uccsd(self, t1, t2, orbspin=None):
return self.spatial2spin(t1, orbspin), self.spatial2spin(t2, orbspin)
def to_uccsd(self, t1, t2, orbspin=None):
return spin2spatial(t1, orbspin), spin2spatial(t2, orbspin)
CCSD = KCCSD = KGCCSD = GCCSD
def _make_eris_incore(cc, mo_coeff=None):
from pyscf.pbc import tools
from pyscf.pbc.cc.ccsd import _adjust_occ
log = logger.Logger(cc.stdout, cc.verbose)
cput0 = (logger.process_clock(), logger.perf_counter())
eris = gccsd._PhysicistsERIs()
cell = cc._scf.cell
kpts = cc.kpts
nkpts = cc.nkpts
nocc = cc.nocc
nmo = cc.nmo
eris.nocc = nocc
#if any(nocc != numpy.count_nonzero(cc._scf.mo_occ[k] > 0) for k in range(nkpts)):
# raise NotImplementedError('Different occupancies found for different k-points')
if mo_coeff is None:
mo_coeff = cc.mo_coeff
nao = mo_coeff[0].shape[0]
dtype = mo_coeff[0].dtype
moidx = get_frozen_mask(cc)
nocc_per_kpt = numpy.asarray(get_nocc(cc, per_kpoint=True))
nmo_per_kpt = numpy.asarray(get_nmo(cc, per_kpoint=True))
padded_moidx = []
for k in range(nkpts):
kpt_nocc = nocc_per_kpt[k]
kpt_nvir = nmo_per_kpt[k] - kpt_nocc
kpt_padded_moidx = numpy.concatenate((numpy.ones(kpt_nocc, dtype=bool),
numpy.zeros(nmo - kpt_nocc - kpt_nvir, dtype=bool),
numpy.ones(kpt_nvir, dtype=bool)))
padded_moidx.append(kpt_padded_moidx)
eris.mo_coeff = []
eris.orbspin = []
# Generate the molecular orbital coefficients with the frozen orbitals masked.
# Each MO is tagged with orbspin, a list of 0's and 1's that give the overall
# spin of each MO.
#
# Here we will work with two index arrays; one is for our original (small) moidx
# array while the next is for our new (large) padded array.
for k in range(nkpts):
kpt_moidx = moidx[k]
kpt_padded_moidx = padded_moidx[k]
mo = numpy.zeros((nao, nmo), dtype=dtype)
mo[:, kpt_padded_moidx] = mo_coeff[k][:, kpt_moidx]
if getattr(mo_coeff[k], 'orbspin', None) is not None:
orbspin_dtype = mo_coeff[k].orbspin[kpt_moidx].dtype
orbspin = numpy.zeros(nmo, dtype=orbspin_dtype)
orbspin[kpt_padded_moidx] = mo_coeff[k].orbspin[kpt_moidx]
mo = lib.tag_array(mo, orbspin=orbspin)
eris.orbspin.append(orbspin)
# FIXME: What if the user freezes all up spin orbitals in
# an RHF calculation? The number of electrons will still be
# even.
else: # guess orbital spin - assumes an RHF calculation
assert (numpy.count_nonzero(kpt_moidx) % 2 == 0)
orbspin = numpy.zeros(mo.shape[1], dtype=int)
orbspin[1::2] = 1
mo = lib.tag_array(mo, orbspin=orbspin)
eris.orbspin.append(orbspin)
eris.mo_coeff.append(mo)
# Re-make our fock MO matrix elements from density and fock AO
dm = cc._scf.make_rdm1(cc.mo_coeff, cc.mo_occ)
with lib.temporary_env(cc._scf, exxdiv=None):
# _scf.exxdiv affects eris.fock. HF exchange correction should be
# excluded from the Fock matrix.
vhf = cc._scf.get_veff(cell, dm)
fockao = cc._scf.get_hcore() + vhf
eris.fock = numpy.asarray([reduce(numpy.dot, (mo.T.conj(), fockao[k], mo))
for k, mo in enumerate(eris.mo_coeff)])
eris.e_hf = cc._scf.energy_tot(dm=dm, vhf=vhf)
eris.mo_energy = [eris.fock[k].diagonal().real for k in range(nkpts)]
# Add HFX correction in the eris.mo_energy to improve convergence in
# CCSD iteration. It is useful for the 2D systems since their occupied and
# the virtual orbital energies may overlap which may lead to numerical
# issue in the CCSD iterations.
# FIXME: Whether to add this correction for other exxdiv treatments?
# Without the correction, MP2 energy may be largely off the correct value.
madelung = tools.madelung(cell, kpts)
eris.mo_energy = [_adjust_occ(mo_e, nocc, -madelung)
for k, mo_e in enumerate(eris.mo_energy)]
# Get location of padded elements in occupied and virtual space.
nocc_per_kpt = get_nocc(cc, per_kpoint=True)
nonzero_padding = padding_k_idx(cc, kind="joint")
# Check direct and indirect gaps for possible issues with CCSD convergence.
mo_e = [eris.mo_energy[kp][nonzero_padding[kp]] for kp in range(nkpts)]
mo_e = numpy.sort([y for x in mo_e for y in x]) # Sort de-nested array
gap = mo_e[numpy.sum(nocc_per_kpt)] - mo_e[numpy.sum(nocc_per_kpt)-1]
if gap < 1e-5:
logger.warn(cc, 'HOMO-LUMO gap %s too small for KCCSD. '
'May cause issues in convergence.', gap)
kconserv = kpts_helper.get_kconserv(cell, kpts)
if getattr(mo_coeff[0], 'orbspin', None) is None:
# The bottom nao//2 coefficients are down (up) spin while the top are up (down).
mo_a_coeff = [mo[:nao // 2] for mo in eris.mo_coeff]
mo_b_coeff = [mo[nao // 2:] for mo in eris.mo_coeff]
eri = numpy.empty((nkpts, nkpts, nkpts, nmo, nmo, nmo, nmo), dtype=numpy.complex128)
fao2mo = cc._scf.with_df.ao2mo
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp, kq, kr]
eri_kpt = fao2mo(
(mo_a_coeff[kp], mo_a_coeff[kq], mo_a_coeff[kr], mo_a_coeff[ks]),
(kpts[kp], kpts[kq], kpts[kr], kpts[ks]),
compact=False)
eri_kpt += fao2mo(
(mo_b_coeff[kp], mo_b_coeff[kq], mo_b_coeff[kr], mo_b_coeff[ks]),
(kpts[kp], kpts[kq], kpts[kr], kpts[ks]),
compact=False)
eri_kpt += fao2mo(
(mo_a_coeff[kp], mo_a_coeff[kq], mo_b_coeff[kr], mo_b_coeff[ks]),
(kpts[kp], kpts[kq], kpts[kr], kpts[ks]),
compact=False)
eri_kpt += fao2mo(
(mo_b_coeff[kp], mo_b_coeff[kq], mo_a_coeff[kr], mo_a_coeff[ks]),
(kpts[kp], kpts[kq], kpts[kr], kpts[ks]),
compact=False)
eri_kpt = eri_kpt.reshape(nmo, nmo, nmo, nmo)
eri[kp, kq, kr] = eri_kpt
else:
mo_a_coeff = [mo[:nao // 2] + mo[nao // 2:] for mo in eris.mo_coeff]
eri = numpy.empty((nkpts, nkpts, nkpts, nmo, nmo, nmo, nmo), dtype=numpy.complex128)
fao2mo = cc._scf.with_df.ao2mo
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp, kq, kr]
eri_kpt = fao2mo(
(mo_a_coeff[kp], mo_a_coeff[kq], mo_a_coeff[kr], mo_a_coeff[ks]),
(kpts[kp], kpts[kq], kpts[kr], kpts[ks]),
compact=False)
eri_kpt[(eris.orbspin[kp][:, None] != eris.orbspin[kq]).ravel()] = 0
eri_kpt[:, (eris.orbspin[kr][:, None] != eris.orbspin[ks]).ravel()] = 0
eri_kpt = eri_kpt.reshape(nmo, nmo, nmo, nmo)
eri[kp, kq, kr] = eri_kpt
# Check some antisymmetrized properties of the integrals
if DEBUG:
check_antisymm_3412(cc, cc.kpts, eri)
# Antisymmetrizing (pq|rs)-(ps|rq), where the latter integral is equal to
# (rq|ps); done since we aren't tracking the kpoint of orbital 's'
eri = eri - eri.transpose(2, 1, 0, 5, 4, 3, 6)
# Chemist -> physics notation
eri = eri.transpose(0, 2, 1, 3, 5, 4, 6)
# Set the various integrals
eris.dtype = eri.dtype
eris.oooo = eri[:, :, :, :nocc, :nocc, :nocc, :nocc].copy() / nkpts
eris.ooov = eri[:, :, :, :nocc, :nocc, :nocc, nocc:].copy() / nkpts
eris.ovoo = eri[:, :, :, :nocc, nocc:, :nocc, :nocc].copy() / nkpts
eris.oovv = eri[:, :, :, :nocc, :nocc, nocc:, nocc:].copy() / nkpts
eris.ovov = eri[:, :, :, :nocc, nocc:, :nocc, nocc:].copy() / nkpts
eris.ovvv = eri[:, :, :, :nocc, nocc:, nocc:, nocc:].copy() / nkpts
eris.vvvv = eri[:, :, :, nocc:, nocc:, nocc:, nocc:].copy() / nkpts
log.timer('CCSD integral transformation', *cput0)
return eris
def check_antisymm_3412(cc, kpts, integrals):
kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
nkpts = len(kpts)
diff = 0.0
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp, kr, kq]
for p in range(integrals.shape[3]):
for q in range(integrals.shape[4]):
for r in range(integrals.shape[5]):
for s in range(integrals.shape[6]):
pqrs = integrals[kp, kq, kr, p, q, r, s]
rspq = integrals[kq, kp, kr, q, p, r, s]
cdiff = numpy.linalg.norm(pqrs - rspq).real
if cdiff > 1e-5:
print("AS diff = %.15g" % cdiff, pqrs, rspq, kp, kq, kr, ks, p, q, r, s)
diff = max(diff, cdiff)
print("antisymmetrization : max diff = %.15g" % diff)
if diff > 1e-5:
print("Energy cutoff (or cell.mesh) is not enough to converge AO integrals.")
return diff
def check_antisymm_12(cc, kpts, integrals):
kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
nkpts = len(kpts)
diff = 0.0
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp, kr, kq]
for p in range(integrals.shape[3]):
for q in range(integrals.shape[4]):
for r in range(integrals.shape[5]):
for s in range(integrals.shape[6]):
pqrs = integrals[kp, kq, kr, p, q, r, s]
qprs = integrals[kq, kp, kr, q, p, r, s]
cdiff = numpy.linalg.norm(pqrs + qprs).real
if cdiff > 1e-5:
print("AS diff = %.15g" % cdiff, pqrs, qprs, kp, kq, kr, ks, p, q, r, s)
diff = max(diff, cdiff)
print("antisymmetrization : max diff = %.15g" % diff)
if diff > 1e-5:
print("Energy cutoff (or cell.mesh) is not enough to converge AO integrals.")
def check_antisymm_34(cc, kpts, integrals):
kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
nkpts = len(kpts)
diff = 0.0
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp, kr, kq]
for p in range(integrals.shape[3]):
for q in range(integrals.shape[4]):
for r in range(integrals.shape[5]):
for s in range(integrals.shape[6]):
pqrs = integrals[kp, kq, kr, p, q, r, s]
pqsr = integrals[kp, kq, ks, p, q, s, r]
cdiff = numpy.linalg.norm(pqrs + pqsr).real
if cdiff > 1e-5:
print("AS diff = %.15g" % cdiff, pqrs, pqsr, kp, kq, kr, ks, p, q, r, s)
diff = max(diff, cdiff)
print("antisymmetrization : max diff = %.15g" % diff)
if diff > 1e-5:
print("Energy cutoff (or cell.mesh) is not enough to converge AO integrals.")
imd = imdk
class _IMDS:
# Identical to molecular rccsd_slow
def __init__(self, cc):
self.verbose = cc.verbose
self.stdout = cc.stdout
self.t1 = cc.t1
self.t2 = cc.t2
self.eris = cc.eris
self.kconserv = cc.khelper.kconserv
self.made_ip_imds = False
self.made_ea_imds = False
self._made_shared_2e = False
self._fimd = None
def _make_shared_1e(self):
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1,t2,eris = self.t1, self.t2, self.eris
kconserv = self.kconserv
self.Loo = imd.Loo(t1,t2,eris,kconserv)
self.Lvv = imd.Lvv(t1,t2,eris,kconserv)
self.Fov = imd.cc_Fov(t1,t2,eris,kconserv)
log.timer('EOM-CCSD shared one-electron intermediates', *cput0)
def _make_shared_2e(self):
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1,t2,eris = self.t1, self.t2, self.eris
kconserv = self.kconserv
# TODO: check whether to hold Wovov Wovvo in memory
if self._fimd is None:
self._fimd = lib.H5TmpFile()
nkpts, nocc, nvir = t1.shape
self._fimd.create_dataset('ovov', (nkpts,nkpts,nkpts,nocc,nvir,nocc,nvir), t1.dtype.char)
self._fimd.create_dataset('ovvo', (nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc), t1.dtype.char)
# 2 virtuals
self.Wovov = imd.Wovov(t1,t2,eris,kconserv, self._fimd['ovov'])
self.Wovvo = imd.Wovvo(t1,t2,eris,kconserv, self._fimd['ovvo'])
self.Woovv = eris.oovv
log.timer('EOM-CCSD shared two-electron intermediates', *cput0)
def make_ip(self, ip_partition=None):
self._make_shared_1e()
if self._made_shared_2e is False and ip_partition != 'mp':
self._make_shared_2e()
self._made_shared_2e = True
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1,t2,eris = self.t1, self.t2, self.eris
kconserv = self.kconserv
nkpts, nocc, nvir = t1.shape
self._fimd.create_dataset('oooo', (nkpts,nkpts,nkpts,nocc,nocc,nocc,nocc), t1.dtype.char)
self._fimd.create_dataset('ooov', (nkpts,nkpts,nkpts,nocc,nocc,nocc,nvir), t1.dtype.char)
self._fimd.create_dataset('ovoo', (nkpts,nkpts,nkpts,nocc,nvir,nocc,nocc), t1.dtype.char)
# 0 or 1 virtuals
if ip_partition != 'mp':
self.Woooo = imd.Woooo(t1,t2,eris,kconserv, self._fimd['oooo'])
self.Wooov = imd.Wooov(t1,t2,eris,kconserv, self._fimd['ooov'])
self.Wovoo = imd.Wovoo(t1,t2,eris,kconserv, self._fimd['ovoo'])
self.made_ip_imds = True
log.timer('EOM-CCSD IP intermediates', *cput0)
def make_ea(self, ea_partition=None):
self._make_shared_1e()
if self._made_shared_2e is False and ea_partition != 'mp':
self._make_shared_2e()
self._made_shared_2e = True
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1,t2,eris = self.t1, self.t2, self.eris
kconserv = self.kconserv
nkpts, nocc, nvir = t1.shape
self._fimd.create_dataset('vovv', (nkpts,nkpts,nkpts,nvir,nocc,nvir,nvir), t1.dtype.char)
self._fimd.create_dataset('vvvo', (nkpts,nkpts,nkpts,nvir,nvir,nvir,nocc), t1.dtype.char)
self._fimd.create_dataset('vvvv', (nkpts,nkpts,nkpts,nvir,nvir,nvir,nvir), t1.dtype.char)
# 3 or 4 virtuals
self.Wvovv = imd.Wvovv(t1,t2,eris,kconserv, self._fimd['vovv'])
if ea_partition == 'mp' and numpy.all(t1 == 0):
self.Wvvvo = imd.Wvvvo(t1,t2,eris,kconserv, self._fimd['vvvo'])
else:
self.Wvvvv = imd.Wvvvv(t1,t2,eris,kconserv, self._fimd['vvvv'])
self.Wvvvo = imd.Wvvvo(t1,t2,eris,kconserv,self.Wvvvv, self._fimd['vvvo'])
self.made_ea_imds = True
log.timer('EOM-CCSD EA intermediates', *cput0)
scf.kghf.KGHF.CCSD = lib.class_as_method(KGCCSD)
if __name__ == '__main__':
from pyscf.pbc import gto
cell = gto.Cell()
cell.atom='''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391
'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 5
cell.build()
# Running HF and CCSD with 1x1x2 Monkhorst-Pack k-point mesh
kmf = scf.KRHF(cell, kpts=cell.make_kpts([1,1,2]), exxdiv=None)
ehf = kmf.kernel()
kmf = scf.addons.convert_to_ghf(kmf)
mycc = KGCCSD(kmf)
ecc, t1, t2 = mycc.kernel()
print(ecc - -0.155298393321855)
|
sunqm/pyscf
|
pyscf/pbc/cc/kccsd.py
|
Python
|
apache-2.0
| 35,158
|
[
"PySCF"
] |
edc3e2d0c7e858d609267f1810bf7c744f05a8f079cc5bf6c8b0a10588b294d0
|
'''
Created on Jun 2, 2011
@author: mkiyer
'''
import logging
import collections
import os
import sys
import argparse
from chimerascan import pysam
from chimerascan.bx.cluster import ClusterTree
from chimerascan.lib import config
from chimerascan.lib.base import LibraryTypes, imin2
from chimerascan.lib.sam import parse_pe_reads, pair_reads, copy_read, select_best_scoring_pairs
from chimerascan.lib.feature import TranscriptFeature
from chimerascan.lib.transcriptome_to_genome import build_tid_transcript_map, \
build_tid_transcript_genome_map, transcript_to_genome_pos
from chimerascan.lib.chimera import DiscordantTags, DISCORDANT_TAG_NAME, \
OrientationTags, ORIENTATION_TAG_NAME
def annotate_multihits(bamfh, reads, tid_tx_genome_map):
hits = set()
any_unmapped = False
for r in reads:
if r.is_unmapped:
any_unmapped = True
continue
assert r.rname in tid_tx_genome_map
# use the position that is most 5' relative to genome
left_tid, left_strand, left_pos = transcript_to_genome_pos(r.rname, r.pos, tid_tx_genome_map)
right_tid, right_strand, right_pos = transcript_to_genome_pos(r.rname, r.aend-1, tid_tx_genome_map)
tid = left_tid
pos = imin2(left_pos, right_pos)
hits.add((tid, pos))
#print r.qname, bamfh.getrname(r.rname), r.pos, bamfh.getrname(tid), pos
for i,r in enumerate(reads):
        # annotate reads with 'HI', 'IH', and 'NH' tags
r.tags = r.tags + [("HI",i), ("IH",len(reads)), ("NH", len(hits))]
return any_unmapped
def map_reads_to_references(pe_reads, tid_tx_map):
"""
bin reads by transcript cluster and reference (tid)
"""
refdict = collections.defaultdict(lambda: ([], []))
clusterdict = collections.defaultdict(lambda: ([], []))
for readnum, reads in enumerate(pe_reads):
for r in reads:
if r.is_unmapped:
continue
# TODO: remove assert statement
assert r.rname in tid_tx_map
# add to cluster dict
cluster_id = tid_tx_map[r.rname].cluster_id
pairs = clusterdict[cluster_id]
pairs[readnum].append(r)
# add to reference dict
pairs = refdict[r.rname]
pairs[readnum].append(r)
return refdict, clusterdict
def get_genome_orientation(r, library_type):
if library_type == LibraryTypes.FR_FIRSTSTRAND:
if r.is_read2:
return OrientationTags.FIVEPRIME
else:
return OrientationTags.THREEPRIME
elif library_type == LibraryTypes.FR_SECONDSTRAND:
if r.is_read1:
return OrientationTags.FIVEPRIME
else:
return OrientationTags.THREEPRIME
return OrientationTags.NONE
def get_gene_orientation(r, library_type):
if library_type == LibraryTypes.FR_UNSTRANDED:
if r.is_reverse:
return OrientationTags.THREEPRIME
else:
return OrientationTags.FIVEPRIME
elif library_type == LibraryTypes.FR_FIRSTSTRAND:
if r.is_read2:
return OrientationTags.FIVEPRIME
else:
return OrientationTags.THREEPRIME
elif library_type == LibraryTypes.FR_SECONDSTRAND:
if r.is_read1:
return OrientationTags.FIVEPRIME
else:
return OrientationTags.THREEPRIME
logging.error("Unknown library type %s, aborting" % (library_type))
assert False
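# Hedged illustration (not used by the pipeline): the orientation rules above
# depend only on the read-number and strand flags, so they can be exercised
# with a minimal stand-in for a pysam read instead of a real alignment.
def _example_gene_orientation(library_type=LibraryTypes.FR_UNSTRANDED):
    from collections import namedtuple
    FakeRead = namedtuple("FakeRead", "is_read1 is_read2 is_reverse")
    fwd_read1 = FakeRead(True, False, False)   # forward strand -> FIVEPRIME
    rev_read2 = FakeRead(False, True, True)    # reverse strand -> THREEPRIME
    return [get_gene_orientation(r, library_type) for r in (fwd_read1, rev_read2)]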
def classify_unpaired_reads(reads, library_type):
gene_hits_5p = []
gene_hits_3p = []
for r in reads:
# this alignment is to a transcript (gene), so need
# to determine whether it is 5' or 3'
orientation = get_gene_orientation(r, library_type)
if orientation == OrientationTags.FIVEPRIME:
gene_hits_5p.append(r)
else:
gene_hits_3p.append(r)
# add a tag to the sam file describing the read orientation and
# that it is discordant
r.tags = r.tags + [(DISCORDANT_TAG_NAME, DiscordantTags.DISCORDANT_GENE),
(ORIENTATION_TAG_NAME, orientation)]
return gene_hits_5p, gene_hits_3p
def find_discordant_pairs(pe_reads, library_type):
"""
iterate through combinations of read1/read2 to predict valid
discordant read pairs
"""
# classify the reads as 5' or 3' gene alignments or genome alignments
r1_5p_gene_hits, r1_3p_gene_hits = \
classify_unpaired_reads(pe_reads[0], library_type)
r2_5p_gene_hits, r2_3p_gene_hits = \
classify_unpaired_reads(pe_reads[1], library_type)
# pair 5' and 3' gene alignments
gene_pairs = []
combos = [(r1_5p_gene_hits,r2_3p_gene_hits),
(r1_3p_gene_hits,r2_5p_gene_hits)]
for r1_list,r2_list in combos:
for r1 in r1_list:
for r2 in r2_list:
cr1 = copy_read(r1)
cr2 = copy_read(r2)
pair_reads(cr1,cr2)
gene_pairs.append((cr1,cr2))
return gene_pairs
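# Worked example (hedged): with 2 five-prime hits for read1 and 3 three-prime
# hits for read2 (and none in the reverse combination), the nested loops above
# emit 2 * 3 = 6 candidate discordant pairs; select_best_scoring_pairs() later
# keeps only the best-scoring ones.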
def classify_read_pairs(pe_reads, max_isize,
library_type,
tid_tx_map):
"""
examines all the alignments of a single fragment and tries to find ways
to pair reads together.
annotates all read pairs with an integer tag corresponding to a value
in the DiscordantTags class
returns a tuple with the following lists:
1) pairs (r1,r2) aligning to genes (pairs may be discordant)
    2) unpaired reads, if any
"""
# to satisfy library type reads must either be on
# same strand or opposite strands
concordant_tx_pairs = []
discordant_tx_pairs = []
concordant_cluster_pairs = []
discordant_cluster_pairs = []
#
# first, try to pair reads that map to the same transcript, or to the
# genome within the insert size range
#
same_strand = LibraryTypes.same_strand(library_type)
refdict, clusterdict = map_reads_to_references(pe_reads, tid_tx_map)
found_pair = False
for tid, tid_pe_reads in refdict.iteritems():
# check if there are alignments involving both reads in a pair
if len(tid_pe_reads[0]) == 0 or len(tid_pe_reads[1]) == 0:
# no paired alignments exist at this reference
continue
for r1 in tid_pe_reads[0]:
for r2 in tid_pe_reads[1]:
# read strands must agree with library type
strand_match = (same_strand == (r1.is_reverse == r2.is_reverse))
# these reads can be paired
found_pair = True
cr1 = copy_read(r1)
cr2 = copy_read(r2)
# this is a hit to same transcript (gene)
# pair the reads if strand comparison is correct
if strand_match:
tags = [(DISCORDANT_TAG_NAME, DiscordantTags.CONCORDANT_TX)]
concordant_tx_pairs.append((cr1,cr2))
else:
# hit to same gene with wrong strand, which
# could happen in certain wacky cases
tags = [(DISCORDANT_TAG_NAME, DiscordantTags.DISCORDANT_STRAND_TX)]
discordant_tx_pairs.append((cr1,cr2))
pair_reads(cr1,cr2,tags)
# at this point, if we have not been able to find a suitable way
# to pair the reads, then search within the transcript cluster
if not found_pair:
for cluster_id, cluster_pe_reads in clusterdict.iteritems():
# check if there are alignments involving both reads in a pair
if len(cluster_pe_reads[0]) == 0 or len(cluster_pe_reads[1]) == 0:
# no paired alignments in this transcript cluster
continue
for r1 in cluster_pe_reads[0]:
for r2 in cluster_pe_reads[1]:
# check strand compatibility
strand_match = (same_strand == (r1.is_reverse == r2.is_reverse))
# these reads can be paired
found_pair = True
cr1 = copy_read(r1)
cr2 = copy_read(r2)
if strand_match:
tags = [(DISCORDANT_TAG_NAME, DiscordantTags.CONCORDANT_GENE)]
concordant_cluster_pairs.append((cr1,cr2))
else:
tags = [(DISCORDANT_TAG_NAME, DiscordantTags.DISCORDANT_STRAND_GENE)]
discordant_cluster_pairs.append((cr1,cr2))
pair_reads(cr1,cr2,tags)
# at this point, we have tried all combinations. if any paired reads
# are concordant then return them without considering discordant reads
gene_pairs = []
if len(concordant_tx_pairs) > 0:
gene_pairs = concordant_tx_pairs
elif len(concordant_cluster_pairs) > 0:
gene_pairs = concordant_cluster_pairs
if len(gene_pairs) > 0:
return gene_pairs, []
# if no concordant reads in transcripts, return any discordant reads
# that may violate strand requirements but still remain colocalized
# on the same gene/chromosome
gene_pairs = []
if len(discordant_tx_pairs) > 0:
gene_pairs = discordant_tx_pairs
elif len(discordant_cluster_pairs) > 0:
gene_pairs = discordant_cluster_pairs
if len(gene_pairs) > 0:
return gene_pairs, []
#
# at this point, no read pairings were found so the read is
# assumed to be discordant. now we can create all valid
# combinations of read1/read2 as putative discordant read pairs
#
gene_pairs = find_discordant_pairs(pe_reads, library_type)
if len(gene_pairs) > 0:
# sort valid pairs by sum of alignment score and retain the best scoring
# pairs
gene_pairs = select_best_scoring_pairs(gene_pairs)
return gene_pairs, []
#
# no valid pairs could be found suggesting that these mappings are
# either mapping artifacts or that the current gene annotation set
    # lacks annotations supporting this pair
#
return [], pe_reads
def write_pe_reads(bamfh, pe_reads):
for reads in pe_reads:
for r in reads:
bamfh.write(r)
def write_pairs(bamfh, pairs):
for r1,r2 in pairs:
bamfh.write(r1)
bamfh.write(r2)
def find_discordant_fragments(input_bam_file,
paired_bam_file,
unmapped_bam_file,
index_dir,
max_isize,
library_type):
"""
parses BAM file and categorizes reads into several groups:
- concordant
- discordant within gene (splicing isoforms)
- discordant between different genes (chimeras)
"""
logging.info("Finding discordant read pair combinations")
logging.debug("\tInput file: %s" % (input_bam_file))
logging.debug("\tMax insert size: '%d'" % (max_isize))
logging.debug("\tLibrary type: '%s'" % (library_type))
logging.debug("\tGene paired file: %s" % (paired_bam_file))
logging.debug("\tUnmapped file: %s" % (unmapped_bam_file))
# setup input and output files
bamfh = pysam.Samfile(input_bam_file, "rb")
genefh = pysam.Samfile(paired_bam_file, "wb", template=bamfh)
unmappedfh = pysam.Samfile(unmapped_bam_file, "wb", template=bamfh)
# read transcript features
logging.debug("Reading transcript features")
transcript_file = os.path.join(index_dir, config.TRANSCRIPT_FEATURE_FILE)
transcripts = list(TranscriptFeature.parse(open(transcript_file)))
logging.debug("Building transcript lookup tables")
# build a lookup table from bam tid index to transcript object
tid_tx_map = build_tid_transcript_map(bamfh, transcripts)
# build a transcript to genome coordinate map
tid_tx_genome_map = build_tid_transcript_genome_map(bamfh, transcripts)
logging.info("Parsing reads")
for pe_reads in parse_pe_reads(bamfh):
# add hit index and multimap information to read tags
# this function also checks for unmapped reads
any_unmapped = False
for reads in pe_reads:
any_unmapped = (any_unmapped or
annotate_multihits(bamfh, reads, tid_tx_genome_map))
if any_unmapped:
# write to output as discordant reads and continue to
# next fragment
write_pe_reads(unmappedfh, pe_reads)
continue
# examine all read pairing combinations and rule out invalid pairings
gene_pairs, unpaired_reads = classify_read_pairs(pe_reads,
max_isize,
library_type,
tid_tx_map)
if len(gene_pairs) > 0:
write_pairs(genefh, gene_pairs)
# TODO: do something with unpaired discordant reads?
genefh.close()
unmappedfh.close()
bamfh.close()
logging.info("Finished pairing reads")
return config.JOB_SUCCESS
def main():
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
parser = argparse.ArgumentParser()
parser.add_argument('--max-fragment-length', dest="max_fragment_length",
type=int, default=1000)
parser.add_argument('--library', dest="library_type",
default=LibraryTypes.FR_UNSTRANDED)
parser.add_argument("index_dir")
parser.add_argument("input_bam_file")
parser.add_argument("paired_bam_file")
parser.add_argument("unmapped_bam_file")
args = parser.parse_args()
return find_discordant_fragments(args.input_bam_file,
args.paired_bam_file,
args.unmapped_bam_file,
args.index_dir,
max_isize=args.max_fragment_length,
library_type=args.library_type)
if __name__ == '__main__':
sys.exit(main())
|
Alwnikrotikz/chimerascan
|
chimerascan/deprecated/find_discordant_reads_v5.0.0a.py
|
Python
|
gpl-3.0
| 14,265
|
[
"pysam"
] |
4cb7a84f56d71f25df2d0f0de726baba3eec9476dc53b4427302f64d840b105d
|
"""Reproduce results from Fig. 2 of `bib.raanes2014ext`."""
import numpy as np
import dapper.mods as modelling
from dapper.mods.LA import Fmat, sinusoidal_sample
from dapper.mods.Lorenz96 import LPs
from dapper.tools.linalg import tsvd
# Burn-in allows damp*x and x+noise to balance out
tseq = modelling.Chronology(dt=1, dko=5, T=500, BurnIn=60, Tplot=100)
Nx = 1000
Ny = 40
jj = modelling.linspace_int(Nx, Ny)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.01
#################
# Noise setup #
#################
# Instead of sampling model noise from sinusoidal_sample(),
# we will replicate it below by a covariance matrix approach.
# But, for strict equivalence, one would have to use
# uniform (i.e. not Gaussian) random numbers.
wnumQ = 25
sample_filename = modelling.rc.dirs.samples/('LA_Q_wnum%d.npz' % wnumQ)
try:
# Load pre-generated
L = np.load(sample_filename)['Left']
except FileNotFoundError:
# First-time use
print('Did not find sample file', sample_filename,
'for experiment initialization. Generating...')
NQ = 20000 # Must have NQ > (2*wnumQ+1)
A = sinusoidal_sample(Nx, wnumQ, NQ)
A = 1/10 * (A - A.mean(0)) / np.sqrt(NQ)
Q = A.T @ A
U, s, _ = tsvd(Q)
L = U*np.sqrt(s)
np.savez(sample_filename, Left=L)
X0 = modelling.GaussRV(C=modelling.CovMat(np.sqrt(5)*L, 'Left'))
###################
# Forward model #
###################
damp = 0.98
Fm = Fmat(Nx, -1, 1, tseq.dt)
def step(x, t, dt):
assert dt == tseq.dt
return x @ Fm.T
Dyn = {
'M': Nx,
'model': lambda x, t, dt: damp * step(x, t, dt),
'linear': lambda x, t, dt: damp * Fm,
'noise': modelling.GaussRV(C=modelling.CovMat(L, 'Left')),
}
HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0, LP=LPs(jj))
####################
# Suggested tuning
####################
# Expected rmse.a = 0.3
# xp = EnKF('PertObs',N=30,infl=3.2)
# Note that infl=1 may yield approx optimal rmse, even though then rmv << rmse.
# Why is rmse so INsensitive to inflation, especially for PertObs?
# Reproduce raanes'2015 "extending sqrt method to model noise":
# xp = EnKF('Sqrt',fnoise_treatm='XXX',N=30,infl=1.0),
# where XXX is one of:
# - Stoch
# - Mult-1
# - Mult-M
# - Sqrt-Core
# - Sqrt-Add-Z
# - Sqrt-Dep
# Other notes:
# - Multidim. multiplicative noise incorporation
# has a tendency to go awry.
# The main reason is that it changes the ensemble subspace,
# away from the "model" subspace.
# - There are also some very strong, regular correlation
#   patterns that arise when dt=1 (dt = c*dx).
# - It also happens if X0pat does not use centering.
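# Hedged usage sketch (comments only; follows the generic DAPPER driver pattern,
# not anything verified against this exact configuration, so treat the attribute
# names below as assumptions):
#   import dapper.da_methods as da
#   xp = da.EnKF('PertObs', N=30, infl=3.2)
#   xx, yy = HMM.simulate()
#   xp.assimilate(HMM, xx, yy)
#   xp.stats.average_in_time()
#   print(xp.avrgs.rmse.a)  # compare with the expected rmse.a = 0.3 above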
|
nansencenter/DAPPER
|
dapper/mods/LA/raanes2015.py
|
Python
|
mit
| 2,659
|
[
"Gaussian"
] |
fe5404c83e351704ab4ec15423add8a46bf95a48b96aef54211679dd7cb8409c
|
# Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
from vPiP import *
Vpip = vPiP.Vpip
with Vpip() as p:
try:
p.moveTo(0, 0)
p.drawTo(p.config.pixels, 0)
p.drawTo(p.config.pixels, p.config.heightPixels)
p.drawTo(0, p.config.heightPixels)
p.drawTo(0, 0)
p.moveTo(1, 1)
p.drawTo(p.config.pixels - 1, 1)
p.drawTo(p.config.pixels - 1, p.config.heightPixels - 1)
p.drawTo(1, p.config.heightPixels - 1)
p.drawTo(1, 1)
p.goHome()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("test1 main thread exception : %s" % exc_type)
traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
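# Hedged sketch (comments only): the two nested rectangles above could also be
# drawn by a small helper; `p` is the same Vpip plotter context as above.
#   def draw_border(p, inset):
#       w, h = p.config.pixels, p.config.heightPixels
#       p.moveTo(inset, inset)
#       for x, y in ((w - inset, inset), (w - inset, h - inset),
#                    (inset, h - inset), (inset, inset)):
#           p.drawTo(x, y)
#   # draw_border(p, 0) followed by draw_border(p, 1) reproduces the block above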
|
brianinnes/pycupi
|
python/border.py
|
Python
|
apache-2.0
| 1,270
|
[
"Brian"
] |
289d1be87e76277dd7232bac30570690563dac7989f241de0020dcfa4cd361e9
|
"""
Supercell class
Class to store supercells of crystals. A supercell is a lattice model of a crystal, with
periodically repeating unit cells. In that framework we can
1. add/remove/substitute atoms
2. find the transformation map between two different representations of the same supercell
3. output POSCAR format (possibly other formats?)
"""
__author__ = 'Dallas R. Trinkle'
import numpy as np
import collections, copy, itertools, warnings
from numbers import Integral
from onsager import crystal
from functools import reduce
# TODO: add "parser"--read CONTCAR file, create Supercell
# TODO: output PairState from Supercell
class Supercell(object):
"""
A class that defines a Supercell of a crystal.
Takes in a crystal, a supercell (3x3 integer matrix). We can identify sites
as interstitial sites, and specify if we'll have solutes.
"""
def __init__(self, crys, super, interstitial=(), Nsolute=0, empty=False, NOSYM=False):
"""
Initialize our supercell to an empty supercell.
:param crys: crystal object
:param super: 3x3 integer matrix
:param interstitial: (optional) list/tuple of indices that correspond to interstitial sites
:param Nsolute: (optional) number of substitutional solute elements to consider; default=0
:param empty: (optional) designed to allow "copy" to work--skips all derived info
:param NOSYM: (optional) does not do symmetry analysis (intended ONLY for testing purposes)
"""
self.crys = crys
self.super = super.copy()
self.interstitial = copy.deepcopy(interstitial)
self.Nchem = crys.Nchem + Nsolute if Nsolute > 0 else crys.Nchem
if empty: return
# everything else that follows is "derived" from those initial parameters
self.lattice = np.dot(self.crys.lattice, self.super)
self.N = self.crys.N
self.atomindices, self.indexatom = self.crys.atomindices, \
{ci: n for n, ci in enumerate(self.crys.atomindices)}
self.chemistry = [crys.chemistry[n] if n < crys.Nchem else '' for n in range(self.Nchem + 1)]
self.chemistry[-1] = 'v'
self.Wyckofflist, self.Wyckoffchem = [], []
for n, (c, i) in enumerate(self.atomindices):
for wset in self.Wyckofflist:
if n in wset: break
if len(self.Wyckofflist) == 0 or n not in wset:
# grab the set of (c,i) of Wyckoff sets (next returns first that matches, None if none):
indexset = next((iset for iset in self.crys.Wyckoff if (c, i) in iset), None)
self.Wyckofflist.append(frozenset([self.indexatom[ci] for ci in indexset]))
self.Wyckoffchem.append(self.crys.chemistry[c])
self.size, self.invsuper, self.translist, self.transdict = self.maketrans(self.super)
# self.transdict = {tuple(t):n for n,t in enumerate(self.translist)}
self.pos, self.occ = self.makesites(), -1 * np.ones(self.N * self.size, dtype=int)
self.chemorder = [[] for n in range(self.Nchem)]
if NOSYM:
self.G = frozenset([crystal.GroupOp.ident([self.pos])])
else:
self.G = self.gengroup()
# some attributes we want to do equate, others we want deepcopy. Equate should not be modified.
__copyattr__ = ('lattice', 'N', 'chemistry', 'size', 'invsuper',
'Wyckofflist', 'Wyckoffchem', 'occ', 'chemorder')
__eqattr__ = ('atomindices', 'indexatom', 'translist', 'transdict', 'pos', 'G')
def copy(self):
"""
Make a copy of the supercell; initializes, then copies over ``__copyattr__`` and
``__eqattr__``.
:return: new supercell object, copy of the original
"""
supercopy = self.__class__(self.crys, self.super, self.interstitial, self.Nchem - self.crys.Nchem,
empty=True)
for attr in self.__copyattr__: setattr(supercopy, attr, copy.deepcopy(getattr(self, attr)))
for attr in self.__eqattr__: setattr(supercopy, attr, getattr(self, attr))
return supercopy
def __eq__(self, other):
"""
Return True if two supercells are equal; this means they should have the same occupancy.
*and* the same ordering
:param other: supercell for comparison
:return: True if same crystal, supercell, occupancy, and ordering; False otherwise
"""
return isinstance(other, self.__class__) and np.all(self.super == other.super) and \
self.interstitial == other.interstitial and np.allclose(self.pos, other.pos) and \
np.all(self.occ == other.occ) and self.chemorder == other.chemorder
def __ne__(self, other):
"""Inequality == not __eq__"""
return not self.__eq__(other)
def stoichiometry(self):
"""Return a string representing the current stoichiometry"""
return ','.join([c + '_i({})'.format(len(l))
if n in self.interstitial
else c + '({})'.format(len(l))
for n, c, l in zip(itertools.count(),
self.chemistry,
self.chemorder)])
def __str__(self):
"""Human readable version of supercell"""
str = "Supercell of crystal:\n{crys}\n".format(crys=self.crys)
str += "Supercell vectors:\n{}\nChemistry: ".format(self.super.T)
str += self.stoichiometry()
str += '\nKroger-Vink: ' + self.KrogerVink()
str += '\nPositions:\n'
str += '\n'.join([u.__str__() + ' ' + self.chemistry[o] for u, o in zip(self.pos, self.occ)])
str += '\nOrdering:\n'
str += '\n'.join([u.__str__() + ' ' + c for c, ulist in zip(self.chemistry, self.occposlist())
for u in ulist])
return str
def __mul__(self, other):
"""
Multiply by a GroupOp; returns a new supercell (constructed via copy).
:param other: must be a GroupOp (and *should* be a GroupOp of the supercell!)
:return: rotated supercell
"""
if not isinstance(other, crystal.GroupOp): return NotImplemented
gsuper = self.copy()
gsuper *= other
return gsuper
def __rmul__(self, other):
"""
Multiply by a GroupOp; returns a new supercell (constructed via copy).
:param other: must be a GroupOp (and *should* be a GroupOp of the supercell!)
:return: rotated supercell
"""
if not isinstance(other, crystal.GroupOp): return NotImplemented
return self.__mul__(other)
def __imul__(self, other):
"""
Multiply by a GroupOp, in place.
:param other: must be a GroupOp (and *should* be a GroupOp of the supercell!)
:return: self
"""
if not isinstance(other, crystal.GroupOp): return NotImplemented
# This requires some careful manipulation: we need to modify (1) occ, and (2) chemorder
indexmap = other.indexmap[0]
gocc = self.occ.copy()
for ind, gind in enumerate(indexmap):
gocc[gind] = self.occ[ind]
self.occ = gocc
self.chemorder = [[indexmap[ind] for ind in clist] for clist in self.chemorder]
return self
def index(self, pos, threshold=1.):
"""
Return the index that corresponds to the position *closest* to pos in the supercell.
Done in direct coordinates of the supercell, using periodic boundary conditions.
:param pos: 3-vector
        :param threshold: (optional) maximum squared "distance" in supercell for a match; default=1.
:return index: index of closest position
"""
index, dist2 = None, threshold
for ind, u in enumerate(self.pos):
delta = crystal.inhalf(pos - u)
d2 = np.sum(delta * delta)
if d2 < dist2: index, dist2 = ind, d2
return index
def __getitem__(self, key):
"""
Index into supercell
:param key: index (either an int, a slice, or a position)
:return: chemical occupation at that point
"""
if isinstance(key, Integral) or isinstance(key, slice): return self.occ[key]
if isinstance(key, np.ndarray) and key.shape == (3,): return self.occ[self.index(key)]
raise TypeError('Inappropriate key {}'.format(key))
def __setitem__(self, key, value):
"""
Set specific composition for site; uses same indexing as __getitem__
:param key: index (either an int, a slice, or a position)
:param value: chemical occupation at that point
"""
if isinstance(key, slice): return NotImplemented
index = None
if isinstance(key, Integral): index = key
if isinstance(key, np.ndarray) and key.shape == (3,): index = self.index(key)
self.setocc(index, value)
def __sane__(self):
"""Return True if supercell occupation and chemorder are consistent"""
occset = set()
for c, clist in enumerate(self.chemorder):
for ind in clist:
# check that occupancy (from chemorder) is correct:
if self.occ[ind] != c: return False
# record as an occupied state
occset.add(ind)
# now make sure that every site *not* in occset is, in fact, vacant
for ind, c in enumerate(self.occ):
if ind not in occset:
if c != -1: return False
return True
@staticmethod
def maketrans(super):
"""
Takes in a supercell matrix, and returns a list of all translations of the unit
cell that remain inside the supercell
:param super: 3x3 integer matrix
:return size: integer, corresponding to number of unit cells
:return invsuper: integer matrix inverse of supercell (needs to be divided by size)
:return translist: list of integer vectors (to be divided by ``size``) corresponding
to unit cell positions
:return transdict: dictionary of tuples and their corresponding index (inverse of trans)
"""
size = abs(int(np.round(np.linalg.det(super))))
if size==0: raise ZeroDivisionError('Tried to use a singular supercell.')
invsuper = np.round(np.linalg.inv(super) * size).astype(int)
maxN = abs(super).max()
translist, transdict = [], {}
for nvect in [np.array((n0, n1, n2))
for n0 in range(-maxN, maxN + 1)
for n1 in range(-maxN, maxN + 1)
for n2 in range(-maxN, maxN + 1)]:
tv = np.dot(invsuper, nvect) % size
ttup = tuple(tv)
if ttup not in transdict:
transdict[ttup] = len(translist)
translist.append(tv)
if len(translist) != size:
raise ArithmeticError(
'Somehow did not generate the correct number of translations? {}!={}'.format(size, len(translist)))
return size, invsuper, translist, transdict
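    # Hedged example (illustration only): for a diagonal 2x2x2 supercell matrix,
    #   size, invsuper, translist, transdict = Supercell.maketrans(2 * np.eye(3, dtype=int))
    # returns size == 8 and the 8 distinct unit-cell translations (each scaled by size).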
def makesites(self):
"""
Generate the array corresponding to the sites; the indexing is based on the translations
and the atomindices in crys. These may not all be filled when the supercell is finished.
:return pos: array [N*size, 3] of supercell positions in direct coordinates
"""
invsize = 1 / self.size
basislist = [np.dot(self.invsuper, self.crys.basis[c][i]) for (c, i) in self.atomindices]
return np.array([crystal.incell((t + u) * invsize) for t in self.translist for u in basislist])
def gengroup(self):
"""
Generate the group operations internal to the supercell
:return G: set of GroupOps
"""
Glist = []
unittranslist = [np.dot(self.super, t) // self.size for t in self.translist]
invsize = 1 / self.size
for g0 in self.crys.G:
Rsuper = np.dot(self.invsuper, np.dot(g0.rot, self.super))
if not np.all(Rsuper % self.size == 0):
warnings.warn(
'Broken symmetry? GroupOp:\n{}\nnot a symmetry operation of supercell?\nRsuper=\n{}'.format(g0,
Rsuper),
RuntimeWarning, stacklevel=2)
continue
else:
# divide out the size (in inverse super). Should still be an integer matrix (and hence, a symmetry)
Rsuper //= self.size
for u in unittranslist:
# first, make the corresponding group operation by adding the unit cell translation:
g = g0 + u
# translation vector *in the supercell*; go ahead and keep it inside the supercell, too.
tsuper = (np.dot(self.invsuper, g.trans) % self.size) * invsize
# finally: indexmap!!
indexmap = []
for R in unittranslist:
for ci in self.atomindices:
Rp, ci1 = self.crys.g_pos(g, R, ci)
# A little confusing, but:
# [n]^-1*Rp -> translation, but needs to be mod self.size
# convert to a tuple, to the index into transdict
# THEN multiply by self.N, and add the index of the new Wyckoff site. Whew!
indexmap.append(
self.transdict[tuple(np.dot(self.invsuper, Rp) % self.size)] * self.N + self.indexatom[ci1])
if len(set(indexmap)) != self.N * self.size:
raise ArithmeticError('Did not produce a correct index mapping for GroupOp:\n{}'.format(g))
Glist.append(crystal.GroupOp(rot=Rsuper, cartrot=g0.cartrot, trans=tsuper,
indexmap=(tuple(indexmap),)))
return frozenset(Glist)
def definesolute(self, c, chemistry):
"""
Set the name of the chemistry of chemical index c. Only works for substitutional solutes.
:param c: index
:param chemistry: string
"""
if c < self.crys.Nchem or c >= self.Nchem:
raise IndexError('Trying to set the chemistry for a lattice atom / vacancy')
self.chemistry[c] = chemistry
def setocc(self, ind, c):
"""
Set the occupancy of position indexed by ind, to chemistry c. Used by all the other algorithms.
:param ind: integer index
:param c: chemistry index
"""
if c < -2 or c > self.crys.Nchem:
raise IndexError('Trying to occupy with a non-defined chemistry: {} out of range'.format(c))
corig = self.occ[ind]
if corig != c:
if corig >= 0:
# remove from chemorder list (if not vacancy)
co = self.chemorder[corig]
co.pop(co.index(ind))
if c >= 0:
# add to chemorder list (if not vacancy)
self.chemorder[c].append(ind)
# finally: set the occupancy
self.occ[ind] = c
def fillperiodic(self, ci, Wyckoff=True):
"""
Occupies all of the (Wyckoff) sites corresponding to chemical index with the appropriate chemistry.
:param ci: tuple of (chem, index) in crystal
:param Wyckoff: (optional) if False, *only* occupy the specific tuple, but still periodically
:return self:
"""
if __debug__:
if ci not in self.indexatom: raise IndexError('Tuple {} not a corresponding atom index'.format(ci))
ind = self.indexatom[ci]
indlist = next((nset for nset in self.Wyckofflist if ind in nset), None) if Wyckoff else (ind,)
for i in [n * self.N + i for n in range(self.size) for i in indlist]:
self.setocc(i, ci[0])
return self
def occposlist(self):
"""
Returns a list of lists of occupied positions, in (chem)order.
:return occposlist: list of lists of supercell coord. positions
"""
return [[self.pos[ind] for ind in clist] for clist in self.chemorder]
def POSCAR(self, name=None, stoichiometry=True):
"""
Return a VASP-style POSCAR, returned as a string.
:param name: (optional) name to use for first list
:param stoichiometry: (optional) if True, append stoichiometry to name
:return POSCAR: string
"""
POSCAR = "" if name is None else name
if stoichiometry: POSCAR += " " + self.stoichiometry()
POSCAR += """
1.0
{a[0][0]:21.16f} {a[1][0]:21.16f} {a[2][0]:21.16f}
{a[0][1]:21.16f} {a[1][1]:21.16f} {a[2][1]:21.16f}
{a[0][2]:21.16f} {a[1][2]:21.16f} {a[2][2]:21.16f}
""".format(a=self.lattice)
POSCAR += ' '.join(['{}'.format(len(clist)) for clist in self.chemorder])
POSCAR += '\nDirect\n'
POSCAR += '\n'.join([" {u[0]:19.16f} {u[1]:19.16f} {u[2]:19.16f}".format(u=u)
for clist in self.occposlist() for u in clist])
# needs a trailing newline
return POSCAR + '\n'
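    # Hedged usage sketch (names are illustrative; `crys` is a hypothetical
    # crystal object, not defined in this module):
    #   sup = Supercell(crys, 3 * np.eye(3, dtype=int))
    #   sup.fillperiodic((0, 0))   # occupy every site of chemistry 0
    #   open('POSCAR', 'w').write(sup.POSCAR(name='perfect supercell'))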
__vacancyformat__ = "v_{sitechem}"
__interstitialformat__ = "{chem}_i"
__antisiteformat__ = "{chem}_{sitechem}"
def defectindices(self):
"""
Return a dictionary that corresponds to the "defect" content of the supercell.
:return defects: dictionary, keyed by defect type, with a set of indices of corresponding defects
"""
def adddefect(name, index):
if name in defects:
defects[name].add(index)
else:
defects[name] = set([index])
defects = {}
sitechem = [self.chemistry[c] for (c, i) in self.atomindices]
for wset, chem in zip(self.Wyckofflist, self.Wyckoffchem):
for i in wset:
if self.atomindices[i][0] in self.interstitial:
for n in range(self.size):
ind = n * self.N + i
c = self.occ[ind]
if c != -1: adddefect(self.__interstitialformat__.format(chem=self.chemistry[c]), ind)
else:
sc = sitechem[i]
for n in range(self.size):
ind = n * self.N + i
c = self.occ[ind]
if self.chemistry[c] != sc:
name = self.__vacancyformat__.format(sitechem=sitechem[i]) \
if c == -1 else \
self.__antisiteformat__.format(chem=self.chemistry[c], sitechem=sc)
adddefect(name, ind)
return defects
def KrogerVink(self):
"""
Attempt to make a "simple" string based on the defectindices, using Kroger-Vink notation.
That is, we identify: vacancies, antisites, and interstitial sites, and return a string.
        NOTE: there are no relative charges, so this is a pseudo-KV notation.
:return KV: string representation
"""
defects = self.defectindices()
return '+'.join(["{}{}".format(len(defects[name]), name)
if len(defects[name]) > 1 else name
for name in sorted(defects.keys())])
def reorder(self, mapping):
"""
Reorder (in place) the occupied sites. Does not change the occupancies, only the ordering
for "presentation".
:param mapping: list of maps; will make newchemorder[c][i] = chemorder[c][mapping[c][i]]
:return self:
If mapping is not a proper permutation, raises ValueError.
"""
neworder = [[clist[cmap[i]] for i in range(len(clist))]
for clist, cmap in zip(self.chemorder, mapping)]
self.chemorder, oldorder = neworder, self.chemorder
if not self.__sane__():
self.chemorder = oldorder
raise ValueError('Mapping {} is not a proper permutation'.format(mapping))
return self
def equivalencemap(self, other):
"""
Given the super ``other`` we want to find a group operation that transforms ``self``
into other. This is a GroupOp *along* with an index mapping of chemorder. The index
mapping is to get the occposlist to match up:
``(g*self).occposlist()[c][mapping[c][i]] == other.occposlist()[c][i]``
(We can write a similar expression using chemorder, since chemorder indexes into pos).
We're going to return both g and mapping.
*Remember:* ``g`` does not change the presentation ordering; ``mapping`` is
necessary for full equivalence. If no such equivalence, return ``None,None``.
:param other: Supercell
:return g: GroupOp to transform sites from ``self`` to ``other``
:return mapping: list of maps, such that (g*self).chemorder[c][mapping[c][i]] == other.chemorder[c][i]
"""
# 1. check that our defects even match up:
selfdefects, otherdefects = self.defectindices(), other.defectindices()
for k, v in selfdefects.items():
if k not in otherdefects: return None, None
if len(v) != len(otherdefects[k]): return None, None
for k, v in otherdefects.items():
if k not in selfdefects: return None, None
if len(v) != len(selfdefects[k]): return None, None
# 2. identify the shortest common set of defects:
defcount = {k: len(v) for k, v in selfdefects.items()}
deftype = min(defcount, key=defcount.get) # key to min value from dictionary
shortset, matchset = selfdefects[deftype], otherdefects[deftype]
mapping = None
gocc = self.occ.copy()
for g in self.G:
# 3. check against the shortest list of defects:
indexmap = g.indexmap[0]
if any(indexmap[i] not in matchset for i in shortset): continue
# 4. having checked that shortlist, check the full mapping:
for ind, gind in enumerate(indexmap):
gocc[gind] = self.occ[ind]
if np.any(gocc != other.occ): continue
# 5. we have a winner. Now it's all up to getting the mapping; done with index()
gorder = [[indexmap[ind] for ind in clist] for clist in self.chemorder]
mapping = []
for gclist, otherlist in zip(gorder, other.chemorder):
mapping.append([gclist.index(index) for index in otherlist])
break
if mapping is None: return None, mapping
return g, mapping
|
DallasTrinkle/Onsager
|
onsager/supercell.py
|
Python
|
mit
| 22,765
|
[
"CRYSTAL",
"VASP"
] |
b82ece69acb4fd41c275e6c59cb39d159cb6eb8d141f61af7a0dfa95b92b7b59
|
################################
# Author : septicmk
# Date : 2015/09/05 16:57:57
# FileName : visualization.py
################################
from vtkpython import *
import math
import pandas as pd
from random import random
class vtkTimerCallback():
def __init__(self):
self.timer_count = 0
def execute(self,obj,event):
#print self.timer_count
celllist = self.dataset[self.timer_count]
for i in range(len(self.actorlist)):
print str(i) + " " + str(celllist[i]['pos'][0]) + " " + str(celllist[i]['pos'][1]) + " " + str(celllist[i]['pos'][2])
self.actorlist[i].SetPosition(float(celllist[i]['pos'][0]),float(celllist[i]['pos'][1]),float(celllist[i]['pos'][2]));
#self.actorlist[i].SetPosition(random()*20,random()*20,random()*20)
iren = obj
iren.GetRenderWindow().Render()
self.timer_count += 1
class Visualization:
def __init__(self):
'''
'''
self.cell_table = pd.read_pickle("cell_table.pkl")
        self.cell_table.to_csv("cell_table.csv")
def makeCells(self):
'''
        use the cell table to build the list of cell actors.
return: A List consisting of [vtkActor]
'''
cellactorlist = []
for i, row in self.cell_table.iterrows():
spheresrc = vtk.vtkSphereSource()
spheresrc.SetCenter(row[u'x'],row[u'y'],row[u'z']*7)
spheresrc.SetRadius(row[u'size'])
spheresrc.SetPhiResolution(25)
spheresrc.SetThetaResolution(25)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(spheresrc.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
cellactorlist.append(actor)
return cellactorlist
def export2AVI(self):
'''
        render the scene then export it to an AVI file
'''
cellactorlist = self.makeCells()
# print len(cellactorlist)
ren = vtk.vtkRenderer()
renwin = vtk.vtkRenderWindow()
renwin.AddRenderer(ren)
#iren = vtk.vtkRenderWindowInteractor()
#iren.SetRenderWindow(renwin)
for cellactor in cellactorlist:
ren.AddActor(cellactor)
ren.SetBackground(0,0,0,)
renwin.SetSize(400,400)
#iren.Initialize()
ren.ResetCamera()
ren.GetActiveCamera().Zoom(0.5)
renwin.Render()
#iren.Start()
writer = vtk.vtkAVIWriter()
w2i = vtk.vtkWindowToImageFilter()
w2i.SetInput(renwin)
writer.SetInputConnection(w2i.GetOutputPort())
writer.SetFileName("cell_visualization.avi")
writer.Start()
for celllist in self.dataset:
for i in range(len(cellactorlist)):
cellactorlist[i].SetPosition(float(celllist[i]['pos'][0]),float(celllist[i]['pos'][1]),float(celllist[i]['pos'][2]))
w2i.Modified()
#writer.Write()
writer.Write()
writer.End()
def debug(self):
'''
Debug
'''
cellactorlist = self.makeCells()
# print len(cellactorlist)
ren = vtk.vtkRenderer()
renwin = vtk.vtkRenderWindow()
renwin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renwin)
for cellactor in cellactorlist:
ren.AddActor(cellactor)
ren.SetBackground(0,0,0,)
renwin.SetSize(400,400)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(0.5)
renwin.Render()
iren.Initialize()
#cb = vtkTimerCallback()
#cb.actorlist = cellactorlist
#cb.dataset = self.dataset
#iren.AddObserver('TimerEvent', cb.execute)
#timerId = iren.CreateRepeatingTimer(10)
iren.Start()
if __name__ == '__main__':
vis = Visualization()
vis.debug()
#vis.export2AVI()
|
septicmk/MEHI
|
MEHI/utils/visualization.py
|
Python
|
bsd-3-clause
| 4,115
|
[
"VTK"
] |
7343a45764f11ddbd953c840258377d35368f04c3dd319123d5ec9dec452b566
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# vim:fdm=marker
#
# =============================================================
# Copyright © 2018 Daniel Santiago <dpelaez@cicese.edu.mx>
# Distributed under terms of the GNU/GPL license.
# =============================================================
"""
"""
# --- import libs ---
import numpy as np
import datetime as dt
import pandas as pd
import netCDF4 as nc
import scipy.signal as signal
import os
import csv
import pickle
#
import tools.wdm as wdm
import tools.spectra as sp
# === Read Data Class ===
# {{{
class ReadData(object):
# main folder
bomm_number = 1
basepath = "/Volumes/BOMM1/dataBOMM1/data"
# private methods to read data {{{
def __init__(self, date):
self.date = date
def __repr__(self):
return f"BOMM{self.bomm_number} data at {self.date}"
# parse hour line into floating
def _parsedate(self, string):
"""Parse the date HH:MM:SS.ffffff YYYY mm dd."""
hour, dd, mm, yy = string.split()
H, M, sec = hour.split(":")
try:
S, f = sec.split(".")
except ValueError:
S, f = sec, '0'
return dt.datetime(int(yy), int(mm), int(dd), int(H), int(M), int(S), int(f))
# get file name from a given date
def _getfilename(self, sensor, date):
"""Returns filename based on date, sensor, and path."""
if sensor in ["maximet"]:
fmt = f"/{sensor}/%Y/%m/%d/{sensor}-%y%m%d%H.csv"
elif sensor in ["rbr"]:
fmt = f"/{sensor}/%Y/%m/{sensor}-%y%m%d.csv"
else:
fmt = f"/{sensor}/%Y/%m/%d/%H/{sensor}-%y%m%d%H%M.csv"
self.filename = self.basepath + dt.datetime.strftime(date, fmt)
return self.filename
# read file
def _readfile(self, sensor, date, columns):
"""This function reads the data of the original files."""
# get filename
filename = self._getfilename(sensor, date)
# pre-define variables
time = []
obs = {v: [] for v in columns.values()}
# read file
# TODO: some files have lines with null bytes. Check it!
with open(filename, 'r') as f:
data = csv.reader(f, delimiter=',')
for row in data:
time.append(self._parsedate(" ".join(row[:4])))
for k, v in columns.items():
try:
obs[v].append(float(row[k]))
except ValueError:
obs[v].append(row[k])
# convert to numpy array and add time array
obs["time"] = time
for k, v in obs.items():
obs[k] = np.asarray(v)
return obs
# new time array
def _getnewtime(self, date, fs, N=600):
"""Returns list of datetimes of N seconds at fs sampling frequency."""
seconds = np.arange(0, N, 1/fs)
return np.array([date + dt.timedelta(seconds=s) for s in seconds])
# interpolate the data
def _resample(self, dic, date, fs, N=600):
"""This function uses pandas for an accurate resample"""
# create new time array
time_new = self._getnewtime(date, fs, N)
# create pandas dataframe using the data in the dictionary
t = dic["time"]
df = pd.DataFrame({k:v for k,v in dic.items()
if k not in ["time"]}, index=t)
# check the number of nans
n = len(t)
n_nans = df.isnull().sum().max()
if n_nans / n > 0.1:
raise Exception(f"Number of NaNs is {n_nans:d} out of {n:d}.")
# if n < 0.1*len(time_new):
# raise Exception(f"More than 10% of data is missing for this file.")
# drop missing values only if are less than 10 percent of the data
# TODO: not more than 100 consecutive missing values
df = df.dropna(how="any")
# remove duplicate indices if they exist
df = df[~df.index.duplicated(keep='first')]
# sort data in ascending and reindex to the new time
# i still dont know what is the difference between ffill/bfill
l = fs if fs>=1 else None
df = df.sort_index().reindex(time_new, limit=1, method="bfill").ffill()
# crate new dictionary for output
outdic = {c:df[c].as_matrix() for c in df}
outdic["time"] = time_new
return outdic
# high pass filter
    def _butterworth(self, x, fs, fcut):
        """Apply a Butterworth filter before resampling."""
raise NotImplementedError
# correct NULL bytes
def _remove_null_bytes(self, filename):
"""Remove NULL bytes in file represented by '\0'."""
raise NotImplementedError
# }}}
# read waves method {{{
def waves(self):
# parameters
date = self.date
sensor = "wstaff"
fac = 3.5/4095
columns = {4:"ws1", 5:"ws2", 6:"ws3", 7:"ws4", 8:"ws5", 9:"ws6"}
# load observed data
obs = self._readfile(sensor, date, columns)
# apply correction factor of 3.5/4095
for k in columns.values():
obs[k] *= fac
# return data
return self._resample(obs, date, fs=20, N=600) # 10-minutes blocks
# }}}
# read winds method {{{
def winds(self):
# parameters
date = self.date
sensor = "anemometro"
columns = {5:"U", 6:"V", 7:"W", 8:"T"}
# load observed data
obs = self._readfile(sensor, date, columns)
# return data
return self._resample(obs, date, fs=100, N=600) # 10-minutes blocks
# }}}
# read motion method {{{
def motion(self):
# parameters
date = self.date
sensor = "acelerometro"
columns = {
6:"surge", 7:"sway", 8:"heave",
9:"dpitch_dt", 10:"droll_dt", 11:"dyaw_dt",
13:"dvel_x", 14:"dvel_y", 15:"dvel_z",
16:"dang_x", 17:"dang_y", 18:"dang_z"
}
# load observed data
obs = self._readfile(sensor, date, columns)
# correction to match buoy frame of reference
obs["sway"] *= -1
obs["heave"] *= -1
obs["droll_dt"] *= -1
obs["dyaw_dt"] *= -1
obs["dvel_y"] *= -1
obs["dvel_z"] *= -1
obs["dang_y"] *= -1
obs["dang_z"] *= -1
# return data
return self._resample(obs, date, fs=100, N=600) # 10-minutes blocks
# }}}
# read maximet method {{{
def meteo(self):
# parameters
date = self.date.replace(minute=0)
sensor = "maximet"
columns = {
4: "uWdir",
6: "cWdir",
5: "Wspd",
7: "Patm",
8: "rhum",
9: "Tair",
11: "rain"
}
# load observed data
obs = self._readfile(sensor, date, columns)
# return data
return self._resample(obs, date, fs=1, N=3600) # 60-minutes blocks
# }}}
# read water quality method {{{
def water(self):
# parameters
date = self.date.replace(hour=0, minute=0)
sensor = "rbr"
columns = {4:"conductivity", 5:"temperature", 11:"salinity"}
# load observed data
obs = self._readfile(sensor, date, columns)
# return data
return self._resample(obs, date, fs=1/600, N=86400) # 1-day blocks
# }}}
# read any sensor {{{
    def read(self, sensor):
        """Return data from the specified sensor, or NaN-filled arrays if the file doesn't exist."""
sensor_list = {
'wavestaff' : (self.waves, 20, 600),
'anemometro' : (self.winds, 100, 600),
'acelerometro' : (self.motion, 100, 600),
'maximet' : (self.meteo, 1, 3600),
'rbr' : (self.water, 1/600, 86400)
}
function, fs, N = sensor_list[sensor]
try:
# read data
data = function()
# if file does not exist or not valid
except Exception as e:
print(e)
#
dummy_date = dt.datetime(2017,11,17,0,0)
dummy_objc = ReadData(dummy_date)
data = dummy_objc.read(sensor)
data["time"] = self._getnewtime(self.date, fs, N)
for k in data.keys():
if k not in ["time"]:
data[k] *= np.nan
return data
# }}}
# }}}
# === Write NetCDF file ===
# {{{
# generator of date list {{{
def dates():
"""Returns a generator containing dates of bomm measurements each day."""
start = dt.datetime(2017, 11, 17)
final = dt.datetime(2018, 1, 31)
while start <= final:
yield start
start += dt.timedelta(days=1)
# }}}
# write water variables {{{
def write_water_group(dataset, date):
"""Write variables and attributes associated to water group."""
print("wat --> ", date)
# get data from the given date
wat = ReadData(date).read("rbr")
    # create group, dimensions and global attributes
water = dataset.createGroup(f"{hgrp}/water")
water.createDimension("time", None)
water.description = "Measurements of RBR Concerto"
water.sampling_frequency = 1/600
# variable attributes
units = {
"time": global_time_units,
"temperature": "Degrees Celsius",
"salinity": "ppm",
"conductivity": "mS/cm"
}
# create each variable
for k, v in wat.items():
water.createVariable(k, "f8", "time")
water[k].units = units[k]
if k not in "time":
water[k][:] = v
else:
water["time"][:] = nc.date2num(v, global_time_units)
# }}}
# write meteorological variables {{{
def write_meteo_group(dataset, date):
"""Write variables and attributes associated to meteo group."""
    # create group, dimensions and global attributes
meteo = dataset.createGroup(f"{hgrp}/meteo")
meteo.createDimension("time", None)
meteo.description = "Measurements of Gill Maximet GMX-600"
meteo.comments = "BOMM1: North mark is rotated 60 degrees counterclockwise"
meteo.sampling_frequency = 1
# variable attributes
units = {
"time": global_time_units,
"uWdir": "Azimuth in nautical convention",
"cWdir": "Azimuth in nautical convention",
"Wspd": "m/s",
"Patm": "mbar",
"rhum": "Percentage",
"Tair": "Degrees Celsius",
"rain": "mm/h"
}
# create each variable
for k, v in units.items():
meteo.createVariable(k, "f8", "time")
meteo[k].units = v
# loop for each data
seconds_per_hour = 3600
i, j = 0, seconds_per_hour
for hr in range(24):
#
file_date = date + dt.timedelta(hours=hr)
print("met --> ", file_date)
#
# load data
met = ReadData(file_date).read("maximet")
#
for k, v in met.items():
if k not in "time":
meteo[k][i:j] = v
else:
meteo["time"][i:j] = nc.date2num(v, global_time_units)
# update counter
i, j = j, j + seconds_per_hour
# }}}
# write waves variables {{{
def write_waves_group(dataset, date):
"""Write variables and attributes associated to waves group."""
# position of the wavestaff relative to the buoy
Lx, Ly = wdm.reg_array(N=5, R=0.866, theta_0=-180)
# add correction due to position of the accelerometers
x_offset, y_offset, z_offset = -0.413, -0.339, 4.45
Lx += x_offset
Ly += y_offset
Lz = np.zeros(6) + z_offset
    # create group, dimensions and global attributes
waves = dataset.createGroup(f"{hgrp}/waves")
waves.createDimension("time", None)
    waves.description = "Measurements of wavestaff wires in a pentagonal array"
waves.Lx, waves.Ly, waves.Lz = Lx, Ly, Lz
waves.sampling_frequency = 20
# variable attributes
units = {
"time": global_time_units,
"ws1": "m",
"ws2": "m",
"ws3": "m",
"ws4": "m",
"ws5": "m",
"ws6": "m"
}
# create each variable
for k, v in units.items():
waves.createVariable(k, "f8", "time")
waves[k].units = v
# loop for each data
samples_per_ten_minutes = int(600*20)
i, j = 0, samples_per_ten_minutes
#
for hr in range(24):
#
for mn in range(0, 60, 10):
file_date = date+dt.timedelta(hours=hr, minutes=mn)
print("wav --> ", file_date)
# load data
wav = ReadData(file_date).read("wavestaff")
            # assign data to netcdf variable
for k, v in wav.items():
if k not in "time":
waves[k][i:j] = v
else:
waves["time"][i:j] = nc.date2num(v, global_time_units)
# update counter
i, j = j, j + samples_per_ten_minutes
# }}}
# write wind variables {{{
def write_wind_group(dataset, date):
"""Write variables and attributes associated to wind group."""
# locations
L = (-0.413, -0.339, 13.01)
wind = dataset.createGroup(f"{hgrp}/wind")
wind.createDimension("time", None)
wind.description = "Measurements of sonic anemometer Gill R3100"
wind.sampling_frequency = 100
wind.convention = "Cartesian: W positive up, U positive 30 ccw, V positive 120 ccw"
wind.comments = "Data are rotated 30 degrees ccw from north mark"
wind.L = L
# variable attributes
units = {
"time": global_time_units,
"U": "m/s",
"V": "m/s",
"W": "m/s",
"T": "Degrees Celsius",
}
# create each variable
for k, v in units.items():
wind.createVariable(k, "f8", "time")
wind[k].units = v
# loop for each data
samples_per_ten_minutes = int(600*100)
i, j = 0, samples_per_ten_minutes
#
for hr in range(24):
#
for mn in range(0, 60, 10):
file_date = date+dt.timedelta(hours=hr, minutes=mn)
print("wnd --> ", file_date)
# load data
wnd = ReadData(file_date).read("anemometro")
            # assign data to netcdf variable
for k, v in wnd.items():
if k not in "time":
wind[k][i:j] = v
else:
wind["time"][i:j] = nc.date2num(v, global_time_units)
# update counter
i, j = j, j + samples_per_ten_minutes
# }}}
# write motion variables {{{
def write_motion_group(dataset, date):
"""Write variables and attributes associated to motion group."""
motion = dataset.createGroup(f"{hgrp}/motion")
motion.createDimension("time", None)
motion.description = "Measurements of Subsea MRU Ekinok2-M"
motion.sampling_frequency = 100
motion.convention = "X positive towards north buoy, Y eastward and Z downward"
# variable attributes
units = {
"time": global_time_units,
"surge": "m/s^2", "sway": "m/s^2", "heave": "m/s^2",
"dpitch_dt": "rad/s", "droll_dt": "rad/s", "dyaw_dt": "rad/s",
"dvel_x": "m/s^2", "dvel_y": "m/s^2", "dvel_z": "m/s^2",
"dang_x": "rad/s", "dang_y": "rad/s", "dang_z": "rad/s"
}
# create each variable
for k, v in units.items():
motion.createVariable(k, "f8", "time")
motion[k].units = v
# loop for each data
samples_per_ten_minutes = int(600*100)
i, j = 0, samples_per_ten_minutes
#
for hr in range(24):
#
for mn in range(0, 60, 10):
file_date = date+dt.timedelta(hours=hr, minutes=mn)
print("mot --> ", file_date)
# load data
mot = ReadData(file_date).read("acelerometro")
            # assign data to netcdf variable
for k, v in mot.items():
if k not in "time":
motion[k][i:j] = v
else:
motion["time"][i:j] = nc.date2num(v, global_time_units)
# update counter
i, j = j, j + samples_per_ten_minutes
# }}}
# list of writer functions {{{
def writers_list():
return (write_water_group, write_meteo_group, write_waves_group,
write_wind_group, write_motion_group)
# }}}
# write netcdf for all data {{{
def write_netcdf():
    """Write all raw data in a nice netCDF4 format."""
    # hgrp and global_time_units are read as module-level names by the
    # write_*_group functions above, so declare them global before assigning
    global hgrp, global_time_units
    # global time units
    global_time_units = f"seconds since {next(dates())}"
# netcdf filename
ncfilename = f"{ReadData.basepath}/bomm.data.nc"
with nc.Dataset(ncfilename, "w") as dataset:
# loop for each date
for date in dates():
# name of the group associated with each day
hgrp = date.strftime("%Y%m%d")
for func in writers_list():
func(dataset, date)
print("-"*37 + "\n")
# }}}
# }}}
# === Common useful functions ===
# {{{
# get index for specific date {{{
def get_index(date, fs, number_of_minutes=30):
"""Return group name and index for specific date."""
# group name
hgrp = date.strftime("%Y%m%d")
# number of samples
N = int(fs * 24 * 3600)
hour, minute = date.hour, date.minute
# start and final index
i = int(fs*hour*3600 + fs*minute*60)
j = i + int(fs*number_of_minutes*60)
return hgrp, (i, j)
# }}}
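# Worked example (hedged): with fs = 100 Hz, a 30-minute block starting at
# 2017-11-17 06:10 falls in group "20171117" with
#   i = int(100*6*3600 + 100*10*60) = 2220000
#   j = i + int(100*30*60)          = 2400000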
# create dictionary from motion package data {{{
def get_dictionary(group, date):
"""Construct motion dictionary resampled at specific sampling frequency.
Args:
group (obj): Group of netCDF4 variable for a specific day
        date (datetime): Start of the 30-minute analysis window.
Returns:
Dictionary containig all variables.
"""
# start and final indices
fs = group.sampling_frequency
hgrp, (i, j) = get_index(date, fs, number_of_minutes=30)
dic = {}
for k in group.variables.keys():
if k not in ["time"]:
dic[k] = group[k][i:j]
return dic
# }}}
# fancy nanmean without akward warning msg {{{
def nanmean(x):
nans = np.isnan(x).nonzero()[0]
if len(nans) == len(x):
return np.nan
else:
return np.nanmean(x)
# }}}
# despiking function {{{
def simple_despike(x, value=1):
x[abs(x) > value] = np.nan
return x
# }}}
# }}}
# === Motion correction functions ===
# {{{
# butterworth lowpass filter {{{
def butter_lowpass_filter(data, cutoff=2, fs=100, order=5):
if cutoff is not None:
b, a = signal.butter(order, cutoff/(0.5*fs), btype='low', analog=False)
data_filtered = signal.filtfilt(b, a, data)
return data_filtered
else:
return data
# }}}
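# Hedged usage note: with the defaults above this is a zero-phase (filtfilt)
# 5th-order Butterworth low-pass at 2 Hz for 100 Hz data, e.g.
#   heave_smooth = butter_lowpass_filter(mot["heave"], cutoff=2, fs=100)
# and cutoff=None bypasses the filter, returning the input unchanged.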
# compute euler angles {{{
def euler_angles(mot, step=1):
    """Compute Euler angles and smooth the accelerometer signals."""
# remove nans
for k, v in mot.items():
nans = np.isnan(v)
if len(nans.nonzero()[0]) / len(v) < 0.1:
mot[k][nans] = 0
else:
raise Exception("More than 10% of invalid data")
# compute pitch roll and yaw
for k in ["pitch", "roll", "yaw"]:
mot[k] = int_fft(mot[f"d{k}_dt"], 100, -1, 0.04)
#
surge = butter_lowpass_filter(mot["surge"])[::step]
sway = butter_lowpass_filter(mot["sway"])[::step]
heave = butter_lowpass_filter(mot["heave"])[::step]
#
pitch = butter_lowpass_filter(mot["pitch"])[::step]
roll = butter_lowpass_filter(mot["roll"])[::step]
yaw = butter_lowpass_filter(mot["yaw"])[::step]
#
dpitch_dt = butter_lowpass_filter(mot["dpitch_dt"])[::step]
droll_dt = butter_lowpass_filter(mot["droll_dt"])[::step]
dyaw_dt = butter_lowpass_filter(mot["dyaw_dt"])[::step]
return surge, sway, heave, pitch, roll, yaw, dpitch_dt, droll_dt, dyaw_dt
# }}}
# integration in the frequency domain {{{
def fft_intdiff(signal, fs, order=-1, fcut=None):
    """Integral or differentiation in the frequency domain.
This function performs an integration of a discrete time series
in the frequency domain taking advantage of the Fourier transform
    properties. Depending on the argument order, this function can also
    be used to differentiate a time series.
d^n u /
----- = IFFT{(iw)^n * FFT{u}} and | u dt = IFFT{FFT{u} / (iw)^n}
dt^n /
n
Args:
signal (array): Time series.
fs (float): sampling frequency
order (int): order of the integral or derivative
* if order = 0 returns signal
* if order > 0 returns n-order-derivative
* if order < 0 returns n-order-integral
fcut (float): cut-off frequency to filter low frequency noise in case of
a an integral (order<0) and high frequency noise in case of a derivative
(order>0).
Returns:
array: Integrated or derivated signal
Note:
Actually the filter is only making zeros the energy asociated to the
frequencies lower (greater) than fcut when integrating (derivating). A
most powerfull filter should be designed.
"""
nans = np.isnan(signal)
if len(nans.nonzero()[0]) / len(signal) < 0.1:
signal[nans] = 0
else:
return signal * np.nan
# if order == 0 do nothing
if order == 0:
return signal
# NOTE: NaNs (up to 10 percent of the record) are already handled above
# get frequency array
N = len(signal)
freqs = np.fft.fftfreq(N, 1/fs)
# the first element of freqs array is zero, so we have
# to discard it to avoid division by zero
# the factor of integration is iw
factor = 1j*2*np.pi*freqs[1:]
# compute fft of the signal for the non-zero frequencies
# and apply integration factor
fft = np.zeros(len(freqs), 'complex')
fft[1:] = np.fft.fft(signal)[1:] * factor**order
# high pass filter for integrals
if order < 0:
if fcut is None:
return np.fft.ifft(fft).real
else:
ix = abs(freqs) <= fcut
fft[ix] = 0.
# low pass filter for derivatives
if order > 0:
if fcut is None:
return np.fft.ifft(fft).real
else:
ix = abs(freqs) >= fcut
fft[ix] = 0.
# return the real part of the inverse-transformed signal
return np.fft.ifft(fft).real
def diff_fft(signal, fs, order=1, fcut=None):
"""Differentiation in the frequency domain: see `fft_intdiff`."""
if order > 0:
return fft_intdiff(signal, fs, order, fcut)
else:
raise ValueError("Order must be a positive integer")
def int_fft(signal, fs, order=-1, fcut=None):
"""integration in the frequency domain: see `fft_intdiff`"""
if order < 0:
return fft_intdiff(signal, fs, order, fcut)
else:
raise ValueError("Order must be a negative integer")
# --- }}}
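# usage sketch for int_fft / diff_fft (editor addition) {{{
# Illustrative check with a pure tone: differentiating sin(2*pi*f0*t) gives
# 2*pi*f0*cos(2*pi*f0*t), and integrating that back recovers the original
# signal (the 1 Hz tone is unaffected by the 0.04 Hz cut-off).
def _example_fft_intdiff():
    fs, f0 = 100.0, 1.0
    t = np.arange(0, 60, 1/fs)
    x = np.sin(2*np.pi*f0*t)
    dxdt = diff_fft(x, fs, order=1, fcut=10.0)
    assert np.allclose(dxdt, 2*np.pi*f0*np.cos(2*np.pi*f0*t), atol=0.05)
    x_back = int_fft(dxdt, fs, order=-1, fcut=0.04)
    assert np.allclose(x_back, x, atol=0.05)
# }}}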
# velocity_correction {{{
def velocity_correction(u, v, w, mot, L=(0,0,0), fs=100, fcut=0.04):
"""Velocity correction.
This function applies the correction of the wind speed measured by
a sonic anemometer due to the buoy inertial movements
The equation to perform such a correction is given by
/
U = T U_obs + Om x T L + T | a dt
/
---v--- -----v----- -----v-----
U_trans U_rot U_buoy
where T is a rotation matrix given by:
| cosp*cosy sinp*sinr*cosy-cosr*siny siny*sinr+sinp*cosr*cosy |
T = | cosp*siny cosr*cosy+sinr*sinp*siny -sinr*cosy+cosr*siny*sinp |
| -sinp cosp*sinr cosp*cosr |
in which, `p` is pitch (theta), `r` is roll (phi) and `y` is yaw (psi)
In the same way, the matrix of angular rate of changes is given by:
| - dpdt siny + drdt cosp cosy |
Omega = | dpdt cosy + drdt cosp siny |
| dydt - drdt sinp |
Args:
u, v, w (arrays): Contains the three components of velocity.
mot (dict): Dictionary containing time series of:
* accelerations: surge, sway, heave
* inclination rates: dpitch_dt, droll_dt, dyaw_dt
* euler angles: pitch, roll, yaw
L (list or tuple): Three elements, containing the relative distance of
the wavestaff to the accelerometer package.
fs (float): Sampling frequency of the time series
fcut (float): Cut-off frequency for the low pass filter after
integration of the accelerations
Returns:
u_real, v_real, w_real: Tuple with corrected velocity components.
References:
* Anctil Donelan Drennan Graber 1994, JAOT 11, 1144-1150
* Drennan Donelan Madsen Katsaros Terray Flagg 1994, JAOT 11, 1109-1116
"""
# detrend signal
d = lambda x: x - np.nanmean(x)
# get main variables
u_obs, v_obs, w_obs = u.copy(), v.copy(), w.copy()
(surge, sway, heave, pitch, roll, yaw,
dpitch_dt, droll_dt, dyaw_dt) = euler_angles(mot, step=1)
# compute sines and cosines for each angle
cp, sp = np.cos(pitch), np.sin(pitch)
cr, sr = np.cos(roll), np.sin(roll)
#
myaw = np.arctan2(np.nanmean(np.sin(yaw)), np.nanmean(np.cos(yaw)))
cy, sy = np.cos(yaw-myaw), np.sin(yaw-myaw)
# compute observed velocities into earth reference frame
u_trans = u_obs*cp*cy + v_obs*(sp*sr*cy - cr*sy) + w_obs*(sy*sr + sp*cr*cy)
v_trans = u_obs*cp*sy + v_obs*(cr*cy + sr*sp*sy) + w_obs*(-sr*cy + cr*sy*sp)
w_trans = -u_obs*sp + v_obs*cp*sr + w_obs*cp*cr
# compute buoy velocity components from accelerations
g = 9.8
vel_x = int_fft(surge + g*sp, fs, -1, fcut)
vel_y = int_fft(sway - g*sr*cp, fs, -1, fcut)
vel_z = int_fft(heave - g*cp*cr, fs, -1, fcut)
# compute transformed velocities into earth reference frame
u_buoy = vel_x*cp*cy + vel_y*(sp*sr*cy - cr*sy) + vel_z*(sy*sr + sp*cr*cy)
v_buoy = vel_x*cp*sy + vel_y*(cr*cy + sr*sp*sy) + vel_z*(-sr*cy + cr*sy*sp)
w_buoy = -vel_x*sp + vel_y*cp*sr + vel_z*cp*cr
# compute transformed position of the anemometer
Lx, Ly, Lz = L
Lx_e = Lx*cp*cy + Ly*(sp*sr*cy - cr*sy) + Lz*(sy*sr + sp*cr*cy)
Ly_e = Lx*cp*sy + Ly*(cr*cy + sr*sp*sy) + Lz*(-sr*cy + cr*sy*sp)
Lz_e = -Lx*sp + Ly*cp*sr + Lz*cp*cr
# compute rotation matrix components
Om_x = -dpitch_dt * sy + droll_dt * cp * cy
Om_y = dpitch_dt * cy + droll_dt * cp * sy
Om_z = dyaw_dt - droll_dt * sp
# compute the rotated values of the velocity components
u_rot = (Om_y*Lz_e - Om_z*Ly_e)
v_rot = -(Om_x*Lz_e - Om_z*Lx_e)
w_rot = (Om_x*Ly_e - Om_y*Lx_e)
# compute real surface elevation
u_real = u_trans + u_rot + u_buoy
v_real = v_trans + v_rot + v_buoy
w_real = w_trans + w_rot + w_buoy
return u_real, v_real, w_real
# --- }}}
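# usage sketch for velocity_correction (editor addition) {{{
# Consistency check with hypothetical data: for a motionless buoy (all
# motion-package records equal to zero) every correction term vanishes and
# the corrected wind components equal the measured ones.
def _example_velocity_correction():
    n = 6000
    zeros = np.zeros(n)
    mot = {k: zeros.copy() for k in
           ("surge", "sway", "heave", "dpitch_dt", "droll_dt", "dyaw_dt")}
    u = np.sin(2*np.pi*0.1*np.arange(n)/100)
    u_r, v_r, w_r = velocity_correction(u, zeros.copy(), zeros.copy(), mot)
    assert np.allclose(u_r, u, atol=1e-6)
    assert np.allclose(v_r, 0, atol=1e-6) and np.allclose(w_r, 0, atol=1e-6)
# }}}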
# position_correction {{{
def position_correction(x, y, z, mot, fs=20, fcut=0.04):
"""Correcion of the surfave elevation.
This function applies the correction of the surface elevation measured by
the wavestaffs due to the buoy inertial movements.
The equation to perform such a correction is given by
/ //
X = T Xo + | Om x T Xo dt + T || a dt dt
/ //
---v--- -------v------- -----v-----
x_trans x_rot x_buoy
where T is a rotation matrix given by:
| cosp*cosy sinp*sinr*cosy-cosr*siny siny*sinr+sinp*cosr*cosy |
T = | cosp*siny cosr*cosy+sinr*sinp*siny -sinr*cosy+cosr*siny*sinp |
| -sinp cosp*sinr cosp*cosr |
in which, `p` is pitch (theta), `r` is roll (phi) and `y` is yaw (psi)
In the same way, the matrix of angular rate of changes is given by:
| - dpdt siny + drdt cosp cosy |
Omega = | dpdt cosy + drdt cosp siny |
| dydt - drdt sinp |
Arguments:
x, y, z (float): Northward, westward and upward coordinates
of each wavestaff.
mot (dict): Dictionary containing time series of:
* accelerations: surge, sway, heave
* inclination rates: dpitch_dt, droll_dt, dyaw_dt
* euler angles: pitch, roll, yaw
fs (float): Sampling frequency of the time series.
fcut (float): Cut-off frequency for the low pass filter after
integration of the accelerations.
Returns:
x_real, y_real, z_real: numpy array with the corrected surface elev.
References:
* Anctil Donelan Drennan Graber 1994, JAOT 11, 1144-1150
* Drennan Donelan Madsen Katsaros Terray Flagg 1994, JAOT 11, 1109-1116
"""
# compute pitch roll and yaw
(surge, sway, heave, pitch, roll, yaw,
dpitch_dt, droll_dt, dyaw_dt) = euler_angles(mot, step=5)
# compute sines and cosines
cp, sp = np.cos(pitch), np.sin(pitch)
cr, sr = np.cos(roll), np.sin(roll)
cy, sy = np.cos(yaw), np.sin(yaw)
# compute surface elevation transformed into the inertial frame
x_trans = x*cp*cy + y*(sp*sr*cy - cr*sy) + z*(sy*sr + sp*cr*cy)
y_trans = x*cp*sy + y*(cr*cy + sr*sp*sy) + z*(-sr*cy + cr*sy*sp)
z_trans = -x*sp + y*cp*sr + z*cp*cr
# buoy movements
g = 9.8
pos_x = int_fft(surge + g*sp, fs, -2, fcut)
pos_y = int_fft(sway - g*sr*cp, fs, -2, fcut)
pos_z = int_fft(heave - g*cp*cr, fs, -2, fcut)
# compute transformed positions into earth reference frame
x_buoy = pos_x*cp*cy + pos_y*(sp*sr*cy - cr*sy) + pos_z*(sy*sr + sp*cr*cy)
y_buoy = pos_x*cp*sy + pos_y*(cr*cy + sr*sp*sy) + pos_z*(-sr*cy + cr*sy*sp)
z_buoy = -pos_x*sp + pos_y*cp*sr + pos_z*cp*cr
# compute transformed position
x_e = x*cp*cy + y*(sp*sr*cy - cr*sy) + z*(sy*sr + sp*cr*cy)
y_e = x*cp*sy + y*(cr*cy + sr*sp*sy) + z*(-sr*cy + cr*sy*sp)
z_e = -x*sp + y*cp*sr + z*cp*cr
# compute rotation matrix components
Om_x = -dpitch_dt * sy + droll_dt * cp * cy
Om_y = dpitch_dt * cy + droll_dt * cp * sy
Om_z = dyaw_dt - droll_dt * sp
# compute the rotated values of the position vector
x_rot = int_fft((Om_y*z_e - Om_z*y_e), fs, -1, fcut)
y_rot = int_fft(-(Om_x*z_e - Om_z*x_e), fs, -1, fcut)
z_rot = int_fft((Om_x*y_e - Om_y*x_e), fs, -1, fcut)
# compute real surface elevation
x_real = x_trans + x_rot + x_buoy
y_real = y_trans + y_rot + y_buoy
z_real = z_trans + z_rot + z_buoy
return x_real, y_real, z_real
# --- }}}
# }}}
# === Wavestaff functions ===
# {{{
# wavestaff correction {{{
def wavestaff_correction(wav, mot, Lx, Ly, Lz):
"""This function returns the corrected data of the wstaffs.
Args:
wav (dict): Dictionary with the measurements of each wavestaff.
mot (dict): Dictionary with the motion-package variables.
Lx, Ly, Lz (array): Positions of the wavestaffs.
Returns:
(dict): Dictionary with the corrected wavestaff data.
"""
# compute time variation of the position
ntime = wav["ws1"].shape[0]
nstaff = 6
#
xx = np.zeros((ntime, nstaff))
yy = np.zeros((ntime, nstaff))
zz = np.zeros((ntime, nstaff))
#
corrected = {}
for i in range(nstaff):
xx[:,i], yy[:,i], zz[:,i] = position_correction(
Lx[i], Ly[i], Lz[i] + wav[f"ws{i+1:d}"], mot)
corrected[f"ws{i+1:d}"] = zz[:,i] - np.nanmean(zz[:,i])
#
return corrected
# }}}
# frequency spectrum {{{
def frequency_spectrum(wav, fft_params=None):
"""Returns the waves spectrum using Welch method."""
# length of data
ntime = len(wav["ws1"])
nstaff = len(wav)
# check for NANs
for k, v in wav.items():
nans = np.isnan(v)
if len(nans.nonzero()[0]) / len(v) < 0.1:
wav[k][nans] = 0
else:
raise Exception("More than 10% of invalid data")
# fft parameters
if fft_params is None:
fft_params = {
"fs": 20, # <- sampling frquency
"nperseg": 3600, # <- lenght of each segment
"noverlap": 1800, # <- points to overlap between segments
"window": 'hann', # <- tapering window
"detrend": 'linear' # <- detrend data linearly
}
# trim boundary of time series
# NOTE: I think it is not necessary
# i = int(0.5 * (ntime - 2**np.floor(np.log2(ntime))))
# compute periodogram for each wire
Pxx = np.zeros((int(fft_params["nperseg"]/2)+1, nstaff))
for j in range(nstaff):
frqs, Pxx[:,j] = signal.welch(wav[f"ws{j+1:d}"], **fft_params)
# returns frequency and averaged power density without first value
return frqs[1:], np.nanmean(Pxx[:,1:], axis=1)[1:]
# }}}
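# usage sketch for frequency_spectrum (editor addition) {{{
# Illustrative example: six identical 0.2 Hz sinusoidal wavestaff records
# sampled at 20 Hz for 30 minutes should produce a spectral peak at 0.2 Hz.
def _example_frequency_spectrum():
    fs, f0, nsamp = 20, 0.2, 20 * 1800
    t = np.arange(nsamp) / fs
    eta = 0.5 * np.sin(2*np.pi*f0*t)
    wav = {f"ws{i+1:d}": eta.copy() for i in range(6)}
    frqs, S = frequency_spectrum(wav)
    assert abs(frqs[np.argmax(S)] - f0) < 0.02
# }}}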
# }}}
# === Sonic anemometer functions ===
# {{{
# wind data rotation {{{
def wind_rotation(wnd, theta=np.radians(30)):
"""This function returns the rotated velocity components."""
U_unc = wnd["U"] * 1.0
V_unc = wnd["V"] * 1.0
U_aligned = U_unc*np.cos(theta) + V_unc*np.sin(theta)
V_aligned = U_unc*np.sin(theta) - V_unc*np.cos(theta)
wnd["U_rot"] = U_unc
wnd["V_rot"] = V_unc
wnd["U"] = U_aligned
wnd["V"] = V_aligned
# }}}
# wind data correction {{{
def wind_correction(wnd, mot, L):
"""This function returns the corrected velocity components.
Args:
wnd (dic): Dictionary with sonic anemometer data
mot (dic): Dictionary with motion sensor data
L (array): Three elements array with location of sonic anemometer
respect to motion sensors.
"""
# get data
U, V, W = wnd["U"], wnd["V"], wnd["W"]
# apply correction using the proper function
U_real, V_real, W_real = velocity_correction(U, V, W, mot, L)
return U_real, V_real, W_real
# }}}
# eddy correlation method {{{
def eddy_correlation_flux(U, V, W, T):
"""Computes momentum flux from corrected velocity components.
Args:
U, V, W, T (array): Array with sonic anemometer variables.
Returns:
tuple: x and y components of the momentum flux and the kinematic heat flux.
References:
* https://www.eol.ucar.edu/content/wind-direction-quick-reference
"""
# align with max variability axis (average V = 0)
theta = np.arctan2(np.nanmean(V), np.nanmean(U)) #<- from U to V counterclockwise
U_stream = U*np.cos(theta) + V*np.sin(theta)
V_stream = -U*np.sin(theta) + V*np.cos(theta)
# align with the flow so that the mean W equals zero
phi = np.arctan2(np.nanmean(W), np.nanmean(U_stream)) #<- from U to W counterclockwise
U_proj = U_stream*np.cos(phi) + W*np.sin(phi)
V_proj = V_stream.copy()
W_proj = -U_stream*np.sin(phi) + W*np.cos(phi)
# compute turbulent fluxes
d = lambda x: signal.detrend(x, type="linear")
u, v, w, T = d(U_proj), d(V_proj), d(W_proj), d(T)
uw, vw, wT = np.mean(u*w), np.mean(v*w), np.mean(w*T)
return uw, vw, wT
# }}}
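# usage sketch for eddy_correlation_flux (editor addition) {{{
# Illustrative example with synthetic turbulence: when w' is anti-correlated
# with u' the along-stream momentum flux u'w' comes out negative.
def _example_eddy_correlation_flux():
    np.random.seed(0)
    n = 36000
    up = np.random.randn(n)
    U = 5.0 + up
    W = -0.2 * up + 0.1 * np.random.randn(n)
    V = 0.5 * np.random.randn(n)
    T = 20.0 + 0.1 * np.random.randn(n)
    uw, vw, wT = eddy_correlation_flux(U, V, W, T)
    assert uw < 0
# }}}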
# }}}
if __name__ == "__main__":
pass
# === end of file ===
|
dspelaez/tools
|
tools/bomm.py
|
Python
|
gpl-3.0
| 35,771
|
[
"NetCDF"
] |
533e58b1cc4ffc6c3a56583ea1a8848e854eb2b0c2ff30aef19eaf161d100d20
|
# -*- coding: utf-8 -*-
""" Sahana Eden Incident Reporting Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3IRSModel",
"S3IRSResponseModel",
"irs_rheader"
)
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3IRSModel(S3Model):
names = ("irs_icategory",
"irs_ireport",
"irs_ireport_person",
"irs_ireport_id"
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
# Shortcuts
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# List of Incident Categories
# The keys are based on the Canadian ems.incident hierarchy, with a few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
irs_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
"missingPerson.amberAlert": T("Child Abduction Emergency"), # http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
"missingPerson.silver": T("Missing Senior Citizen"), # http://en.wikipedia.org/wiki/Silver_Alert
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# This Table defines which Categories are visible to end-users
tablename = "irs_icategory"
define_table(tablename,
Field("code",
label = T("Category"),
requires = IS_IN_SET_LAZY(lambda: \
sort_dict_by_values(irs_incident_type_opts)),
represent = lambda opt: \
irs_incident_type_opts.get(opt, opt)),
*s3_meta_fields())
configure(tablename,
list_fields = ["code"],
onvalidation = self.irs_icategory_onvalidation,
)
# ---------------------------------------------------------------------
# Reports
# This is a report of an Incident
#
# Incident Reports can be linked to Incidents through the event_incident_report table
#
# @ToDo: If not using the Events module, we could have a 'lead incident' to track duplicates?
#
# Porto codes
#irs_incident_type_opts = {
# 1100:T("Fire"),
# 6102:T("Hazmat"),
# 8201:T("Rescue")
#}
tablename = "irs_ireport"
define_table(tablename,
super_link("sit_id", "sit_situation"),
super_link("doc_id", "doc_entity"),
Field("name",
label = T("Short Description"),
requires = IS_NOT_EMPTY()),
Field("message", "text",
label = T("Message"),
represent = lambda text: \
s3_truncate(text, length=48, nice=True)),
Field("category",
label = T("Category"),
# The full set available to Admins & Imports/Exports
# (users use the subset by over-riding this in the Controller)
requires = IS_EMPTY_OR(IS_IN_SET_LAZY(lambda: \
sort_dict_by_values(irs_incident_type_opts))),
# Use this instead if a simpler set of Options required
#requires = IS_EMPTY_OR(IS_IN_SET(irs_incident_type_opts)),
represent = lambda opt: \
irs_incident_type_opts.get(opt, opt)),
self.hrm_human_resource_id(
#readable=False,
#writable=False,
label = T("Reported By (Staff)")
),
# Plain text field in case non-staff & don't want to clutter the PR
Field("person",
#readable = False,
#writable = False,
label = T("Reported By (Not Staff)"),
#comment = (T("At/Visited Location (not virtual)"))
),
Field("contact",
readable = False,
writable = False,
label = T("Contact Details")),
s3_datetime("datetime",
label = T("Date/Time of Alert"),
empty=False,
default="now",
future=0,
),
s3_datetime("expiry",
label = T("Expiry Date/Time"),
past=0,
),
self.gis_location_id(),
# Very basic Impact Assessment
# @ToDo: Use Stats_Impact component instead
Field("affected", "integer",
label=T("Number of People Affected"),
represent = lambda val: val or T("unknown"),
),
Field("dead", "integer",
label=T("Number of People Dead"),
represent = lambda val: val or T("unknown"),
),
Field("injured", "integer",
label=T("Number of People Injured"),
represent = lambda val: val or T("unknown"),
),
# Probably too much to try & capture
#Field("missing", "integer",
# label=T("Number of People Missing")),
#Field("displaced", "integer",
# label=T("Number of People Displaced")),
Field("verified", "boolean", # Ushahidi-compatibility
# We don't want these visible in Create forms
# (we override in Update forms in controller)
readable = False,
writable = False,
label = T("Verified?"),
represent = lambda verified: \
(T("No"),
T("Yes"))[verified == True]
),
# @ToDo: Move this to Events?
# Then add component to list_fields
s3_datetime("dispatch",
label = T("Date/Time of Dispatch"),
future=0,
# We don't want these visible in Create forms
# (we override in Update forms in controller)
readable = False,
writable = False,
),
Field("closed", "boolean",
# We don't want these visible in Create forms
# (we override in Update forms in controller)
default = False,
readable = False,
writable = False,
label = T("Closed?"),
represent = lambda closed: \
(T("No"),
T("Yes"))[closed == True]
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_INC_REPORT = T("Create Incident Report")
crud_strings[tablename] = Storage(
label_create = ADD_INC_REPORT,
title_display = T("Incident Report Details"),
title_list = T("Incident Reports"),
title_update = T("Edit Incident Report"),
title_upload = T("Import Incident Reports"),
title_map = T("Map of Incident Reports"),
label_list_button = T("List Incident Reports"),
label_delete_button = T("Delete Incident Report"),
msg_record_created = T("Incident Report added"),
msg_record_modified = T("Incident Report updated"),
msg_record_deleted = T("Incident Report deleted"),
msg_list_empty = T("No Incident Reports currently registered"))
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
filter_widgets = [
S3TextFilter(["name",
"message",
"comments",
],
label=T("Description"),
comment = T("You can search by description. You may use % as wildcard. Press 'Search' without input to list all incidents."),
_class="filter-search",
),
S3LocationFilter("location_id",
levels = levels,
#hidden = True,
),
S3OptionsFilter("category",
#hidden = True,
),
S3DateFilter("datetime",
label = T("Date"),
hide_time = True,
#hidden = True,
),
]
report_fields = ["category",
"datetime",
]
for level in levels:
report_fields.append("location_id$%s" % level)
# Resource Configuration
configure(tablename,
filter_widgets = filter_widgets,
list_fields = ["id",
"name",
"category",
"datetime",
"location_id",
#"organisation_id",
"affected",
"dead",
"injured",
"verified",
"message",
],
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = [(T("Number of Incidents"), "count(id)"),
(T("Total Affected"), "sum(affected)"),
(T("Total Dead"), "sum(dead)"),
(T("Total Injured"), "sum(injured)"),
],
defaults = dict(rows = "location_id$%s" % levels[0], # Highest-level of hierarchy
cols = "category",
fact = "count(id)",
totals = True,
)
),
super_entity = ("sit_situation", "doc_entity"),
)
# Components
if settings.get_irs_vehicle():
# @ToDo: This workflow requires more work
hr_link_table = "irs_ireport_vehicle_human_resource"
else:
hr_link_table = "irs_ireport_human_resource"
add_components(tablename,
# Tasks
project_task={"link": "project_task_ireport",
"joinby": "ireport_id",
"key": "task_id",
"actuate": "replace",
"autocomplete": "name",
"autodelete": False,
},
# Vehicles
asset_asset={"link": "irs_ireport_vehicle",
"joinby": "ireport_id",
"key": "asset_id",
"name": "vehicle",
# Dispatcher doesn't need to Add/Edit records, just Link
"actuate": "link",
"autocomplete": "name",
"autodelete": False,
},
# Human Resources
hrm_human_resource={"link": hr_link_table,
"joinby": "ireport_id",
"key": "human_resource_id",
# Dispatcher doesn't need to Add/Edit HRs, just Link
"actuate": "hide",
"autocomplete": "name",
"autodelete": False,
},
# Affected Persons
pr_person={"link": "irs_ireport_person",
"joinby": "ireport_id",
"key": "person_id",
"actuate": "link",
#"actuate": "embed",
#"widget": S3AddPersonWidget2(),
"autodelete": False,
},
)
ireport_id = S3ReusableField("ireport_id", "reference %s" % tablename,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"irs_ireport.id",
self.irs_ireport_represent)),
represent = self.irs_ireport_represent,
label = T("Incident"),
ondelete = "CASCADE")
# Custom Methods
set_method("irs", "ireport",
method = "dispatch",
action=self.irs_dispatch)
set_method("irs", "ireport",
method = "timeline",
action = self.irs_timeline)
set_method("irs", "ireport",
method = "ushahidi",
action = self.irs_ushahidi_import)
if settings.has_module("fire"):
create_next = URL(args=["[id]", "human_resource"])
else:
create_next = URL(args=["[id]", "image"])
configure("irs_ireport",
create_next = create_next,
create_onaccept = self.ireport_onaccept,
update_next = URL(args=["[id]", "update"])
)
# -----------------------------------------------------------
# Affected Persons
tablename = "irs_ireport_person"
define_table(tablename,
ireport_id(),
self.pr_person_id(),
s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Return model-global names to response.s3
#
return dict(irs_ireport_id = ireport_id,
irs_incident_type_opts = irs_incident_type_opts,
)
# -------------------------------------------------------------------------
def defaults(self):
"""
Safe defaults for model-global names in case module is disabled
- used by events module
& legacy assess & impact modules
"""
ireport_id = S3ReusableField("ireport_id", "integer",
readable=False, writable=False)
return Storage(irs_ireport_id = ireport_id)
# -------------------------------------------------------------------------
@staticmethod
def irs_icategory_onvalidation(form):
"""
Incident Category Validation:
Prevent Duplicates
Done here rather than in .requires to maintain the dropdown.
"""
db = current.db
table = db.irs_icategory
category, error = IS_NOT_ONE_OF(db, "irs_icategory.code")(form.vars.code)
if error:
form.errors.code = error
return False
# -------------------------------------------------------------------------
@staticmethod
def irs_ireport_represent(id, row=None):
"""
Represent an Incident Report via its name
"""
if row:
return row.name
elif not id:
return current.messages["NONE"]
db = current.db
table = db.irs_ireport
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def ireport_onaccept(form):
"""
Assign the appropriate vehicle & on-shift team to the incident
@ToDo: Specialist teams
@ToDo: Make more generic (currently Porto-specific)
"""
settings = current.deployment_settings
if settings.has_module("fire") and settings.has_module("vehicle"):
pass
else:
# Not supported!
return
db = current.db
s3db = current.s3db
vars = form.vars
ireport = vars.id
category = vars.category
if category == "1100":
# Fire
types = ["VUCI", "ABSC"]
elif category == "6102":
# Hazmat
types = ["VUCI", "VCOT"]
elif category == "8201":
# Rescue
types = ["VLCI", "ABSC"]
else:
types = ["VLCI"]
# 1st unassigned vehicle of the matching type
# @ToDo: Filter by Org/Base
# @ToDo: Filter by those which are under repair (asset_log)
table = s3db.irs_ireport_vehicle
stable = s3db.org_site
atable = s3db.asset_asset
vtable = s3db.vehicle_vehicle
ftable = s3db.fire_station
fvtable = s3db.fire_station_vehicle
for type in types:
query = (atable.type == s3db.asset_types["VEHICLE"]) & \
(vtable.type == type) & \
(vtable.asset_id == atable.id) & \
(atable.deleted == False) & \
((table.id == None) | \
(table.closed == True) | \
(table.deleted == True))
left = table.on(atable.id == table.asset_id)
vehicle = db(query).select(atable.id,
left=left,
limitby=(0, 1)).first()
if vehicle:
vehicle = vehicle.id
query = (vtable.asset_id == vehicle) & \
(fvtable.vehicle_id == vtable.id) & \
(ftable.id == fvtable.station_id) & \
(stable.id == ftable.site_id)
site = db(query).select(stable.id,
limitby=(0, 1)).first()
if site:
site = site.id
table.insert(ireport_id=ireport,
asset_id=vehicle,
site_id=site)
if settings.has_module("hrm"):
# Assign 1st 5 human resources on-shift
# @ToDo: We shouldn't assign people to vehicles automatically - this is done as people are ready
# - instead we should simply assign people to the incident & then use a drag'n'drop interface to link people to vehicles
# @ToDo: Filter by Base
table = s3db.irs_ireport_vehicle_human_resource
htable = s3db.hrm_human_resource
on_shift = s3db.fire_staff_on_duty()
query = on_shift & \
((table.id == None) | \
(table.closed == True) | \
(table.deleted == True))
left = table.on(htable.id == table.human_resource_id)
people = db(query).select(htable.id,
left=left,
limitby=(0, 5))
# @ToDo: Find Ranking person to be incident commander
leader = people.first()
if leader:
leader = leader.id
for person in people:
if person.id == leader.id:
table.insert(ireport_id=ireport,
asset_id=vehicle,
human_resource_id=person.id,
incident_commander=True)
else:
table.insert(ireport_id=ireport,
asset_id=vehicle,
human_resource_id=person.id)
# -------------------------------------------------------------------------
@staticmethod
def irs_dispatch(r, **attr):
"""
Send a Dispatch notice from an Incident Report
- this will be formatted as an OpenGeoSMS
"""
if r.representation == "html" and \
r.name == "ireport" and r.id and not r.component:
T = current.T
msg = current.msg
record = r.record
id = record.id
contact = ""
if record.contact:
contact = "\n%s: %s" (T("Contact"),
record.contact)
message = ""
if record.message:
message = "\n%s" % record.message
text = "SI#%s\n%s%s%s" % (id,
record.name,
contact,
message)
text += "\nSend help to see how to respond!"
# Encode the message as an OpenGeoSMS
message = msg.prepare_opengeosms(record.location_id,
code="ST",
map="google",
text=text)
# URL to redirect to after message sent
url = URL(c="irs",
f="ireport",
args=r.id)
# Create the form
opts = dict(
type="SMS",
# @ToDo: deployment_setting
subject = T("Deployment Request"),
message = message,
url = url,
#formid = r.id
)
# Pre-populate the recipients list if we can
# @ToDo: Check that we have valid contact details
# - slower, but useful to fail early if we need to
s3db = current.s3db
if current.deployment_settings.get_irs_vehicle():
# @ToDo: This workflow requires more work
# - no ic defined yet in this case
table = s3db.irs_ireport_vehicle_human_resource
else:
table = s3db.irs_ireport_human_resource
htable = s3db.hrm_human_resource
ptable = s3db.pr_person
query = (table.ireport_id == id) & \
(table.deleted == False) & \
(table.human_resource_id == htable.id) & \
(htable.person_id == ptable.id)
recipients = current.db(query).select(table.incident_commander,
ptable.pe_id)
if not recipients:
# Provide an Autocomplete to select the person to send the notice to
opts["recipient_type"] = "pr_person"
elif len(recipients) == 1:
# Send to this person
opts["recipient"] = recipients.first()["pr_person"].pe_id
else:
# Send to the Incident Commander
ic = False
for row in recipients:
if row["irs_ireport_human_resource"].incident_commander == True:
opts["recipient"] = row["pr_person"].pe_id
ic = True
break
if not ic:
# Provide an Autocomplete to select the person to send the notice to
opts["recipient_type"] = "pr_person"
output = msg.compose(**opts)
# Maintain RHeader for consistency
if attr.get("rheader"):
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Send Dispatch Update")
current.response.view = "msg/compose.html"
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
@staticmethod
def irs_timeline(r, **attr):
"""
Display the Incidents on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
@ToDo: Play button
http://www.simile-widgets.org/wiki/Timeline_Moving_the_Timeline_via_Javascript
"""
if r.representation == "html" and r.name == "ireport":
T = current.T
db = current.db
s3db = current.s3db
request = current.request
response = current.response
s3 = response.s3
itable = s3db.doc_image
dtable = s3db.doc_document
# Add core Simile Code
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % request.application)
# Add our control script
if s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % request.application)
# Add our data
# @ToDo: Make this the initial data & then collect extra via REST with a stylesheet
# add in JS using S3.timeline.eventSource.addMany(events) where events is a []
if r.record:
# Single record
rows = [r.record]
else:
# Multiple records
# @ToDo: Load all records & sort to closest in time
# http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d
r.resource.load(limit=2000)
rows = r.resource._rows
data = {"dateTimeFormat": "iso8601",
}
now = request.utcnow
tl_start = tl_end = now
events = []
for row in rows:
# Dates
start = row.datetime or ""
if start:
if start < tl_start:
tl_start = start
if start > tl_end:
tl_end = start
start = start.isoformat()
end = row.expiry or ""
if end:
if end > tl_end:
tl_end = end
end = end.isoformat()
# Image
# Just grab the first one for now
query = (itable.deleted == False) & \
(itable.doc_id == row.doc_id)
image = db(query).select(itable.url,
limitby=(0, 1)).first()
if image:
image = image.url or ""
# URL
link = URL(args=[row.id])
events.append({"start": start,
"end": end,
"title": row.name,
"caption": row.message or "",
"description": row.message or "",
"image": image or "",
"link": link or "",
# @ToDo: Colour based on Category (More generically: Resource or Resource Type)
#"color" : "blue',
})
data["events"] = events
data = json.dumps(data, separators=SEPARATORS)
code = "".join((
'''S3.timeline.data=''', data, '''
S3.timeline.tl_start="''', tl_start.isoformat(), '''"
S3.timeline.tl_end="''', tl_end.isoformat(), '''"
S3.timeline.now="''', now.isoformat(), '''"
'''))
# Control our code in static/scripts/S3/s3.timeline.js
s3.js_global.append(code)
# Create the DIV
item = DIV(_id="s3timeline", _class="s3-timeline")
output = dict(item = item)
# Maintain RHeader for consistency
if attr.get("rheader"):
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Incident Timeline")
response.view = "timeline.html"
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
@staticmethod
def irs_ushahidi_import(r, **attr):
"""
Import Incident Reports from Ushahidi
@ToDo: Deployment setting for Ushahidi instance URL
"""
T = current.T
auth = current.auth
request = current.request
response = current.response
session = current.session
# Method is only available to Admins
system_roles = session.s3.system_roles
ADMIN = system_roles.ADMIN
if not auth.s3_has_role(ADMIN):
auth.permission.fail()
if r.representation == "html" and \
r.name == "ireport" and not r.component and not r.id:
url = r.get_vars.get("url", "http://")
title = T("Import Incident Reports from Ushahidi")
form = FORM(
TABLE(
TR(
TH(B("%s: " % T("URL"))),
INPUT(_type="text", _name="url", _size="100",
_value=url,
requires=[IS_URL(), IS_NOT_EMPTY()]),
TH(DIV(SPAN("*", _class="req",
_style="padding-right: 5px;")))
),
TR(
TD(B("%s: " % T("Ignore Errors?"))),
TD(INPUT(_type="checkbox", _name="ignore_errors",
_id="ignore_errors"))
),
TR("", INPUT(_type="submit", _value=T("Import")))
))
rheader = DIV(P("%s: http://wiki.ushahidi.com/doku.php?id=ushahidi_api" % \
T("API is documented here")),
P("%s URL: http://ushahidi.my.domain/api?task=incidents&by=all&resp=xml&limit=1000" % \
T("Example")))
output = dict(title=title,
form=form,
rheader=rheader)
if form.accepts(request.vars, session):
# "Exploit" the de-duplicator hook to count import items
import_count = [0]
def count_items(job, import_count=import_count):
if job.tablename == "irs_ireport":
import_count[0] += 1
current.s3db.configure("irs_report", deduplicate=count_items)
vars = form.vars
ushahidi_url = vars.url
import os
stylesheet = os.path.join(request.folder, "static", "formats",
"ushahidi", "import.xsl")
if os.path.exists(stylesheet) and ushahidi_url:
ignore_errors = vars.get("ignore_errors", None)
try:
success = r.resource.import_xml(ushahidi_url,
stylesheet=stylesheet,
ignore_errors=ignore_errors)
except:
import sys
e = sys.exc_info()[1]
response.error = e
else:
if success:
count = import_count[0]
if count:
response.confirmation = "%s %s" % \
(import_count[0],
T("reports successfully imported."))
else:
response.information = T("No reports available.")
else:
response.error = r.resource.error
response.view = "create.html"
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# =============================================================================
class S3IRSResponseModel(S3Model):
"""
Tables used when responding to Incident Reports
- with HRMs &/or Vehicles
Currently this has code specific to Porto Firefighters
@ToDo: Replace with Deployment module
"""
names = ("irs_ireport_human_resource",
"irs_ireport_vehicle",
"irs_ireport_vehicle_human_resource"
)
def model(self):
T = current.T
db = current.db
human_resource_id = self.hrm_human_resource_id
ireport_id = self.irs_ireport_id
define_table = self.define_table
configure = self.configure
settings = current.deployment_settings
hrm = settings.get_hrm_show_staff()
vol = settings.has_module("vol")
if hrm and not vol:
hrm_label = T("Staff")
elif vol and not hrm:
hrm_label = T("Volunteer")
else:
hrm_label = T("Staff/Volunteer")
def response_represent(opt):
if opt is None:
return current.messages["NONE"]
elif opt:
return T("Yes")
else:
return T("No")
# ---------------------------------------------------------------------
# Staff assigned to an Incident
#
msg_enabled = settings.has_module("msg")
tablename = "irs_ireport_human_resource"
define_table(tablename,
ireport_id(),
# @ToDo: Limit Staff to those which are not already assigned to an Incident
human_resource_id(label = hrm_label,
# Simple dropdown is faster for a small team
#widget=None,
#comment=None,
),
Field("incident_commander", "boolean",
default = False,
label = T("Incident Commander"),
represent = lambda incident_commander: \
(T("No"),
T("Yes"))[incident_commander == True]),
Field("response", "boolean",
default = None,
label = T("Able to Respond?"),
writable = msg_enabled,
readable = msg_enabled,
represent = response_represent,
),
s3_comments("reply",
label = T("Reply Message"),
writable = msg_enabled,
readable = msg_enabled
),
*s3_meta_fields())
configure(tablename,
list_fields=["id",
"human_resource_id",
"incident_commander",
"response",
"reply",
])
if not settings.has_module("vehicle"):
return Storage()
# ---------------------------------------------------------------------
# Vehicles assigned to an Incident
#
asset_id = self.asset_asset_id
tablename = "irs_ireport_vehicle"
define_table(tablename,
ireport_id(),
asset_id(label = T("Vehicle"),
# Limit Vehicles to those which are not already assigned to an Incident
requires = self.irs_vehicle_requires,
comment = S3AddResourceLink(
c="vehicle",
f="vehicle",
label=T("Add Vehicle"),
tooltip=T("If you don't see the vehicle in the list, you can add a new one by clicking link 'Add Vehicle'.")),
),
s3_datetime("datetime",
default = "now",
future = 0,
label = T("Dispatch Time"),
),
self.super_link("site_id", "org_site",
label = T("Fire Station"),
readable = True,
# Populated from fire_station_vehicle
#writable = True
),
self.gis_location_id(label = T("Destination"),
),
Field("closed",
# @ToDo: Close all assignments when Incident closed
readable=False,
writable=False),
Field.Method("minutes",
self.irs_ireport_vehicle_minutes),
s3_comments(),
*s3_meta_fields())
configure(tablename, extra_fields = ["datetime"])
# ---------------------------------------------------------------------
# Which Staff are assigned to which Vehicle?
#
tablename = "irs_ireport_vehicle_human_resource"
define_table(tablename,
ireport_id(),
# @ToDo: Limit Staff to those which are not already assigned to an Incident
human_resource_id(label = hrm_label,
# Simple dropdown is faster for a small team
widget=None,
comment=None,
),
asset_id(label=T("Vehicle"),
# @ToDo: Limit to Vehicles which are assigned to this Incident
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "asset_asset.id",
self.asset_represent,
filterby="type",
filter_opts=(1,),
sort=True)),
comment = S3AddResourceLink(
c="vehicle",
f="vehicle",
label=T("Add Vehicle"),
tooltip=T("If you don't see the vehicle in the list, you can add a new one by clicking link 'Add Vehicle'.")),
),
Field("closed",
# @ToDo: Close all assignments when Incident closed
readable=False,
writable=False),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Return model-global names to s3db.*
#
return Storage(
)
# -------------------------------------------------------------------------
@staticmethod
def irs_vehicle_requires():
"""
Populate the dropdown widget for responding to an Incident Report
based on those vehicles which aren't already on-call
"""
# Vehicles are a type of Asset
s3db = current.s3db
table = s3db.asset_asset
ltable = s3db.irs_ireport_vehicle
asset_represent = s3db.asset_asset_id.represent
# Filter to Vehicles which aren't already on a call
# @ToDo: Filter by Org/Base
# @ToDo: Filter out those which are under repair
query = (table.type == s3db.asset_types["VEHICLE"]) & \
(table.deleted == False) & \
((ltable.id == None) | \
(ltable.closed == True) | \
(ltable.deleted == True))
left = ltable.on(table.id == ltable.asset_id)
requires = IS_EMPTY_OR(IS_ONE_OF(current.db(query),
"asset_asset.id",
asset_represent,
left=left,
sort=True))
return requires
# -------------------------------------------------------------------------
@staticmethod
def irs_ireport_vehicle_minutes(row):
if hasattr(row, "irs_ireport_vehicle"):
row = "irs_ireport_vehicle"
if hasattr(row, "datetime") and row.datetime:
return int((current.request.utcnow - row.datetime).total_seconds() / 60)
else:
return 0
# =============================================================================
def irs_rheader(r, tabs=[]):
""" Resource component page header """
if r.representation == "html":
if r.record is None:
# List or Create form: rheader makes no sense here
return None
T = current.T
s3db = current.s3db
settings = current.deployment_settings
hrm_label = T("Responder(s)")
tabs = [(T("Report Details"), None),
(T("Photos"), "image"),
(T("Documents"), "document"),
(T("Affected Persons"), "person"),
]
if settings.get_irs_vehicle():
tabs.append((T("Vehicles"), "vehicle"))
tabs.append((hrm_label, "human_resource"))
tabs.append((T("Tasks"), "task"))
if settings.has_module("msg"):
tabs.append((T("Dispatch"), "dispatch"))
rheader_tabs = s3_rheader_tabs(r, tabs)
if r.name == "ireport":
report = r.record
table = r.table
datetime = table.datetime.represent(report.datetime)
expiry = table.expiry.represent(report.expiry)
location = table.location_id.represent(report.location_id)
category = table.category.represent(report.category) or ""
contact = ""
if report.person:
if report.contact:
contact = "%s (%s)" % (report.person, report.contact)
else:
contact = report.person
elif report.contact:
contact = report.contact
if contact:
contact = DIV(TH("%s: " % T("Contact")), TD(contact))
#create_request = A(T("Create Request"),
# _class="action-btn s3_add_resource_link",
# _href=URL(c="req", f="req",
# args="create",
# vars={"format":"popup",
# "caller":"irs_ireport"}),
# _title=T("Add Request"))
#create_task = A(T("Create Task"),
# _class="action-btn s3_add_resource_link",
# _href=URL(c="project", f="task",
# args="create",
# vars={"format":"popup",
# "caller":"irs_ireport"}),
# _title=T("Create Task"))
rheader = DIV(TABLE(
TR(
TH("%s: " % table.name.label), report.name,
TH("%s: " % table.datetime.label), datetime,
),
TR(
TH("%s: " % table.category.label), category,
TH("%s: " % table.expiry.label), expiry,
),
TR(
TH("%s: " % table.location_id.label), location,
contact,
),
TR(
TH("%s: " % table.message.label), TD(report.message or "",
_colspan=3),
)
),
#DIV(P(), create_request, " ", create_task, P()),
rheader_tabs)
return rheader
else:
return None
# END =========================================================================
|
devinbalkind/eden
|
modules/s3db/irs.py
|
Python
|
mit
| 56,474
|
[
"VisIt"
] |
01ef7b4b0893e645712274456837fc61774daaf1099870ea44fc1fbcbd77756c
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008-2009 Gary Burton
# Copyright (C) 2008 Robert Cheramy <robert@cheramy.net>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2012 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Export to GEDCOM"
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
import time
import io
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import (AttributeType, ChildRefType, Citation, Date,
EventRoleType, EventType, LdsOrd, NameType,
PlaceType, NoteType, Person, UrlType,
SrcAttributeType)
from gramps.version import VERSION
import gramps.plugins.lib.libgedcom as libgedcom
from gramps.gen.errors import DatabaseError
from gramps.gui.plug.export import WriterOptionBox
from gramps.gen.updatecallback import UpdateCallback
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.place import conv_lat_lon
from gramps.gen.constfunc import cuni
from gramps.gen.utils.location import get_main_location
#-------------------------------------------------------------------------
#
# GEDCOM tags representing attributes that may take a parameter, value or
# description on the same line as the tag
#
#-------------------------------------------------------------------------
NEEDS_PARAMETER = set(
["CAST", "DSCR", "EDUC", "IDNO", "NATI", "NCHI",
"NMR", "OCCU", "PROP", "RELI", "SSN", "TITL"])
LDS_ORD_NAME = {
LdsOrd.BAPTISM : 'BAPL',
LdsOrd.ENDOWMENT : 'ENDL',
LdsOrd.SEAL_TO_PARENTS : 'SLGC',
LdsOrd.SEAL_TO_SPOUSE : 'SLGS',
LdsOrd.CONFIRMATION : 'CONL',
}
LDS_STATUS = {
LdsOrd.STATUS_BIC : "BIC",
LdsOrd.STATUS_CANCELED : "CANCELED",
LdsOrd.STATUS_CHILD : "CHILD",
LdsOrd.STATUS_CLEARED : "CLEARED",
LdsOrd.STATUS_COMPLETED : "COMPLETED",
LdsOrd.STATUS_DNS : "DNS",
LdsOrd.STATUS_INFANT : "INFANT",
LdsOrd.STATUS_PRE_1970 : "PRE-1970",
LdsOrd.STATUS_QUALIFIED : "QUALIFIED",
LdsOrd.STATUS_DNS_CAN : "DNS/CAN",
LdsOrd.STATUS_STILLBORN : "STILLBORN",
LdsOrd.STATUS_SUBMITTED : "SUBMITTED" ,
LdsOrd.STATUS_UNCLEARED : "UNCLEARED",
}
LANGUAGES = {
'cs' : 'Czech', 'da' : 'Danish', 'nl' : 'Dutch',
'en' : 'English', 'eo' : 'Esperanto', 'fi' : 'Finnish',
'fr' : 'French', 'de' : 'German', 'hu' : 'Hungarian',
'it' : 'Italian', 'lt' : 'Lithuanian', 'lv' : 'Latvian',
'no' : 'Norwegian', 'pl' : 'Polish', 'pt' : 'Portuguese',
'ro' : 'Romanian', 'sk' : 'Slovak', 'es' : 'Spanish',
'sv' : 'Swedish', 'ru' : 'Russian',
}
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
MIME2GED = {
"image/bmp" : "bmp",
"image/gif" : "gif",
"image/jpeg" : "jpeg",
"image/x-pcx" : "pcx",
"image/tiff" : "tiff",
"audio/x-wav" : "wav"
}
QUALITY_MAP = {
Citation.CONF_VERY_HIGH : "3",
Citation.CONF_HIGH : "2",
Citation.CONF_LOW : "1",
Citation.CONF_VERY_LOW : "0",
}
#-------------------------------------------------------------------------
#
# sort_handles_by_id
#
#-------------------------------------------------------------------------
def sort_handles_by_id(handle_list, handle_to_object):
"""
Sort a list of handles by the Gramps ID.
The function that returns the object from the handle needs to be supplied
so that we get the right object.
"""
sorted_list = []
for handle in handle_list:
obj = handle_to_object(handle)
if obj:
data = (obj.get_gramps_id(), handle)
sorted_list.append (data)
sorted_list.sort()
return sorted_list
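#-------------------------------------------------------------------------
#
# usage sketch for sort_handles_by_id (editor addition, stand-in objects
# instead of real database records)
#
#-------------------------------------------------------------------------
def _example_sort_handles_by_id():
    """
    Show the (gramps_id, handle) ordering produced by sort_handles_by_id.
    """
    class _Obj(object):
        def __init__(self, gramps_id):
            self.gramps_id = gramps_id
        def get_gramps_id(self):
            return self.gramps_id
    objects = {"h1": _Obj("I0002"), "h2": _Obj("I0001")}
    result = sort_handles_by_id(["h1", "h2"], objects.get)
    assert result == [("I0001", "h2"), ("I0002", "h1")]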
#-------------------------------------------------------------------------
#
# breakup
#
#-------------------------------------------------------------------------
def breakup(txt, limit):
"""
Break a line of text into a list of strings that conform to the
maximum length specified, breaking in the middle of a word where
necessary so that the break never falls next to a space (the GEDCOM
CONC continuation can drop spaces at the break).
"""
if limit < 1:
raise ValueError("breakup: unexpected limit: %r" % limit)
data = []
while len(txt) > limit:
# look for non-space pair to break between
# do not break within a UTF-8 byte sequence, i. e. first char >127
idx = limit
while (idx>0 and (txt[idx-1].isspace() or txt[idx].isspace()
or ord(txt[idx-1]) > 127)):
idx -= 1
if idx == 0:
#no words to break on, just break at limit anyway
idx = limit
data.append(txt[:idx])
txt = txt[idx:]
if len(txt) > 0:
data.append(txt)
return data
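#-------------------------------------------------------------------------
#
# usage sketch for breakup (editor addition, illustrative values only)
#
#-------------------------------------------------------------------------
def _example_breakup():
    """
    Show how breakup() splits a long line for GEDCOM continuation: no chunk
    exceeds the limit and the pieces re-join to the original text.
    """
    text = "The quick brown fox jumps over the lazy dog"
    pieces = breakup(text, 10)
    assert all(len(piece) <= 10 for piece in pieces)
    assert "".join(pieces) == text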
#-------------------------------------------------------------------------
#
# event_has_subordinate_data
# may want to compare description w/ auto-generated one, and
# if so, treat it same as if it were empty for this purpose
#
#-------------------------------------------------------------------------
def event_has_subordinate_data(event, event_ref):
if event and event_ref:
return (event.get_description().strip() or
not event.get_date_object().is_empty() or
event.get_place_handle() or
event.get_attribute_list() or
event_ref.get_attribute_list() or
event.get_note_list() or
event.get_citation_list() or
event.get_media_list())
else:
return False
#-------------------------------------------------------------------------
#
# GedcomWriter class
#
#-------------------------------------------------------------------------
class GedcomWriter(UpdateCallback):
"""
The GEDCOM writer creates a GEDCOM file that contains the exported
information from the database. It derives from UpdateCallback
so that it can provide visual feedback via a progress bar if needed.
"""
def __init__(self, database, user, option_box=None):
UpdateCallback.__init__(self, user.callback)
self.total = 100
self.dbase = database
self.dirname = None
self.gedcom_file = None
# The number of different stages other than any of the optional filters
# which the write_gedcom_file method will call.
self.progress_cnt = 5
self.setup(option_box)
def setup(self, option_box):
"""
If the option_box is present (GUI interface), then we check the
"private", "restrict", and "cfilter" arguments to see if we need
to apply proxy databases.
"""
if option_box:
option_box.parse_options()
self.dbase = option_box.get_filtered_database(self.dbase, self)
def write_gedcom_file(self, filename):
"""
Write the actual GEDCOM file to the specified filename.
"""
self.dirname = os.path.dirname (filename)
self.gedcom_file = io.open(filename, "w", encoding='utf-8')
self._header(filename)
self._submitter()
self._individuals()
self._families()
self._sources()
self._repos()
self._notes()
self._writeln(0, "TRLR")
self.gedcom_file.close()
return True
def _writeln(self, level, token, textlines="", limit=72):
"""
Write a line of text to the output file in the form of:
LEVEL TOKEN text
If the line contains newlines, it is broken into multiple lines using
the CONT token. If any line is greater than the limit, it will be broken
into multiple lines using CONC.
"""
assert(token)
if textlines:
# break the line into multiple lines if a newline is found
textlines = textlines.replace('\n\r', '\n')
textlines = textlines.replace('\r', '\n')
textlist = textlines.split('\n')
token_level = level
for text in textlist:
# make it unicode so that breakup below does the right thing.
text = cuni(text)
if limit:
prefix = cuni("\n%d CONC " % (level + 1))
txt = prefix.join(breakup(text, limit))
else:
txt = text
self.gedcom_file.write(cuni("%d %s %s\n" % (token_level, token, txt)))
token_level = level + 1
token = "CONT"
else:
self.gedcom_file.write(cuni("%d %s\n" % (level, token)))
def _header(self, filename):
"""
Write the GEDCOM header.
HEADER:=
n HEAD {1:1}
+1 SOUR <APPROVED_SYSTEM_ID> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+2 NAME <NAME_OF_PRODUCT> {0:1}
+2 CORP <NAME_OF_BUSINESS> {0:1} # Not used
+3 <<ADDRESS_STRUCTURE>> {0:1} # Not used
+2 DATA <NAME_OF_SOURCE_DATA> {0:1} # Not used
+3 DATE <PUBLICATION_DATE> {0:1} # Not used
+3 COPR <COPYRIGHT_SOURCE_DATA> {0:1} # Not used
+1 DEST <RECEIVING_SYSTEM_NAME> {0:1*} # Not used
+1 DATE <TRANSMISSION_DATE> {0:1}
+2 TIME <TIME_VALUE> {0:1}
+1 SUBM @XREF:SUBM@ {1:1}
+1 SUBN @XREF:SUBN@ {0:1}
+1 FILE <FILE_NAME> {0:1}
+1 COPR <COPYRIGHT_GEDCOM_FILE> {0:1}
+1 GEDC {1:1}
+2 VERS <VERSION_NUMBER> {1:1}
+2 FORM <GEDCOM_FORM> {1:1}
+1 CHAR <CHARACTER_SET> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+1 LANG <LANGUAGE_OF_TEXT> {0:1}
+1 PLAC {0:1}
+2 FORM <PLACE_HIERARCHY> {1:1}
+1 NOTE <GEDCOM_CONTENT_DESCRIPTION> {0:1}
+2 [CONT|CONC] <GEDCOM_CONTENT_DESCRIPTION> {0:M}
"""
local_time = time.localtime(time.time())
(year, mon, day, hour, minutes, sec) = local_time[0:6]
date_str = "%d %s %d" % (day, libgedcom.MONTH[mon], year)
time_str = "%02d:%02d:%02d" % (hour, minutes, sec)
rname = self.dbase.get_researcher().get_name()
self._writeln(0, "HEAD")
self._writeln(1, "SOUR", "Gramps")
self._writeln(2, "VERS", VERSION)
self._writeln(2, "NAME", "Gramps")
self._writeln(1, "DATE", date_str)
self._writeln(2, "TIME", time_str)
self._writeln(1, "SUBM", "@SUBM@")
self._writeln(1, "FILE", filename, limit=255)
self._writeln(1, "COPR", 'Copyright (c) %d %s.' % (year, rname))
self._writeln(1, "GEDC")
self._writeln(2, "VERS", "5.5")
self._writeln(2, "FORM", 'LINEAGE-LINKED')
self._writeln(1, "CHAR", "UTF-8")
# write the language string if the current LANG variable
# matches something we know about.
lang = glocale.language[0]
if lang and len(lang) >= 2:
lang_code = LANGUAGES.get(lang[0:2])
if lang_code:
self._writeln(1, 'LANG', lang_code)
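# For illustration only (the VERS, DATE, TIME, FILE and COPR values depend on
# the Gramps version, the clock and the database owner), the emitted header
# looks roughly like:
#   0 HEAD
#   1 SOUR Gramps
#   2 VERS <version>
#   2 NAME Gramps
#   1 DATE 1 JAN 2015
#   2 TIME 12:00:00
#   1 SUBM @SUBM@
#   1 FILE <filename>
#   1 COPR Copyright (c) 2015 <researcher>.
#   1 GEDC
#   2 VERS 5.5
#   2 FORM LINEAGE-LINKED
#   1 CHAR UTF-8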
def _submitter(self):
"""
n @<XREF:SUBM>@ SUBM {1:1}
+1 NAME <SUBMITTER_NAME> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} # not used
+1 LANG <LANGUAGE_PREFERENCE> {0:3} # not used
+1 RFN <SUBMITTER_REGISTERED_RFN> {0:1} # not used
+1 RIN <AUTOMATED_RECORD_ID> {0:1} # not used
+1 <<CHANGE_DATE>> {0:1} # not used
"""
owner = self.dbase.get_researcher()
name = owner.get_name()
phon = owner.get_phone()
mail = owner.get_email()
self._writeln(0, "@SUBM@", "SUBM")
self._writeln(1, "NAME", name)
# Researcher is a sub-type of LocationBase, so get_city etc. which are
# used in __write_addr work fine. However, the database owner street is
# stored in address, so we need to temporarily copy it into street so
# __write_addr works properly
owner.set_street(owner.get_address())
self.__write_addr(1, owner)
if phon:
self._writeln(1, "PHON", phon)
if mail:
self._writeln(1, "EMAIL", mail)
def _individuals(self):
"""
Write the individual people to the gedcom file.
Since people like to have the list sorted by ID value, we need to go
through a sorting step. We need to reset the progress bar, otherwise,
people will be confused when the progress bar is idle.
"""
self.reset(_("Writing individuals"))
self.progress_cnt += 1
self.update(self.progress_cnt)
phandles = self.dbase.iter_person_handles()
sorted_list = []
for handle in phandles:
person = self.dbase.get_person_from_handle(handle)
if person:
data = (person.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
for data in sorted_list:
self._person(self.dbase.get_person_from_handle(data[1]))
def _person(self, person):
"""
Write out a single person.
n @XREF:INDI@ INDI {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1} # not used
+1 <<PERSONAL_NAME_STRUCTURE>> {0:M}
+1 SEX <SEX_VALUE> {0:1}
+1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M}
+1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M}
+1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M}
+1 <<CHILD_TO_FAMILY_LINK>> {0:M}
+1 <<SPOUSE_TO_FAMILY_LINK>> {0:M}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<ASSOCIATION_STRUCTURE>> {0:M}
+1 ALIA @<XREF:INDI>@ {0:M}
+1 ANCI @<XREF:SUBM>@ {0:M}
+1 DESI @<XREF:SUBM>@ {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<MULTIMEDIA_LINK>> {0:M} ,*
+1 <<NOTE_STRUCTURE>> {0:M}
+1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1}
+1 AFN <ANCESTRAL_FILE_NUMBER> {0:1}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if person is None:
return
self._writeln(0, "@%s@" % person.get_gramps_id(), "INDI")
self._names(person)
self._gender(person)
self._person_event_ref('BIRT', person.get_birth_ref())
self._person_event_ref('DEAT', person.get_death_ref())
self._remaining_events(person)
self._attributes(person)
self._lds_ords(person, 1)
self._child_families(person)
self._parent_families(person)
self._assoc(person, 1)
self._person_sources(person)
self._addresses(person)
self._photos(person.get_media_list(), 1)
self._url_list(person, 1)
self._note_references(person.get_note_list(), 1)
self._change(person.get_change_time(), 1)
def _assoc(self, person, level):
"""
n ASSO @<XREF:INDI>@ {0:M}
+1 RELA <RELATION_IS_DESCRIPTOR> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
"""
for ref in person.get_person_ref_list():
person = self.dbase.get_person_from_handle(ref.ref)
if person:
self._writeln(level, "ASSO", "@%s@" % person.get_gramps_id())
self._writeln(level+1, "RELA", ref.get_relation())
self._note_references(ref.get_note_list(), level+1)
self._source_references(ref.get_citation_list(), level+1)
def _note_references(self, notelist, level):
"""
Write out the list of note handles to the current level.
We use the Gramps ID as the XREF for the GEDCOM file.
"""
for note_handle in notelist:
note = self.dbase.get_note_from_handle(note_handle)
if note:
self._writeln(level, 'NOTE', '@%s@' % note.get_gramps_id())
def _names(self, person):
"""
Write the names associated with the person to the current level.
Since nicknames in version < 3.3 are separate from the name structure,
we search the attribute list to see if we can find a nickname.
Because we do not know the mappings, we just take the first nickname
we find, and add it to the primary name.
If a nickname is present in the name structure, it has precedence
"""
nicknames = [ attr.get_value() for attr in person.get_attribute_list()
if int(attr.get_type()) == AttributeType.NICKNAME ]
if len(nicknames) > 0:
nickname = nicknames[0]
else:
nickname = ""
self._person_name(person.get_primary_name(), nickname)
for name in person.get_alternate_names():
self._person_name(name, "")
def _gender(self, person):
"""
Write out the gender of the person to the file.
If the gender is not male or female, simply do not output anything.
The only valid values are M (male) or F (female), so if the gender is
unknown, we output nothing.
"""
if person.get_gender() == Person.MALE:
self._writeln(1, "SEX", "M")
elif person.get_gender() == Person.FEMALE:
self._writeln(1, "SEX", "F")
def _lds_ords(self, obj, level):
"""
Simply loop through the list of LDS ordinances, and call the function
that writes the LDS ordinance structure.
"""
for lds_ord in obj.get_lds_ord_list():
self.write_ord(lds_ord, level)
def _remaining_events(self, person):
"""
Output all events associated with the person that are not BIRTH or
DEATH events.
Because all we have are event references, we have to
extract the real event to discover the event type.
"""
for event_ref in person.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if not event: continue
self._process_person_event(event, event_ref)
self._adoption_records(person)
def _process_person_event(self, event, event_ref):
"""
Process a person event, which is not a BIRTH or DEATH event.
"""
etype = int(event.get_type())
# if the event is a birth or death, skip it.
if etype in (EventType.BIRTH, EventType.DEATH):
return
role = int(event_ref.get_role())
# if the event role is not primary, skip the event.
if role != EventRoleType.PRIMARY:
return
val = libgedcom.PERSONALCONSTANTEVENTS.get(etype, "").strip()
if val and val.strip():
if val in NEEDS_PARAMETER:
if event.get_description().strip():
self._writeln(1, val, event.get_description())
else:
self._writeln(1, val)
else:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_description().strip():
self._writeln(2, 'TYPE', event.get_description())
else:
self._writeln(1, 'EVEN')
if val.strip():
self._writeln(2, 'TYPE', val)
else:
self._writeln(2, 'TYPE', str(event.get_type()))
descr = event.get_description()
if descr:
self._writeln(2, 'NOTE', "Description: " + descr)
self._dump_event_stats(event, event_ref)
def _adoption_records(self, person):
"""
Write Adoption events for each child that has been adopted.
n ADOP
+1 <<INDIVIDUAL_EVENT_DETAIL>>
+1 FAMC @<XREF:FAM>@
+2 ADOP <ADOPTED_BY_WHICH_PARENT>
"""
adoptions = []
for family in [ self.dbase.get_family_from_handle(fh)
for fh in person.get_parent_family_handle_list() ]:
if family is None:
continue
for child_ref in [ ref for ref in family.get_child_ref_list()
if ref.ref == person.handle ]:
if child_ref.mrel == ChildRefType.ADOPTED \
or child_ref.frel == ChildRefType.ADOPTED:
adoptions.append((family, child_ref.frel, child_ref.mrel))
for (fam, frel, mrel) in adoptions:
self._writeln(1, 'ADOP', 'Y')
self._writeln(2, 'FAMC', '@%s@' % fam.get_gramps_id())
if mrel == frel:
self._writeln(3, 'ADOP', 'BOTH')
elif mrel == ChildRefType.ADOPTED:
self._writeln(3, 'ADOP', 'WIFE')
else:
self._writeln(3, 'ADOP', 'HUSB')
def _attributes(self, person):
"""
Write out the attributes to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
# filter out the nicknames
attr_list = [ attr for attr in person.get_attribute_list()
if attr.get_type() != AttributeType.NICKNAME ]
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.PERSONALCONSTANTATTRIBUTES.get(attr_type)
key = str(attr.get_type())
value = attr.get_value().strip().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID", "_FSFTID"):
self._writeln(1, key, value)
continue
if key == "RESN":
self._writeln(1, 'RESN')
continue
if name and name.strip():
self._writeln(1, name, value)
elif value:
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
else:
continue
self._note_references(attr.get_note_list(), 2)
self._source_references(attr.get_citation_list(), 2)
def _source_references(self, citation_list, level):
"""
Loop through the list of citation handles, writing the information
to the file.
"""
for citation_handle in citation_list:
self._source_ref_record(level, citation_handle)
def _addresses(self, person):
"""
Write out the addresses associated with the person as RESI events.
"""
for addr in person.get_address_list():
self._writeln(1, 'RESI')
self._date(2, addr.get_date_object())
self.__write_addr(2, addr)
if addr.get_phone():
self._writeln(2, 'PHON', addr.get_phone())
self._note_references(addr.get_note_list(), 2)
self._source_references(addr.get_citation_list(), 2)
def _photos(self, media_list, level):
"""
Loop through the list of media objects, writing the information
to the file.
"""
for photo in media_list:
self._photo(photo, level)
def _child_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a child.
"""
# get the list of families from the handle list
family_list = [ self.dbase.get_family_from_handle(hndl)
for hndl in person.get_parent_family_handle_list() ]
for family in family_list:
if family:
self._writeln(1, 'FAMC', '@%s@' % family.get_gramps_id())
def _parent_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a parent.
"""
# get the list of families from the handle list
family_list = [ self.dbase.get_family_from_handle(hndl)
for hndl in person.get_family_handle_list() ]
for family in family_list:
if family:
self._writeln(1, 'FAMS', '@%s@' % family.get_gramps_id())
def _person_sources(self, person):
"""
Loop through the list of citations, writing the information
to the file.
"""
for citation_handle in person.get_citation_list():
self._source_ref_record(1, citation_handle)
def _url_list(self, obj, level):
"""
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
for url in obj.get_url_list():
self._writeln(level, 'OBJE')
self._writeln(level+1, 'FORM', 'URL')
if url.get_description():
self._writeln(level+1, 'TITL', url.get_description())
if url.get_path():
self._writeln(level+1, 'FILE', url.get_path(), limit=255)
def _families(self):
"""
Write out the list of families, sorting by Gramps ID.
"""
self.reset(_("Writing families"))
self.progress_cnt += 1
self.update(self.progress_cnt)
# generate a list of (GRAMPS_ID, HANDLE) pairs. This list
# can then be sorted by the sort routine, which will use the
# first value of the tuple as the sort key.
sorted_list = sort_handles_by_id(self.dbase.get_family_handles(),
self.dbase.get_family_from_handle)
# loop through the sorted list, pulling off the handle. This list
# has already been sorted by GRAMPS_ID
for family_handle in [hndl[1] for hndl in sorted_list]:
self._family(self.dbase.get_family_from_handle(family_handle))
def _family(self, family):
"""
n @<XREF:FAM>@ FAM {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1}
+1 <<FAMILY_EVENT_STRUCTURE>> {0:M}
+1 HUSB @<XREF:INDI>@ {0:1}
+1 WIFE @<XREF:INDI>@ {0:1}
+1 CHIL @<XREF:INDI>@ {0:M}
+1 NCHI <COUNT_OF_CHILDREN> {0:1}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<LDS_SPOUSE_SEALING>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
"""
if family is None:
return
gramps_id = family.get_gramps_id()
self._writeln(0, '@%s@' % gramps_id, 'FAM' )
self._family_reference('HUSB', family.get_father_handle())
self._family_reference('WIFE', family.get_mother_handle())
self._lds_ords(family, 1)
self._family_events(family)
self._family_attributes(family.get_attribute_list(), 1)
self._family_child_list(family.get_child_ref_list())
self._source_references(family.get_citation_list(), 1)
self._photos(family.get_media_list(), 1)
self._note_references(family.get_note_list(), 1)
self._change(family.get_change_time(), 1)
def _family_child_list(self, child_ref_list):
"""
Write the child XREF values to the GEDCOM file.
"""
child_list = [
self.dbase.get_person_from_handle(cref.ref).get_gramps_id()
for cref in child_ref_list]
for gid in child_list:
if gid is None: continue
self._writeln(1, 'CHIL', '@%s@' % gid)
def _family_reference(self, token, person_handle):
"""
Write the family reference to the file.
This is either 'WIFE' or 'HUSB'. As usual, we use the Gramps ID as the
XREF value.
"""
if person_handle:
person = self.dbase.get_person_from_handle(person_handle)
if person:
self._writeln(1, token, '@%s@' % person.get_gramps_id())
def _family_events(self, family):
"""
Output the events associated with the family.
Because all we have are event references, we have to extract the real
event to discover the event type.
"""
for event_ref in family.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if event is None: continue
self._process_family_event(event, event_ref)
self._dump_event_stats(event, event_ref)
def _process_family_event(self, event, event_ref):
"""
Process a single family event.
"""
etype = int(event.get_type())
val = libgedcom.FAMILYCONSTANTEVENTS.get(etype)
if val:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_type() == EventType.MARRIAGE:
self._family_event_attrs(event.get_attribute_list(), 2)
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
else:
self._writeln(1, 'EVEN')
the_type = str(event.get_type())
if the_type:
self._writeln(2, 'TYPE', the_type)
descr = event.get_description()
if descr:
self._writeln(2, 'NOTE', "Description: " + descr)
def _family_event_attrs(self, attr_list, level):
"""
Write the attributes associated with the family event.
The only ones we really care about are FATHER_AGE and MOTHER_AGE which
we translate to WIFE/HUSB AGE attributes.
"""
for attr in attr_list:
if attr.get_type() == AttributeType.FATHER_AGE:
self._writeln(level, 'HUSB')
self._writeln(level+1, 'AGE', attr.get_value())
elif attr.get_type() == AttributeType.MOTHER_AGE:
self._writeln(level, 'WIFE')
self._writeln(level+1, 'AGE', attr.get_value())
def _family_attributes(self, attr_list, level):
"""
Write out the attributes associated with a family to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.FAMILYCONSTANTATTRIBUTES.get(attr_type)
key = str(attr.get_type())
value = attr.get_value().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID"):
self._writeln(1, key, value)
continue
if name and name.strip():
self._writeln(1, name, value)
continue
else:
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
self._note_references(attr.get_note_list(), level+1)
self._source_references(attr.get_citation_list(),
level+1)
def _sources(self):
"""
Write out the list of sources, sorting by Gramps ID.
"""
self.reset(_("Writing sources"))
self.progress_cnt += 1
self.update(self.progress_cnt)
sorted_list = sort_handles_by_id(self.dbase.get_source_handles(),
self.dbase.get_source_from_handle)
for (source_id, handle) in sorted_list:
source = self.dbase.get_source_from_handle(handle)
if source is None: continue
self._writeln(0, '@%s@' % source_id, 'SOUR')
if source.get_title():
self._writeln(1, 'TITL', source.get_title())
if source.get_author():
self._writeln(1, "AUTH", source.get_author())
if source.get_publication_info():
self._writeln(1, "PUBL", source.get_publication_info())
if source.get_abbreviation():
self._writeln(1, 'ABBR', source.get_abbreviation())
self._photos(source.get_media_list(), 1)
for reporef in source.get_reporef_list():
self._reporef(reporef, 1)
break
self._note_references(source.get_note_list(), 1)
self._change(source.get_change_time(), 1)
def _notes(self):
"""
Write out the list of notes, sorting by Gramps ID.
"""
self.reset(_("Writing notes"))
self.progress_cnt += 1
self.update(self.progress_cnt)
sorted_list = sort_handles_by_id(self.dbase.get_note_handles(),
self.dbase.get_note_from_handle)
for note_handle in [hndl[1] for hndl in sorted_list]:
note = self.dbase.get_note_from_handle(note_handle)
if note is None: continue
self._note_record(note)
def _note_record(self, note):
"""
n @<XREF:NOTE>@ NOTE <SUBMITTER_TEXT> {1:1}
+1 [ CONC | CONT] <SUBMITTER_TEXT> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if note:
self._writeln(0, '@%s@' % note.get_gramps_id(), 'NOTE ' + note.get())
def _repos(self):
"""
Write out the list of repositories, sorting by Gramps ID.
REPOSITORY_RECORD:=
n @<XREF:REPO>@ REPO {1:1}
+1 NAME <NAME_OF_REPOSITORY> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
self.reset(_("Writing repositories"))
self.progress_cnt += 1
self.update(self.progress_cnt)
sorted_list = sort_handles_by_id(self.dbase.get_repository_handles(),
self.dbase.get_repository_from_handle)
# GEDCOM only allows for a single repository per source
for (repo_id, handle) in sorted_list:
repo = self.dbase.get_repository_from_handle(handle)
if repo is None: continue
self._writeln(0, '@%s@' % repo_id, 'REPO' )
if repo.get_name():
self._writeln(1, 'NAME', repo.get_name())
for addr in repo.get_address_list():
self.__write_addr(1, addr)
if addr.get_phone():
self._writeln(1, 'PHON', addr.get_phone())
for url in repo.get_url_list():
if int(url.get_type()) == UrlType.EMAIL:
self._writeln(1, 'EMAIL', url.get_path())
elif int(url.get_type()) == UrlType.WEB_HOME:
self._writeln(1, 'WWW', url.get_path())
self._note_references(repo.get_note_list(), 1)
def _reporef(self, reporef, level):
"""
n REPO [ @XREF:REPO@ | <NULL>] {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 CALN <SOURCE_CALL_NUMBER> {0:M}
+2 MEDI <SOURCE_MEDIA_TYPE> {0:1}
"""
if reporef.ref is None:
return
repo = self.dbase.get_repository_from_handle(reporef.ref)
if repo is None:
return
repo_id = repo.get_gramps_id()
self._writeln(level, 'REPO', '@%s@' % repo_id )
self._note_references(reporef.get_note_list(), level+1)
if reporef.get_call_number():
self._writeln(level+1, 'CALN', reporef.get_call_number() )
if reporef.get_media_type():
self._writeln(level+2, 'MEDI', str(reporef.get_media_type()))
def _person_event_ref(self, key, event_ref):
"""
Write out the BIRTH and DEATH events for the person.
"""
if event_ref:
event = self.dbase.get_event_from_handle(event_ref.ref)
if event_has_subordinate_data(event, event_ref):
self._writeln(1, key)
else:
self._writeln(1, key, 'Y')
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
self._dump_event_stats(event, event_ref)
def _change(self, timeval, level):
"""
CHANGE_DATE:=
n CHAN {1:1}
+1 DATE <CHANGE_DATE> {1:1}
+2 TIME <TIME_VALUE> {0:1}
+1 <<NOTE_STRUCTURE>> # not used
"""
self._writeln(level, 'CHAN')
time_val = time.localtime(timeval)
self._writeln(level+1, 'DATE', '%d %s %d' % (
time_val[2], libgedcom.MONTH[time_val[1]], time_val[0]))
self._writeln(level+2, 'TIME', '%02d:%02d:%02d' % (
time_val[3], time_val[4], time_val[5]))
def _dump_event_stats(self, event, event_ref):
"""
Write the event details for the event, using the event and event
reference information.
GEDCOM does not make a distinction between the two.
"""
dateobj = event.get_date_object()
self._date(2, dateobj)
if self._datewritten:
# write out TIME if present
times = [ attr.get_value() for attr in event.get_attribute_list()
if int(attr.get_type()) == AttributeType.TIME ]
# Not legal, but inserted by PhpGedView
if len(times) > 0:
event_time = times[0]
self._writeln(3, 'TIME', event_time)
place = None
if event.get_place_handle():
place = self.dbase.get_place_from_handle(event.get_place_handle())
self._place(place, 2)
for attr in event.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.CAUSE:
self._writeln(2, 'CAUS', attr.get_value())
elif attr_type == AttributeType.AGENCY:
self._writeln(2, 'AGNC', attr.get_value())
for attr in event_ref.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.AGE:
self._writeln(2, 'AGE', attr.get_value())
elif attr_type == AttributeType.FATHER_AGE:
self._writeln(2, 'HUSB')
self._writeln(3, 'AGE', attr.get_value())
elif attr_type == AttributeType.MOTHER_AGE:
self._writeln(2, 'WIFE')
self._writeln(3, 'AGE', attr.get_value())
self._note_references(event.get_note_list(), 2)
self._source_references(event.get_citation_list(), 2)
self._photos(event.get_media_list(), 2)
if place:
self._photos(place.get_media_list(), 2)
def write_ord(self, lds_ord, index):
"""
LDS_INDIVIDUAL_ORDINANCE:=
[
n [ BAPL | CONL ] {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_BAPTISM_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M} p.39
|
n ENDL {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_ENDOWMENT_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
|
n SLGC {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 FAMC @<XREF:FAM>@ {1:1}
+1 STAT <LDS_CHILD_SEALING_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
]
"""
self._writeln(index, LDS_ORD_NAME[lds_ord.get_type()])
self._date(index + 1, lds_ord.get_date_object())
if lds_ord.get_family_handle():
family_handle = lds_ord.get_family_handle()
family = self.dbase.get_family_from_handle(family_handle)
if family:
self._writeln(index+1, 'FAMC', '@%s@' % family.get_gramps_id())
if lds_ord.get_temple():
self._writeln(index+1, 'TEMP', lds_ord.get_temple())
if lds_ord.get_place_handle():
self._place(
self.dbase.get_place_from_handle(lds_ord.get_place_handle()), 2)
if lds_ord.get_status() != LdsOrd.STATUS_NONE:
self._writeln(2, 'STAT', LDS_STATUS[lds_ord.get_status()])
self._note_references(lds_ord.get_note_list(), index+1)
self._source_references(lds_ord.get_citation_list(), index+1)
def _date(self, level, date):
"""
Write the 'DATE' GEDCOM token, along with the date in GEDCOM's
expected format.
"""
self._datewritten = True
start = date.get_start_date()
if start != Date.EMPTY:
cal = date.get_calendar()
mod = date.get_modifier()
quality = date.get_quality()
if mod == Date.MOD_SPAN:
val = "FROM %s TO %s" % (
libgedcom.make_gedcom_date(start, cal, mod, quality),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, quality))
elif mod == Date.MOD_RANGE:
val = "BET %s AND %s" % (
libgedcom.make_gedcom_date(start, cal, mod, quality),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, quality))
else:
val = libgedcom.make_gedcom_date(start, cal, mod, quality)
self._writeln(level, 'DATE', val)
elif date.get_text():
self._writeln(level, 'DATE', date.get_text())
else:
self._datewritten = False
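# Hedged examples of the DATE lines this method produces at level 2 (the
# exact month abbreviations come from libgedcom.make_gedcom_date):
#   span    -> 2 DATE FROM 1 JAN 1900 TO 5 MAR 1910
#   range   -> 2 DATE BET 1 JAN 1900 AND 5 MAR 1910
#   regular -> 2 DATE 1 JAN 1900
#   text-only dates fall back to 2 DATE <free-form text>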
def _person_name(self, name, attr_nick):
"""
n NAME <NAME_PERSONAL> {1:1}
+1 NPFX <NAME_PIECE_PREFIX> {0:1}
+1 GIVN <NAME_PIECE_GIVEN> {0:1}
+1 NICK <NAME_PIECE_NICKNAME> {0:1}
+1 SPFX <NAME_PIECE_SURNAME_PREFIX {0:1}
+1 SURN <NAME_PIECE_SURNAME> {0:1}
+1 NSFX <NAME_PIECE_SUFFIX> {0:1}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
gedcom_name = name.get_gedcom_name()
firstname = name.get_first_name().strip()
surns = []
surprefs = []
for surn in name.get_surname_list():
surns.append(surn.get_surname().replace('/', '?'))
if surn.get_connector():
#we store connector with the surname
surns[-1] = surns[-1] + ' ' + surn.get_connector()
surprefs.append(surn.get_prefix().replace('/', '?'))
surname = ', '.join(surns)
surprefix = ', '.join(surprefs)
suffix = name.get_suffix()
title = name.get_title()
nick = name.get_nick_name()
if nick.strip() == '':
nick = attr_nick
self._writeln(1, 'NAME', gedcom_name)
if int(name.get_type()) == NameType.BIRTH:
pass
elif int(name.get_type()) == NameType.MARRIED:
self._writeln(2, 'TYPE', 'married')
elif int(name.get_type()) == NameType.AKA:
self._writeln(2, 'TYPE', 'aka')
else:
self._writeln(2, 'TYPE', name.get_type().xml_str())
if firstname:
self._writeln(2, 'GIVN', firstname)
if surprefix:
self._writeln(2, 'SPFX', surprefix)
if surname:
self._writeln(2, 'SURN', surname)
if name.get_suffix():
self._writeln(2, 'NSFX', suffix)
if name.get_title():
self._writeln(2, 'NPFX', title)
if nick:
self._writeln(2, 'NICK', nick)
self._source_references(name.get_citation_list(), 2)
self._note_references(name.get_note_list(), 2)
def _source_ref_record(self, level, citation_handle):
"""
n SOUR @<XREF:SOUR>@ /* pointer to source record */ {1:1}
+1 PAGE <WHERE_WITHIN_SOURCE> {0:1}
+1 EVEN <EVENT_TYPE_CITED_FROM> {0:1}
+2 ROLE <ROLE_IN_EVENT> {0:1}
+1 DATA {0:1}
+2 DATE <ENTRY_RECORDING_DATE> {0:1}
+2 TEXT <TEXT_FROM_SOURCE> {0:M}
+3 [ CONC | CONT ] <TEXT_FROM_SOURCE> {0:M}
+1 QUAY <CERTAINTY_ASSESSMENT> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} ,*
+1 <<NOTE_STRUCTURE>> {0:M}
"""
citation = self.dbase.get_citation_from_handle(citation_handle)
src_handle = citation.get_reference_handle()
if src_handle is None:
return
src = self.dbase.get_source_from_handle(src_handle)
if src is None:
return
# Reference to the source
self._writeln(level, "SOUR", "@%s@" % src.get_gramps_id())
if citation.get_page() != "":
# PAGE <WHERE_WITHIN_SOURCE> can not have CONC lines.
# WHERE_WITHIN_SOURCE:= {Size=1:248}
# Truncate the value to 248 characters and set limit to 248 so the line is not split
self._writeln(level+1, 'PAGE', citation.get_page()[0:248],
limit=248)
conf = min(citation.get_confidence_level(),
Citation.CONF_VERY_HIGH)
if conf != Citation.CONF_NORMAL and conf != -1:
self._writeln(level+1, "QUAY", QUALITY_MAP[conf])
if not citation.get_date_object().is_empty():
self._writeln(level+1, 'DATA')
self._date(level+2, citation.get_date_object())
if len(citation.get_note_list()) > 0:
note_list = [ self.dbase.get_note_from_handle(h)
for h in citation.get_note_list() ]
note_list = [ n for n in note_list
if n.get_type() == NoteType.SOURCE_TEXT]
if note_list:
ref_text = note_list[0].get()
else:
ref_text = ""
if ref_text != "" and citation.get_date_object().is_empty():
self._writeln(level+1, 'DATA')
if ref_text != "":
self._writeln(level+2, "TEXT", ref_text)
note_list = [ self.dbase.get_note_from_handle(h)
for h in citation.get_note_list() ]
note_list = [ n.handle for n in note_list
if n and n.get_type() != NoteType.SOURCE_TEXT]
self._note_references(note_list, level+1)
self._photos(citation.get_media_list(), level+1)
even = None
for srcattr in citation.get_attribute_list():
if str(srcattr.type) == "EVEN":
even = srcattr.value
self._writeln(level+1, "EVEN", even)
break
if even:
for srcattr in citation.get_attribute_list():
if str(srcattr.type) == "EVEN:ROLE":
self._writeln(level+2, "ROLE", srcattr.value)
break
def _photo(self, photo, level):
"""
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
photo_obj_id = photo.get_reference_handle()
photo_obj = self.dbase.get_object_from_handle(photo_obj_id)
if photo_obj:
mime = photo_obj.get_mime_type()
form = MIME2GED.get(mime, mime)
path = media_path_full(self.dbase, photo_obj.get_path())
if not os.path.isfile(path):
return
self._writeln(level, 'OBJE')
if form:
self._writeln(level+1, 'FORM', form)
self._writeln(level+1, 'TITL', photo_obj.get_description())
self._writeln(level+1, 'FILE', path, limit=255)
self._note_references(photo_obj.get_note_list(), level+1)
def _place(self, place, level):
"""
PLACE_STRUCTURE:=
n PLAC <PLACE_NAME> {1:1}
+1 FORM <PLACE_HIERARCHY> {0:1}
+1 FONE <PLACE_PHONETIC_VARIATION> {0:M} # not used
+2 TYPE <PHONETIC_TYPE> {1:1}
+1 ROMN <PLACE_ROMANIZED_VARIATION> {0:M} # not used
+2 TYPE <ROMANIZED_TYPE> {1:1}
+1 MAP {0:1}
+2 LATI <PLACE_LATITUDE> {1:1}
+2 LONG <PLACE_LONGITUDE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
if place is None: return
place_name = place.get_title()
self._writeln(level, "PLAC", place_name.replace('\r', ' '), limit=120)
longitude = place.get_longitude()
latitude = place.get_latitude()
if longitude and latitude:
(latitude, longitude) = conv_lat_lon(latitude, longitude, "GEDCOM")
if longitude and latitude:
self._writeln(level+1, "MAP")
self._writeln(level+2, 'LATI', latitude)
self._writeln(level+2, 'LONG', longitude)
# The Gedcom standard shows that an optional address structure can
# be written out in the event detail.
# http://homepages.rootsweb.com/~pmcbride/gedcom/55gcch2.htm#EVENT_DETAIL
location = get_main_location(self.dbase, place)
street = location.get(PlaceType.STREET)
locality = location.get(PlaceType.LOCALITY)
city = location.get(PlaceType.CITY)
state = location.get(PlaceType.STATE)
country = location.get(PlaceType.COUNTRY)
postal_code = place.get_code()
if (street or locality or city or state or postal_code or country):
self._writeln(level, "ADDR", street)
if street:
self._writeln(level + 1, 'ADR1', street)
if locality:
self._writeln(level + 1, 'ADR2', locality)
if city:
self._writeln(level + 1, 'CITY', city)
if state:
self._writeln(level + 1, 'STAE', state)
if postal_code:
self._writeln(level + 1, 'POST', postal_code)
if country:
self._writeln(level + 1, 'CTRY', country)
self._note_references(place.get_note_list(), level+1)
def __write_addr(self, level, addr):
"""
n ADDR <ADDRESS_LINE> {0:1}
+1 CONT <ADDRESS_LINE> {0:M}
+1 ADR1 <ADDRESS_LINE1> {0:1} (Street)
+1 ADR2 <ADDRESS_LINE2> {0:1} (Locality)
+1 CITY <ADDRESS_CITY> {0:1}
+1 STAE <ADDRESS_STATE> {0:1}
+1 POST <ADDRESS_POSTAL_CODE> {0:1}
+1 CTRY <ADDRESS_COUNTRY> {0:1}
This is done along the lines suggested by Tamura Jones in
http://www.tamurajones.net/GEDCOMADDR.xhtml as a result of bug 6382.
"GEDCOM writers should always use the structured address format,
and use it for all addresses, including the submitter address and
their own corporate address." "Vendors that want their product to pass
even the strictest GEDCOM validation, should include export to the old
free-form format..." [This goes on to say the free-form should be an
option, but we have not made it an option in Gramps].
@param level: The level number for the ADDR tag
@type level: Integer
@param addr: The location or address
@type addr: [a super-type of] LocationBase
"""
if addr.get_street() or addr.get_locality() or addr.get_city() or \
addr.get_state() or addr.get_postal_code() or addr.get_country():
self._writeln(level, 'ADDR', addr.get_street())
if addr.get_locality():
self._writeln(level + 1, 'CONT', addr.get_locality())
if addr.get_city():
self._writeln(level + 1, 'CONT', addr.get_city())
if addr.get_state():
self._writeln(level + 1, 'CONT', addr.get_state())
if addr.get_postal_code():
self._writeln(level + 1, 'CONT', addr.get_postal_code())
if addr.get_country():
self._writeln(level + 1, 'CONT', addr.get_country())
if addr.get_street():
self._writeln(level + 1, 'ADR1', addr.get_street())
if addr.get_locality():
self._writeln(level + 1, 'ADR2', addr.get_locality())
if addr.get_city():
self._writeln(level + 1, 'CITY', addr.get_city())
if addr.get_state():
self._writeln(level + 1, 'STAE', addr.get_state())
if addr.get_postal_code():
self._writeln(level + 1, 'POST', addr.get_postal_code())
if addr.get_country():
self._writeln(level + 1, 'CTRY', addr.get_country())
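# Illustrative output sketch for a hypothetical address at level=1; both the
# free-form ADDR/CONT block and the structured ADR1/CITY/... pieces are
# written, per the recommendation quoted in the docstring above:
#   1 ADDR 123 Main Street
#   2 CONT Springfield
#   2 ADR1 123 Main Street
#   2 CITY Springfield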
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def export_data(database, filename, user, option_box=None):
"""
External interface used to register with the plugin system.
"""
ret = False
try:
ged_write = GedcomWriter(database, user, option_box)
ret = ged_write.write_gedcom_file(filename)
except IOError as msg:
msg2 = _("Could not create %s") % filename
user.notify_error(msg2, str(msg))
except DatabaseError as msg:
user.notify_db_error("%s\n%s" % (_("GEDCOM Export failed"), str(msg)))
return ret
|
pmghalvorsen/gramps_branch
|
gramps/plugins/export/exportgedcom.py
|
Python
|
gpl-2.0
| 55,775
|
[
"Brian"
] |
7a4bfe7e34fe46c9b11df3d09646a2225cf0976341badb047e93b7a45ac81782
|
# -*- coding: utf-8 -*-
import vtk
import xc_base
import xc
from miscUtils import LogMessages as lmsg
import create_array_set_data
__author__= "Luis C. Pérez Tato (LCPT) Ana Ortega (AO_O)"
__copyright__= "Copyright 2015, LCPT AO_O"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@ciccp.es ana.Ortega@ciccp.es"
def VtkDefineActorKPoint(recordGrid, renderer, radius):
'''Returns a vtkActor to represent key-points in a rendering scene.
It defines the scale, orientation, rendering properties, textures, ...
:param recordGrid: unstructured grid (generic data set) into which the
KPoint actor is incorporated
:param renderer: renderer (lights, views, ...) to be used in the
display
:param radius: radius of the spheres employed in the KPoints
representation
'''
sphereSource= vtk.vtkSphereSource()
sphereSource.SetRadius(radius)
sphereSource.SetThetaResolution(5)
sphereSource.SetPhiResolution(5)
markKPts= vtk.vtkGlyph3D()
markKPts.SetInputData(recordGrid.uGrid)
markKPts.SetSourceData(sphereSource.GetOutput())
markKPts.ScalingOff()
markKPts.OrientOff()
mappKPts= vtk.vtkPolyDataMapper()
mappKPts.SetInputData(markKPts.GetOutput())
visKPts= vtk.vtkActor()
visKPts.SetMapper(mappKPts)
visKPts.GetProperty().SetColor(.7, .5, .5)
renderer.AddActor(visKPts)
def VtkDefineActorCells(recordGrid, renderer, tipoRepr):
''' Actor for the surfaces.'''
uGridMapper= vtk.vtkDataSetMapper()
uGridMapper.SetInputData(recordGrid.uGrid)
cellActor= vtk.vtkActor()
cellActor.SetMapper(uGridMapper)
cellActor.GetProperty().SetColor(1,1,0)
if (tipoRepr=="points"):
cellActor.GetProperty().SetRepresentationToPoints()
elif(tipoRepr== "wireframe"):
cellActor.GetProperty().SetRepresentationToWireframe()
elif(tipoRepr== "surface"):
cellActor.GetProperty().SetRepresentationToSurface()
else:
lmsg.error("error: "+tipoRepr+" no implementada.")
renderer.AddActor(cellActor) # Actor para las celdas
def VtkCargaIdsKPts(uGrid, setToDraw):
etiqKPt= create_array_set_data.VtkCreaStrArraySetData(setToDraw,'pnts','nombre')
uGrid.GetPointData().AddArray(etiqKPt)
def VtkCargaIdsCells(uGrid, setToDraw, entTypeName):
etiqCells= create_array_set_data.VtkCreaStrArraySetData(setToDraw,entTypeName,'nombre')
uGrid.GetCellData().AddArray(etiqCells)
def VtkDibujaIdsKPts(uGrid, setToDraw, renderer):
'''Draw the point labels.'''
numKPtsDI= setToDraw.getPoints.size
if(numKPtsDI>0):
ids= vtk.vtkIdFilter()
ids.SetInput(uGrid)
ids.CellIdsOff()
ids.PointIdsOff()
VtkCargaIdsKPts(uGrid,setToDraw)
visPts= vtk.vtkSelectVisiblePoints()
visPts.SetInputConnection(ids.GetOutputPort())
visPts.SetRenderer(renderer)
visPts.SelectionWindowOff()
#Create the mapper to display the point ids. Specify the format to
# use for the labels. Also create the associated actor.
ldm= vtk.vtkLabeledDataMapper()
ldm.SetInputConnection(visPts.GetOutputPort())
ldm.GetLabelTextProperty().SetColor(0.1,0.1,0.1)
pointLabels= vtk.vtkActor2D()
pointLabels.SetMapper(ldm)
renderer.AddActor2D(pointLabels)
else:
print "El conjunto: '",setToDraw,"' no tiene KPts."
# ****** Creamos las etiquetas para las celdas *******
def VtkDibujaIdsCells(uGrid, setToDraw, entTypeName, renderer):
ids= vtk.vtkIdFilter()
ids.SetInput(uGrid)
ids.CellIdsOff()
ids.PointIdsOff()
VtkCargaIdsCells(uGrid,setToDraw,entTypeName)
# Draw the line labels.
cc= vtk.vtkCellCenters()
cc.SetInputConnection(ids.GetOutputPort()) # Cell centroids.
visCells= vtk.vtkSelectVisiblePoints()
visCells.SetInputConnection(cc.GetOutputPort())
visCells.SetRenderer(renderer)
visCells.SelectionWindowOff()
#Create the mapper to display the cell ids. Specify the format to
# use for the labels. Also create the associated actor.
cellMapper= vtk.vtkLabeledDataMapper()
cellMapper.SetInputConnection(visCells.GetOutputPort())
cellMapper.GetLabelTextProperty().SetColor(0,0,0.9)
cellLabels= vtk.vtkActor2D()
cellLabels.SetMapper(cellMapper)
renderer.AddActor2D(cellLabels)
def VtkCargaMalla(recordGrid):
kpoints= vtk.vtkPoints()
# Define the grid
recordGrid.uGrid.SetPoints(kpoints)
setToDraw= recordGrid.xcSet
setToDraw.numerate()
pnts= setToDraw.getPoints
for p in pnts:
kpoints.InsertPoint(p.getIdx,p.getPos.x,p.getPos.y,p.getPos.z)
cellSet= setToDraw.getLines # cells are lines by default.
if(recordGrid.cellType=="faces"):
cellSet= setToDraw.getSurfaces
elif (recordGrid.cellType=="bodies"):
cellSet= setToDraw.getBodies
for c in cellSet:
vertices= xc_base.vector_int_to_py_list(c.getIdxVertices)
vtx= vtk.vtkIdList()
for vIndex in vertices:
vtx.InsertNextId(vIndex)
recordGrid.uGrid.InsertNextCell(c.getVtkCellType,vtx)
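# Hedged usage sketch (the record-grid object and renderer below are
# assumptions about the caller, not defined in this module): a typical driver
# prepares recordGrid.uGrid as a vtk.vtkUnstructuredGrid, points
# recordGrid.xcSet at the XC set to display and then calls
#   VtkCargaMalla(recordGrid)
#   VtkDefineActorCells(recordGrid, renderer, "wireframe")
#   VtkDefineActorKPoint(recordGrid, renderer, 0.05)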
|
lcpt/xc
|
python_modules/postprocess/xcVtk/cad_mesh.py
|
Python
|
gpl-3.0
| 4,973
|
[
"VTK"
] |
de5c572fbcf124bd7b00cc391aa12026d2b1139afba45e5ee46a30ddea66be21
|
"""
@name: Modules/House/Family/Replink/_test/test_core.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2020-2020 by D. Brian Kimmel
@note: Created on Feb 2, 2020
@license: MIT License
@summary: This
Passed all 1 tests - DBK - 2019-12-29
"""
__updated__ = '2020-02-02'
# Import system type stuff
from twisted.trial import unittest
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
class SetupMixin(object):
"""
"""
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
class A0(unittest.TestCase):
def test_00_Print(self):
print('Id: test_setup_pyhouse')
_x = PrettyFormatAny.form('_test', 'title', 190) # so it is defined when printing is cleaned up.
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/Family/Reolink/_test/test_reolink_device.py
|
Python
|
mit
| 881
|
[
"Brian"
] |
e341f25ea16a0794c6e52b4026568cb0e093615325f29bdc12cc196e726bbc35
|
import math
from ClimateUtilities import * #To get the math methods routines
#
#All units are mks units
#
#ToDo:
# * Add conversion factors (calories to joule,Megatons, etc.)
# * Unit conversion calculating object
# * Database of interesting constants (e.g energy and C
# content of coal)
# * Find a better way to organize the database of constants,
# allowing indexing, unit information and description
# *Add the rest of the gases to the database, and find a
# better way to index them and provide help. Note that
# a printed table is more "transparent" in terms of what
# data is available. Find a way of providing a similar
# tabular summary of available data, and perhaps values
# This applies to the planet data table as well, and perhaps
# even to the table of physical constants.
#
# *Finish putting data into the gas database,
# perhaps including Van der Waals, Shomate and Antoine
# coefficients, at least in selected cases
#
#
#-------------Basic physical constants-------------------
#
#The following five are the most accurate 1986 values
#
h = 6.626075540e-34 #Planck's constant
c = 2.99792458e8 #Speed of light
k =1.38065812e-23 #Boltzmann thermodynamic constant
sigma = 5.67051196e-8 #Stefan-Boltzmann constant
G = 6.67428e-11 #Gravitational constant (2006 measurements)
#
#-----------Thermodynamic constants------------
#Following will come out in J/(deg kmol), so
#that dividing Rstar by molecular weight gives
#gas constant appropriate for mks units
N_avogadro = 6.022136736e23 #Avogadro's number
Rstar = 1000.*k*N_avogadro #Universal gas constant
#
#----------Properties of gases-----------------
#The following are approximate mean values
#for "normal" temperatures and pressures, suitable only
#for rough calculations.
#
#This class allows convenient access
#to the basic thermodynamic properties of
#a gas, and selected properties of its
#solid and liquid phases. The properties
#do not need to be specified in the __init__
#method, since they can be created dynamically.
#We specify them anyway, and set them to None,
#as a guide to the naming conventions for those
#creating their own gas objects. A utility is also
#available which will turn a LaTeX formatted thermodynamic table
#into a Python script defining new gas objects.
#
#
#
#ToDo: *Add more documentation and help features
#
# *provide dictionary of properties and units.
# *Add some methods or lists that make it easier
# for the user to create new gas objects and insert
# the data
class gas:
'''
A gas object stores thermodynamic data for
a gas, and selected properties of its condensed
phases. You can create a gas object for gas
G by executing:
G = gas()
and then setting the attributes individually (see
below for explanation of names). A collection
of gas objects for common gases is provided as part
of the phys module. These gas objects were not actually
created "by hand" but rather using a utility script that
automatically translates a LaTeX formatted table into a
Python script defining the objects.
Attributes of a gas object:
CriticalPointT: Critical point temperature (K)
CriticalPointP: Critical point pressure (Pa)
TriplePointT: Triple point temperature (K)
TriplePointP: Triple point pressure (Pa)
L_vaporization_BoilingPoint: Latent heat of vaporization (J/kg)
at boiling point. The so-called "boiling point" is
the temperature at which the saturation vapor pressure
equals 1 atmosphere (1.013 bar). For CO2, the "boiling point"
occurs below the triple point temperature, so the condensed
phase would not be a liquid. Hence, for CO2 the
latent heat is given at the arbitrary reference point
of 253K and 29Pa.
L_vaporization_TriplePoint: Latent heat of vaporization (J/kg)
at the triple point
L_fusion: Latent heat of fusion (J/kg) at the triple point
L_sublimation: Latent heat of sublimation (J/kg) at triple point
rho_liquid_BoilingPoint Liquid phase density (kg/m**3)
at the boiling point
rho_liquid_TriplePoint: Liquid phase density (kg/m**3)
at the triple point
rho_solid: Solid phase density (kg/m**3) at (or sometimes near)
the triple point
cp: Gas phase specific heat (J/(kg K)), at 298K and 1 bar
gamma: ratio of specific heat at constant pressure
to specific heat at constant volume. (Generally
stated at 298K and 1bar)
MolecularWeight: Molecular weight of the dominant isotope
name: Name of the gas
formula: Chemical formula (e.g. 'CH4')
L_vaporization: Default value to use for latent heat of
vaporization. Set to triple point value, if available,
else to boiling point value
rho_liquid: Default value to use for liquid phase density.
Set to triple point value if available otherwise
set to boiling point value
R: Gas constant for the individual gas. Computed from
other data as Rstar/MolecularWeight, when the update()
method is called
Rcp: The adiabatic exponent R/cp. Computed from other
data when the update() method is called.
'''
#The __repr__ method allows us to print out
#a help string when the user types the name
#of a gas object
def __repr__(self):
firstline =\
'This gas object contains thermodynamic data on %s\n'%self.formula
secondline = \
'Type \"help(gas)\" for more information\n'
return firstline+secondline
def __init__(self):
self.CriticalPointT = None
self.CriticalPointP = None
self.TriplePointT = None
self.TriplePointP = None
self.L_vaporization_BoilingPoint = None
self.L_vaporization_TriplePoint = None
self.L_fusion = None
self.L_sublimation = None
self.rho_liquid_BoilingPoint = None
self.rho_liquid_TriplePoint = None
self.rho_solid = None
self.cp = None
self.gamma = None
self.MolecularWeight = None
self.name = None
self.formula = None
#
#Default values for latent heat and liquid
#density. The triple point value is set
#as the default if it is available, otherwise
#the boiling point values are used.
self.L_vaporization= None
self.rho_liquid= None
#
#Computed quantities
#
self.R = None
self.Rcp = None
#Function to compute derived properties
def update(self):
self.R = Rstar/self.MolecularWeight
self.Rcp = self.R/self.cp
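#Minimal hedged sketch of defining a new gas object by hand (the numbers
#below are placeholders for illustration, not vetted thermodynamic data):
# Ar = gas()
# Ar.name = 'Argon'
# Ar.formula = 'Ar'
# Ar.MolecularWeight = 40.
# Ar.cp = 520.
# Ar.update() #fills in Ar.R and Ar.Rcp from the data above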
#Set properties of individual gases
#
#Thermodynamic properties of dry Earth air
air = gas()
air.name = 'Earth Air'
air.cp = 1004.
air.MolecularWeight = 28.97
air.gamma = 1.4003
#------------------------
H2O = gas()
H2O.CriticalPointT = 6.471000e+02
H2O.CriticalPointP = 2.210000e+07
H2O.TriplePointT = 2.731500e+02
H2O.TriplePointP = 6.110000e+02
H2O.L_vaporization_BoilingPoint = 2.255000e+06
H2O.L_vaporization_TriplePoint = 2.493000e+06
H2O.L_fusion = 3.340000e+05
H2O.L_sublimation = 2.840000e+06
H2O.rho_liquid_BoilingPoint = 9.584000e+02
H2O.rho_liquid_TriplePoint = 9.998700e+02
H2O.rho_solid = 9.170000e+02
H2O.cp = 1.847000e+03
H2O.gamma = 1.331000e+00
H2O.MolecularWeight = 1.800000e+01
H2O.name = 'Water'
H2O.formula = 'H2O'
H2O.L_vaporization=2.493000e+06
H2O.rho_liquid=9.998700e+02
#------------------------
CH4 = gas()
CH4.CriticalPointT = 1.904400e+02
CH4.CriticalPointP = 4.596000e+06
CH4.TriplePointT = 9.067000e+01
CH4.TriplePointP = 1.170000e+04
CH4.L_vaporization_BoilingPoint = 5.100000e+05
CH4.L_vaporization_TriplePoint = 5.360000e+05
CH4.L_fusion = 5.868000e+04
CH4.L_sublimation = 5.950000e+05
CH4.rho_liquid_BoilingPoint = 4.502000e+02
CH4.rho_liquid_TriplePoint = None
CH4.rho_solid = 5.093000e+02
CH4.cp = 2.195000e+03
CH4.gamma = 1.305000e+00
CH4.MolecularWeight = 1.600000e+01
CH4.name = 'Methane'
CH4.formula = 'CH4'
CH4.L_vaporization=5.360000e+05
CH4.rho_liquid=4.502000e+02
#------------------------
CO2 = gas()
CO2.CriticalPointT = 3.042000e+02
CO2.CriticalPointP = 7.382500e+06
CO2.TriplePointT = 2.165400e+02
CO2.TriplePointP = 5.185000e+05
CO2.L_vaporization_BoilingPoint = None
CO2.L_vaporization_TriplePoint = 3.970000e+05
CO2.L_fusion = 1.960000e+05
CO2.L_sublimation = 5.930000e+05
CO2.rho_liquid_BoilingPoint = 1.032000e+03
CO2.rho_liquid_TriplePoint = 1.110000e+03
CO2.rho_solid = 1.562000e+03
CO2.cp = 8.200000e+02
CO2.gamma = 1.294000e+00
CO2.MolecularWeight = 4.400000e+01
CO2.name = 'Carbon Dioxide'
CO2.formula = 'CO2'
CO2.L_vaporization=3.970000e+05
CO2.rho_liquid=1.110000e+03
#------------------------
N2 = gas()
N2.CriticalPointT = 1.262000e+02
N2.CriticalPointP = 3.400000e+06
N2.TriplePointT = 6.314000e+01
N2.TriplePointP = 1.253000e+04
N2.L_vaporization_BoilingPoint = 1.980000e+05
N2.L_vaporization_TriplePoint = 2.180000e+05
N2.L_fusion = 2.573000e+04
N2.L_sublimation = 2.437000e+05
N2.rho_liquid_BoilingPoint = 8.086000e+02
N2.rho_liquid_TriplePoint = None
N2.rho_solid = 1.026000e+03
N2.cp = 1.037000e+03
N2.gamma = 1.403000e+00
N2.MolecularWeight = 2.800000e+01
N2.name = 'Nitrogen'
N2.formula = 'N2'
N2.L_vaporization=2.180000e+05
N2.rho_liquid=8.086000e+02
#------------------------
O2 = gas()
O2.CriticalPointT = 1.545400e+02
O2.CriticalPointP = 5.043000e+06
O2.TriplePointT = 5.430000e+01
O2.TriplePointP = 1.500000e+02
O2.L_vaporization_BoilingPoint = 2.130000e+05
O2.L_vaporization_TriplePoint = 2.420000e+05
O2.L_fusion = 1.390000e+04
O2.L_sublimation = 2.560000e+05
O2.rho_liquid_BoilingPoint = 1.141000e+03
O2.rho_liquid_TriplePoint = 1.307000e+03
O2.rho_solid = 1.351000e+03
O2.cp = 9.160000e+02
O2.gamma = 1.393000e+00
O2.MolecularWeight = 3.200000e+01
O2.name = 'Oxygen'
O2.formula = 'O2'
O2.L_vaporization=2.420000e+05
O2.rho_liquid=1.307000e+03
#------------------------
H2 = gas()
H2.CriticalPointT = 3.320000e+01
H2.CriticalPointP = 1.298000e+06
H2.TriplePointT = 1.395000e+01
H2.TriplePointP = 7.200000e+03
H2.L_vaporization_BoilingPoint = 4.540000e+05
H2.L_vaporization_TriplePoint = None
H2.L_fusion = 5.820000e+04
H2.L_sublimation = None
H2.rho_liquid_BoilingPoint = 7.097000e+01
H2.rho_liquid_TriplePoint = None
H2.rho_solid = 8.800000e+01
H2.cp = 1.423000e+04
H2.gamma = 1.384000e+00
H2.MolecularWeight = 2.000000e+00
H2.name = 'Hydrogen'
H2.formula = 'H2'
H2.L_vaporization=4.540000e+05
H2.rho_liquid=7.097000e+01
#------------------------
He = gas()
He.CriticalPointT = 5.100000e+00
He.CriticalPointP = 2.280000e+05
He.TriplePointT = 2.170000e+00
He.TriplePointP = 5.070000e+03
He.L_vaporization_BoilingPoint = 2.030000e+04
He.L_vaporization_TriplePoint = None
He.L_fusion = None
He.L_sublimation = None
He.rho_liquid_BoilingPoint = 1.249600e+02
He.rho_liquid_TriplePoint = None
He.rho_solid = 2.000000e+02
He.cp = 5.196000e+03
He.gamma = 1.664000e+00
He.MolecularWeight = 4.000000e+00
He.name = 'Helium'
He.formula = 'He'
He.L_vaporization=2.030000e+04
He.rho_liquid=1.249600e+02
#------------------------
NH3 = gas()
NH3.CriticalPointT = 4.055000e+02
NH3.CriticalPointP = 1.128000e+07
NH3.TriplePointT = 1.954000e+02
NH3.TriplePointP = 6.100000e+03
NH3.L_vaporization_BoilingPoint = 1.371000e+06
NH3.L_vaporization_TriplePoint = 1.658000e+06
NH3.L_fusion = 3.314000e+05
NH3.L_sublimation = 1.989000e+06
NH3.rho_liquid_BoilingPoint = 6.820000e+02
NH3.rho_liquid_TriplePoint = 7.342000e+02
NH3.rho_solid = 8.226000e+02
NH3.cp = 2.060000e+03
NH3.gamma = 1.309000e+00
NH3.MolecularWeight = 1.700000e+01
NH3.name = 'Ammonia'
NH3.formula = 'NH3'
NH3.L_vaporization=1.658000e+06
NH3.rho_liquid=7.342000e+02
#------------------------
#------------------------
#Synonym for H2O
water = H2O
#Make a list of all the gases
#
#This clever little fragment uses the fact that
#Python can execute any string as a Python statement,
#in order to find all the gases and build a list of them.
#I don't know if there is a more straightforward way to
#get a list of all the objects of a certain type, but this
#works. Some of the trickery below is needed because
#the dir() command returns a list of strings, which
#give the names of the objects. It doesn't give the objects
#themselves.
gases = []
for ob in dir():
exec('isGas=isinstance('+ob+',gas)')
if isGas:
exec('gases.append('+ob+')')
#Update all the gases
for gas1 in gases:
gas1.update()
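#A hedged alternative sketch (not used here): the same list could be built
#without exec, e.g.
# gases = [ob for ob in list(globals().values()) if isinstance(ob, gas)]
#The exec-based version above is kept as the module's original approach.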
#
#
#----------------Radiation related functions-------------
#Planck function (of frequency)
def B(nu,T):
u = min(h*nu/(k*T),500.) #To prevent overflow
return (2.*h*nu**3/c**2)/(math.exp(u)-1.)
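#Illustrative use (hedged): the radiance near the 15 micron CO2 band for a
#255 K atmosphere would be B(c/15.e-6, 255.), i.e. a frequency of about 2.e13 Hz.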
#
#
#----------Saturation Vapor Pressure functions---------------
#
#Saturation vapor pressure over ice (Smithsonian formula)
# Input: Kelvin. Output: Pascal
def satvpi(T):
#
# Compute es over ice (valid between -153 c and 0 c)
# see smithsonian meteorological tables page 350
#
# Original source: GFDL climate model, circa 1995
esbasi = 6107.1
tbasi = 273.16
#
aa = -9.09718 *(tbasi/T-1.0)
b = -3.56654 *math.log10(tbasi/T)
c = 0.876793*(1.0-T/tbasi)
e = math.log10(esbasi)
esice = 10.**(aa+b+c+e)
return .1*esice #Convert to Pascals
#Saturation vapor pressure over liquid water (Smithsonian formula)
# Input: Kelvin. Output: Pascal
def satvpw(T):
# compute es over liquid water between -20c and freezing.
# see smithsonian meteorological tables page 350.
#
# Original source: GFDL climate model, circa 1995
esbasw = 1013246.0
tbasw = 373.16
#
aa = -7.90298*(tbasw/T-1)
b = 5.02808*math.log10(tbasw/T)
c = -1.3816e-07*( 10.**( ((1-T/tbasw)*11.344)-1 ) )
d = 8.1328e-03*( 10.**( ((tbasw/T-1)*(-3.49149))-1) )
e = math.log10(esbasw)
esh2O = 10.**(aa+b+c+d+e)
return .1*esh2O #Convert to Pascals
# An alternate formula for saturation vapor pressure over liquid water
def satvpw_Heymsfield(T):
ts=373.16
sr=3.0057166
# Vapor pressure over water. Heymsfield formula
ar = ts/T
br = 7.90298*(ar-1.)
cr = 5.02808*math.log10(ar);
dw = (1.3816E-07)*(10.**(11.344*(1.-1./ar))-1.)
er = 8.1328E-03*((10.**(-(3.49149*(ar-1.))) )-1.)
vp = 10.**(cr-dw+er+sr-br)
vp=vp*1.0e02
return(vp)
def satvpg(T):
#This is the saturation vapor pressure computation used in the
#GFDL climate model. It blends over from water saturation to
#ice saturation as the temperature falls below 0C.
if ((T-273.16) < -20.):
return satvpi(T)
if ( ((T-273.16) >= -20.)&((T-273.16)<=0.)):
return 0.05*(273.16-T)*satvpi(T) + 0.05*(T-253.16)*satvpw(T)
if ((T-273.16)>0.):
return satvpw(T)
#Saturation vapor pressure for any substance, computed using
#the simplified form of Clausius-Clapeyron assuming the perfect
#gas law and constant latent heat
def satvps(T,T0,e0,MolecularWeight,LatentHeat):
Rv=Rstar/MolecularWeight
return e0*math.exp(-(LatentHeat/Rv)*(1./T - 1./T0))
#This example shows how to simplify the use of the simplified
#saturation vapor pressure function, by setting up an object
#that stores the thermodynamic data needed, so it doesn't have
#to be re-entered each time. Because of the __call__ method,
#once the object is created, it can be invoked like a regular
#function.
#
#Usage example:
# To set up a function e(T) that approximates the saturation
# vapor pressure for a substance which has a latent heat of
# 2.5e6 J/kg, a molecular weight of 18 and has vapor pressure
# 3589. Pa at a temperature of 300K, create the function using:
#
# e = satvps_function(300.,3589.,18.,2.5e6)
#
# and afterward you can invoke it simply as e(T), where T
# is whatever temperature you want to evaluate it for.
#
#Alternately, satvps_function can be called with a gas object
#as the first argument, e.g.
# e = satvps_function(phys.CO2)
#
#If no other arguments are given, the latent heat of sublimation
#will be used when e(T) is called for temperatures below the triple
#point, and the latent heat of vaporization will be used for
#temperatures above the triple point. To allow you to force
#one or the other latent heats to be used, satvps_function takes
#an optional second argument when the first argument is a gas
#object. Thus,
# e = satvps_function(phys.CO2,'ice')
#will always use the latent heat of sublimation, regardless of T,
#while e = satvps_function(phys.CO2,'liquid') will always use
#the latent heat of vaporization.
class satvps_function:
def __init__(self,Gas_or_T0,e0_or_iceFlag=None,MolecularWeight=None,LatentHeat=None):
#Check if the first argument is a gas object. If not, assume
#that the arguments give T0, e0, etc. as numbers
self.iceFlag = e0_or_iceFlag
if isinstance(Gas_or_T0,gas):
self.gas = Gas_or_T0
self.M = Gas_or_T0.MolecularWeight
self.T0 = Gas_or_T0.TriplePointT
self.e0 = Gas_or_T0.TriplePointP
if self.iceFlag == 'ice':
self.L = Gas_or_T0.L_sublimation
elif self.iceFlag == 'liquid':
self.L = Gas_or_T0.L_vaporization
else:
self.iceFlag = 'switch'
self.M = Gas_or_T0.MolecularWeight
else:
self.L = LatentHeat
self.M = MolecularWeight
self.T0 = Gas_or_T0
self.e0 = e0_or_iceFlag
def __call__(self,T):
#Decide which latent heat to use
if self.iceFlag == 'switch':
if T<self.gas.TriplePointT:
L = self.gas.L_sublimation
else:
L = self.gas.L_vaporization
else:
L = self.L
return satvps(T,self.T0,self.e0,self.M,L)
#Class for computing the moist adiabat for a mixture of
#a condensing and noncondensing gas.
# **ToDo: Add help strings and documentation
#
# **ToDo: The way the help strings for gas objects are
# set up makes the argument help box for the
# creator useless. Fix this somehow
#
#**ToDo: Add controls on resolution, top of atmosphere, etc.
#Do we want this to return molar or mass concentration?
#Maybe do both, but have result stored as an attribute
#
class MoistAdiabat:
'''
MoistAdiabat is a class which creates a callable object
used to compute the moist adiabat for a mixture consisting
of a condensible gas and a noncondensing gas. The gases
are specified as gas objects. If the gases are not specified,
the condensible defaults to water vapor and the noncondensible
to modern Earth air.
Usage:
To create a function m that computes the moist
adiabat for the gas Condensible mixed with the gas
Noncondensible, do
m = phys.MoistAdiabat(Condensible,Noncondensible)
For example, to do a mixture of condensible CO2 in
noncondensing N2, do
m = phys.MoistAdiabat(phys.CO2,phys.N2)
Once you have created the function, you give it
the surface partial pressure of the noncondensible
and the surface temperature when you call it, and it
returns arrays consisting of pressure, temperature,
molar concentration of the condensible, and mass
specific concentration of the condensible. For example:
p,T,molarCon,massCon = m(1.e5,300.)
for a surface noncondensible pressure of 1.e5 Pascal and
surface temperature of 300K. The values returned
are arrays. The pressure returned is total pressure at
each level (condensible plus noncondensible). By default,
the computation chooses the pressure values on which to return
the results. For some purposes, you might want the results
specified on a list of pressures of your own choosing. The
computation allows for this, by offering an interpolation
option which returns the result interpolated to a pressure
grid of your own choice, which is specified as an optional
third argument to the function. Thus, to get the
pressure values on a list consisting of [1000.,5000.,10000.] Pa,
you would do:
p,T,molarCon,massCon = m(1.e5,300.,[1000.,5000.,10000.])
The calculation is still done at high resolution to preserve
accuracy, but the results are afterward interpolated to the grid
you want using polynomial interpolation. For your convenience,
the pressure returned on the left hand side is a copy of
the pressure list you specified as input.
'''
def __init__(self,condensible=H2O,noncon = air):
self.condensible = condensible
self.noncon = noncon
#Set up saturation vapor pressure function
self.satvp = satvps_function(condensible)
#Set up thermodynamic constants
self.eps = condensible.MolecularWeight/noncon.MolecularWeight
self.L = condensible.L_vaporization
self.Ra = noncon.R
self.Rc = condensible.R
self.cpa = noncon.cp
self.cpc = condensible.cp
#Set up derivative function for integrator
def slope(logpa,logT):
pa = math.exp(logpa)
T = math.exp(logT)
qsat = self.eps*(self.satvp(T)/pa)
num = (1. + (self.L/(self.Ra*T))*qsat)*self.Ra
den = self.cpa + (self.cpc + (self.L/(self.Rc*T) - 1.)*(self.L/T))*qsat
return num/den
self.slope = slope
self.ptop = 1000. #Default top of atmosphere
self.step = -.05 #Default step size for integration
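#The slope function above is the moist adiabat in the form
#    dlnT/dlnpa = Ra*(1 + (L/(Ra*T))*qsat) /
#                 (cpa + (cpc + (L/(Rc*T) - 1.)*(L/T))*qsat)
#with qsat = eps*esat(T)/pa; this is a transcription of the code,
#written out here for reference, and is what __call__ hands to the
#ODE integrator.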
def __call__(self,ps,Ts,pgrid = None):
#Initial conditions
step = self.step #Step size for integration
ptop = self.ptop #Where to stop integration
#
logpa = math.log(ps)
logT = math.log(Ts)
ad = integrator(self.slope,logpa,logT,step )
#Initialize lists to save results
pL = [math.exp(logpa) + self.satvp(math.exp(logT))]
molarConL = [self.satvp(math.exp(logT))/pL[0]]
TL = [math.exp(logT)]
#Integration loop
p = 1.e30 #Dummy initial value, to get started
while p > ptop:
ans = ad.next()
pa = math.exp(ans[0])
T = math.exp(ans[1])
p = pa+self.satvp(T)
pL.append(p)
molarConL.append(self.satvp(T)/p)
TL.append(T)
#Numeric.array turns lists into arrays that one
#can do arithmetic on.
pL = Numeric.array(pL)
TL = Numeric.array(TL)
molarConL = Numeric.array(molarConL)
#Now compute mass specific concentration
Mc = self.condensible.MolecularWeight
Mnc = self.noncon.MolecularWeight
Mbar = molarConL*Mc +(1.-molarConL)*Mnc
qL = (Mc/Mbar)*molarConL
#
#The else clause below interpolates to a
#specified pressure array pgrid, if desired.
# interp is a class defined in ClimateUtilities
#which creates a callable object which acts like
#an interpolation function for the listed data given
#as arguments.
if pgrid is None:
return pL,TL,molarConL,qL
else:
T1 = interp(pL,TL)
mc1 = interp(pL,molarConL)
q1 = interp(pL,qL)
T = Numeric.array([T1(pp) for pp in pgrid])
mc = Numeric.array([mc1(pp) for pp in pgrid])
q = Numeric.array([q1(pp) for pp in pgrid])
return Numeric.array(pgrid),T, mc, q
| aymeric-spiga/planets | reserve/phys.py | Python | gpl-2.0 | 23,846 | ["Avogadro"] | 5a44c0dd47ba128dc772bc987df17d62992363c414987dfc189c6bc5dc6595e5 |
from datetime import datetime
from turbogears.database import PackageHub
from sqlobject import *
from turbogears import identity
sqlhub.processConnection = connectionForURI("postgres://sweetter:sweetter@localhost/sweetter")
# class YourDataClass(SQLObject):
# pass
# identity models.
class Visit(SQLObject):
"""
A visit to your site
"""
class sqlmeta:
table = 'visit'
visit_key = StringCol(length=40, alternateID=True,
alternateMethodName='by_visit_key')
created = DateTimeCol(default=datetime.now)
expiry = DateTimeCol()
def lookup_visit(cls, visit_key):
try:
return cls.by_visit_key(visit_key)
except SQLObjectNotFound:
return None
lookup_visit = classmethod(lookup_visit)
class VisitIdentity(SQLObject):
"""
A Visit that is linked to a User object
"""
class sqlmeta:
table = 'visit_identity'
visit_key = StringCol(length=40, alternateID=True,
alternateMethodName='by_visit_key')
user_id = IntCol()
class Group(SQLObject):
"""
An ultra-simple group definition.
"""
# names like "Group", "Order" and "User" are reserved words in SQL
# so we set the name to something safe for SQL
class sqlmeta:
table = 'tg_group'
group_name = UnicodeCol(length=16, alternateID=True,
alternateMethodName='by_group_name')
display_name = UnicodeCol(length=255)
created = DateTimeCol(default=datetime.now)
# collection of all users belonging to this group
users = RelatedJoin('User', intermediateTable='user_group',
joinColumn='group_id', otherColumn='user_id')
# collection of all permissions for this group
permissions = RelatedJoin('Permission', joinColumn='group_id',
intermediateTable='group_permission',
otherColumn='permission_id')
class Sweets(SQLObject):
"""
Users' comments
"""
class sqlmeta:
table = 'sweets'
comment = UnicodeCol(length=160)
created = DateTimeCol(default=datetime.now)
votes = IntCol()
user = ForeignKey('User',alternateMethodName='by_user_id', cascade=True)
@classmethod
def get_num_entries(cls, n=5):
import datetime
from sqlobject.sqlbuilder import *
hoy = datetime.datetime.today()
unasem = hoy + datetime.timedelta(days=-7)
conn=cls._connection
sel = conn.sqlrepr(Select((User.q.user_name,\
func.COUNT(Sweets.q.id)), AND(Sweets.q.userID == User.q.id,\
Sweets.q.created > unasem), \
groupBy=User.q.user_name, orderBy=-func.COUNT(Sweets.q.id)))
try:
tal = list(conn.queryAll(sel)[0:n])
except Exception:
tal = []
return tal
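# Roughly, the Select above renders to SQL along the lines of
#   SELECT tg_user.user_name, COUNT(sweets.id) FROM sweets, tg_user
#   WHERE sweets.user_id = tg_user.id AND sweets.created > <one week ago>
#   GROUP BY tg_user.user_name ORDER BY COUNT(sweets.id) DESC
# (the exact text depends on the SQLObject version); the [0:n] slice then
# keeps the n most active users of the last week.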
@classmethod
def get_num_entries_id(cls, uid):
from sqlobject.sqlbuilder import *
import sqlobject
conn=cls._connection
sel = conn.sqlrepr(Select((User.q.user_name,\
func.COUNT(Sweets.q.id)), \
sqlobject.AND(Sweets.q.userID == User.q.id, Sweets.q.userID == uid),\
groupBy=User.q.user_name, orderBy=-func.COUNT(Sweets.q.id)))
tal = list(conn.queryAll(sel))
if len(tal) > 0:
return tal[0]
else: return ['', 0]
class RSS(SQLObject):
'''
micro planet
'''
class sqlmeta:
table = 'rss'
user = ForeignKey('User', cascade=True)
rss = UnicodeCol()
tag = UnicodeCol(length=15)
url = UnicodeCol()
last_updated = DateTimeCol()
unique = index.DatabaseIndex(user, rss, unique=True)
class Followers(SQLObject):
"""
Relation between users
"""
class sqlmeta:
table = 'followers'
follower = ForeignKey('User', cascade=True)
following = ForeignKey('User', cascade=True)
unique = index.DatabaseIndex(follower, following, unique=True)
class Todo(SQLObject):
class sqlmeta:
table = 'todo'
sweetid = ForeignKey('Sweets', cascade=True)
asigned = ForeignKey('User', cascade=True)
doit = BoolCol()
class UnvalidatedUsers (SQLObject):
class sqlmeta:
table = 'unvalidated_users'
user = ForeignKey('User', cascade=True)
key = UnicodeCol(length=20, alternateMethodName='by_key')
class Recover(SQLObject):
class sqlmeta:
table = 'recover'
user = ForeignKey('User', cascade=True)
created = DateTimeCol(default=datetime.now)
key = UnicodeCol(length=20, alternateMethodName='by_key')
class Favorites (SQLObject):
class sqlmeta:
table = 'favorites'
user = ForeignKey('User', cascade = True)
sweet = ForeignKey('Sweets', cascade = True)
unique = index.DatabaseIndex(user, sweet, unique = True)
class Votes(SQLObject):
class sqlmeta:
table = 'votes'
user = ForeignKey('User', cascade=True)
sweet = ForeignKey('Sweets', cascade=True)
unique = index.DatabaseIndex(user, sweet, unique=True)
class Replies(SQLObject):
class sqlmeta:
table = "replies"
sweet = ForeignKey('Sweets', cascade=True)
to = ForeignKey('User', cascade=True)
unique = index.DatabaseIndex(sweet, to, unique=True)
class User(SQLObject):
"""
Reasonably basic User definition.
Probably would want additional attributes.
"""
# names like "Group", "Order" and "User" are reserved words in SQL
# so we set the name to something safe for SQL
class sqlmeta:
table = 'tg_user'
user_name = UnicodeCol(length=20, alternateID=True, alternateMethodName='by_user_name')
email_address = UnicodeCol(length=255, alternateID=True, alternateMethodName='by_email_address')
url = UnicodeCol(length=255)
api_key = UnicodeCol(length=32, alternateID=True)
avatar = UnicodeCol(length=255)
location = UnicodeCol(length=50, alternateMethodName='by_location')
display_name = UnicodeCol(length=255)
password = UnicodeCol(length=40)
created = DateTimeCol(default=datetime.now)
karma = FloatCol()
nvotos = IntCol(default=0)
nvotos_dia = IntCol(default=0)
validated = BoolCol(default=0)
# groups this user belongs to
groups = RelatedJoin('Group', intermediateTable='user_group',
joinColumn='user_id', otherColumn='group_id')
def _get_permissions(self):
perms = set()
for g in self.groups:
perms = perms | set(g.permissions)
return perms
def _set_password(self, cleartext_password):
"Runs cleartext_password through the hash algorithm before saving."
password_hash = identity.encrypt_password(cleartext_password)
self._SO_set_password(password_hash)
def set_password_raw(self, password):
"Saves the password as-is to the database."
self._SO_set_password(password)
class Permission(SQLObject):
"""
A relationship that determines what each Group can do
"""
permission_name = UnicodeCol(length=16, alternateID=True,
alternateMethodName='by_permission_name')
description = UnicodeCol(length=255)
groups = RelatedJoin('Group',
intermediateTable='group_permission',
joinColumn='permission_id',
otherColumn='group_id')
class Jabber(SQLObject):
'''
List of users with the jabberbot active, and their corresponding
accounts
'''
class sqlmeta:
table = 'jabber'
user = ForeignKey('User', cascade=True)
jabber = UnicodeCol(length=255, alternateID=True,\
alternateMethodName='by_jabber')
created = DateTimeCol(default=datetime.now)
validated = BoolCol(default=0)
active = BoolCol(default=1)
unique = index.DatabaseIndex(user, jabber, unique=True)
| danigm/sweetter | sweetter/s2_to_s3/s2model.py | Python | agpl-3.0 | 7,917 | ["VisIt"] | 2b8edddd23d2bce33c593dc8dd8d41f17286e7ba352a0f9b21a0f706c10bb665 |
from Bio import ExPASy
from Bio import SeqIO
import numpy as np
import csv
AA_LETTERS = sorted("ACEDGFIHKMLNQPSRTWVY")
# list all proteins in E. coli by UniProt ID - parsed from a .txt file
def download_aa_dist_per_gene(UPID_list_fname, cutoff):
UPID_list = []
for row in open(UPID_list_fname, 'r'):
if row:
UPID_list.append(row[48:54])
if cutoff > 0:
UPID_list = UPID_list[0:min(cutoff, len(UPID_list))]
# a dictionary containing the aa_dist for each uniprot ID
UPID_to_aa_dist = {}
for i, UPID in enumerate(UPID_list):
print i, "\t", UPID
# initialize a dictionary for amino acids frequency in each protein
aa_dist = dict([(aa, 0) for aa in AA_LETTERS])
# fetch the aa sequence for each UniProt ID from Swiss-Prot using the Biopython ExPASy tool
handle = ExPASy.get_sprot_raw(UPID)
seq_record = SeqIO.read(handle, "swiss")
# count frequency for each aa in each UPID
# update aa frequencies in aa_dist; only count standard residues, since an aa seq from
# Swiss-Prot may contain unexpected letters such as 'X'
for aa in list(seq_record):
if aa in AA_LETTERS:
aa_dist[aa] += 1
UPID_to_aa_dist[UPID] = np.array([aa_dist[aa] for aa in AA_LETTERS])
return UPID_to_aa_dist
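# The returned dictionary maps each UniProt ID to a length-20 numpy array of
# residue counts, ordered as in AA_LETTERS (alphabetical). For example
# (hypothetical ID and values):
#   UPID_to_aa_dist['P12345']   # -> array([ 31., 12., ..., 4.])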
def load_UPID_to_aa_dist(aa_dist_by_gene_fname):
input_csv = csv.reader(open(aa_dist_by_gene_fname, 'r'), delimiter='\t')
input_csv.next()
UPID_to_aa_dist = {}
for row in input_csv:
UPID = row[0]
UPID_to_aa_dist[UPID] = np.array([float(x) for x in row[1:]])
return UPID_to_aa_dist
def calculate_aa_dist_per_genome(UPID_to_aa_dist):
genomic_aa_dist = np.zeros((1, len(AA_LETTERS)))
for aa_dist in UPID_to_aa_dist.values():
genomic_aa_dist += aa_dist
return genomic_aa_dist
def write_to_tsv(header, dictionary, output_fname):
# write output file
output_csv = csv.writer(open(output_fname, 'w'), delimiter='\t')
# header for output file
output_csv.writerow(header)
for key in dictionary.keys():
output_csv.writerow([key] + list(dictionary[key]))
def calculate_aa_dist_per_proteome(proteome_fname, UPID_to_aa_dist):
proteomics_csv_reader = csv.reader(open(proteome_fname, 'r'), delimiter='\t')
# skip the first empty row
proteomics_csv_reader.next()
conditions = proteomics_csv_reader.next()[10:29]
UPID_to_abundance_vectors = {}
total_proteomic_aa_dist = np.zeros((len(conditions), len(AA_LETTERS)))
for row in proteomics_csv_reader:
if row[0]:
# 19 different growth conditions
UPID = row[1]
UPID_to_abundance_vectors[UPID] = [float(x) for x in row[10:29]]
for i, condition in enumerate(conditions):
for UPID in UPID_to_aa_dist.keys():
aa_dist = UPID_to_aa_dist[UPID]
if UPID in UPID_to_abundance_vectors:
abundance = UPID_to_abundance_vectors[UPID]
total_proteomic_aa_dist[i, :] += abundance[i] * aa_dist
return total_proteomic_aa_dist
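# In effect, for each growth condition i the loop above forms the
# abundance-weighted sum
#   total_proteomic_aa_dist[i, :] = sum_over_proteins( abundance_i(protein) * aa_dist(protein) )
# using only proteins present in both input dictionaries.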
def normalize_aa_dist(total_proteomic_aa_dist):
total_proteomic_aa_dist_normed = np.zeros(total_proteomic_aa_dist.shape)
for i, row in enumerate(total_proteomic_aa_dist):
total_proteomic_aa_dist_normed[i] = total_proteomic_aa_dist[i] / sum(total_proteomic_aa_dist[i])
return total_proteomic_aa_dist_normed
if __name__ == "__main__":
UPID_to_aa_dist = download_aa_dist_per_gene('all_ecoli_genes.txt',20)
write_to_tsv(['UPID'] + AA_LETTERS, UPID_to_aa_dist, 'aa_dist_by_UP_ID.csv')
aa_dist_genome = calculate_aa_dist_per_genome(UPID_to_aa_dist)
total_proteomic_aa_dist = calculate_aa_dist_per_proteome('Ecoli_19_Conditions_Proteomics.csv', UPID_to_aa_dist)
a = normalize_aa_dist(total_proteomic_aa_dist)
print a
| eladnoor/proteomaps | src/amino_acid_distribution/download_aa_sequences.py | Python | mit | 4,068 | ["Biopython"] | 10a30ec9b648c0d787af1f9731188268d60325016dc787500861f9ce49987ae9 |
################################################################################
##
## Pythonc--Python to C++ translator
##
## Copyright 2013 Zach Wegner, Matt Craighead
##
## This file is part of Pythonc.
##
## Pythonc is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Pythonc is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Pythonc. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import ast
import os
import sys
import syntax
inplace_op_table = {
'__%s__' % x: '__i%s__' % x
for x in ['add', 'and', 'floordiv', 'lshift', 'mod', 'mul', 'or', 'pow', 'rshift', 'sub', 'truediv', 'xor']
}
class TranslateError(Exception):
def __init__(self, node, msg):
super().__init__('error at line %s: %s' % (node.lineno, msg))
class Transformer(ast.NodeTransformer):
def __init__(self):
self.statements = []
self.in_class = False
self.in_function = False
def generic_visit(self, node):
raise TranslateError(node, 'can\'t translate %s' % node)
def visit_child_list(self, node):
r = []
for i in node:
c = self.visit(i)
if isinstance(c, list):
r.extend(c)
elif c is not None:
r.append(c)
return r
def visit_Name(self, node):
#assert isinstance(node.ctx, ast.Load)
if node.id in ['True', 'False']:
return syntax.BoolConst(node.id == 'True')
elif node.id == 'None':
return syntax.NoneConst()
return syntax.Load(node.id)
def visit_Num(self, node):
if isinstance(node.n, float):
raise TranslateError(node, 'Pythonc currently does not support float literals')
assert isinstance(node.n, int)
return syntax.IntConst(node.n)
def visit_Str(self, node):
assert isinstance(node.s, str)
return syntax.StringConst(node.s)
def visit_Bytes(self, node):
assert isinstance(node.s, bytes)
return syntax.BytesConst(node.s)
# Unary Ops
def visit_Invert(self, node): return '__invert__'
def visit_Not(self, node): return '__not__'
def visit_UAdd(self, node): return '__pos__'
def visit_USub(self, node): return '__neg__'
def visit_UnaryOp(self, node):
op = self.visit(node.op)
rhs = self.visit(node.operand)
return syntax.UnaryOp(op, rhs)
# Binary Ops
def visit_Add(self, node): return '__add__'
def visit_BitAnd(self, node): return '__and__'
def visit_BitOr(self, node): return '__or__'
def visit_BitXor(self, node): return '__xor__'
def visit_Div(self, node): return '__truediv__'
def visit_FloorDiv(self, node): return '__floordiv__'
def visit_LShift(self, node): return '__lshift__'
def visit_Mod(self, node): return '__mod__'
def visit_Mult(self, node): return '__mul__'
def visit_Pow(self, node): return '__pow__'
def visit_RShift(self, node): return '__rshift__'
def visit_Sub(self, node): return '__sub__'
def visit_BinOp(self, node):
op = self.visit(node.op)
lhs = self.visit(node.left)
rhs = self.visit(node.right)
return syntax.BinaryOp(op, lhs, rhs)
# Comparisons
def visit_Eq(self, node): return '__eq__'
def visit_NotEq(self, node): return '__ne__'
def visit_Lt(self, node): return '__lt__'
def visit_LtE(self, node): return '__le__'
def visit_Gt(self, node): return '__gt__'
def visit_GtE(self, node): return '__ge__'
def visit_In(self, node): return '__contains__'
def visit_NotIn(self, node): return '__ncontains__'
def visit_Is(self, node): return '__is__'
def visit_IsNot(self, node): return '__isnot__'
def visit_Compare(self, node):
assert len(node.ops) == 1
assert len(node.comparators) == 1
op = self.visit(node.ops[0])
lhs = self.visit(node.left)
rhs = self.visit(node.comparators[0])
# Sigh--Python has these ordered weirdly
if op in ['__contains__', '__ncontains__']:
lhs, rhs = rhs, lhs
return syntax.BinaryOp(op, lhs, rhs)
# Bool ops
def visit_And(self, node): return 'and'
def visit_Or(self, node): return 'or'
def visit_BoolOp(self, node):
assert len(node.values) >= 2
op = self.visit(node.op)
rhs = self.visit(node.values[-1])
for v in reversed(node.values[:-1]):
lhs = self.visit(v)
rhs = syntax.BoolOp(op, lhs, rhs)
return rhs
def visit_IfExp(self, node):
expr = syntax.Test(self.visit(node.test))
true_expr = self.visit(node.body)
false_expr = self.visit(node.orelse)
return syntax.IfExp(expr, true_expr, false_expr)
def visit_List(self, node):
items = [self.visit(i) for i in node.elts]
return syntax.List(items)
def visit_Tuple(self, node):
items = [self.visit(i) for i in node.elts]
return syntax.Tuple(items)
def visit_Dict(self, node):
keys = [self.visit(i) for i in node.keys]
values = [self.visit(i) for i in node.values]
return syntax.Dict(keys, values)
def visit_Set(self, node):
items = [self.visit(i) for i in node.elts]
return syntax.Set(items)
def visit_Subscript(self, node):
l = self.visit(node.value)
if isinstance(node.slice, ast.Index):
index = self.visit(node.slice.value)
return syntax.Subscript(l, index)
elif isinstance(node.slice, ast.Slice):
[start, end, step] = [self.visit(a) if a else syntax.NoneConst() for a in
[node.slice.lower, node.slice.upper, node.slice.step]]
return syntax.Slice(l, start, end, step)
def visit_Attribute(self, node):
assert isinstance(node.ctx, ast.Load)
l = self.visit(node.value)
attr = syntax.Attribute(l, syntax.StringConst(node.attr))
return attr
def visit_Call(self, node):
fn = self.visit(node.func)
if node.starargs:
assert not node.args
args = syntax.TupleFromIter(self.visit(node.starargs))
else:
args = syntax.Tuple([self.visit(a) for a in node.args])
if node.kwargs:
kwargs = self.visit(node.kwargs)
else:
kwargs = syntax.NullConst()
if node.keywords:
assert not node.kwargs
keys = [syntax.StringConst(i.arg) for i in node.keywords]
values = [self.visit(i.value) for i in node.keywords]
kwargs = syntax.Dict(keys, values)
return syntax.Call(fn, args, kwargs)
def visit_Assign(self, node):
# XXX will this always be unique? Should find a better
# solution for this, regardless...
temp = '__tuple_unpack_temp'
stmts = [syntax.Store(temp, self.visit(node.value))]
value = syntax.Load(temp)
def assign_value(target, value):
if isinstance(target, ast.Name):
return [syntax.Store(target.id, value)]
elif isinstance(target, ast.Tuple):
stmts = []
for i, t in enumerate(target.elts):
stmts += assign_value(t, syntax.Subscript(value, syntax.IntConst(i)))
return stmts
elif isinstance(target, ast.Attribute):
base = self.visit(target.value)
return [syntax.StoreAttr(base, syntax.StringConst(target.attr), value)]
elif isinstance(target, ast.Subscript):
assert isinstance(target.slice, ast.Index)
base = self.visit(target.value)
index = self.visit(target.slice.value)
return [syntax.StoreSubscript(base, index, value)]
else:
assert False
for target in reversed(node.targets):
stmts += assign_value(target, value)
return stmts
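# As a concrete illustration (sketch, using the temp name emitted above),
# an assignment such as
#     a, b = f()
# is lowered to roughly
#     __tuple_unpack_temp = f()
#     a = __tuple_unpack_temp[0]
#     b = __tuple_unpack_temp[1]
# with the Subscript/Store nodes produced by assign_value().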
def visit_AugAssign(self, node):
op = self.visit(node.op)
value = self.visit(node.value)
op = inplace_op_table[op]
if isinstance(node.target, ast.Name):
target = node.target.id
# XXX HACK: doesn't modify in place
binop = syntax.BinaryOp(op, syntax.Load(target), value)
return [syntax.Store(target, binop)]
elif isinstance(node.target, ast.Attribute):
l = self.visit(node.target.value)
attr_name = syntax.StringConst(node.target.attr)
attr = syntax.Attribute(l, attr_name)
binop = syntax.BinaryOp(op, attr, value)
return [syntax.StoreAttr(l, attr_name, binop)]
elif isinstance(node.target, ast.Subscript):
assert isinstance(node.target.slice, ast.Index)
base = self.visit(node.target.value)
index = self.visit(node.target.slice.value)
old = syntax.Subscript(base, index)
binop = syntax.BinaryOp(op, old, value)
return [syntax.StoreSubscript(base, index, binop)]
else:
assert False
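# Correspondingly (and per the HACK note above), an augmented assignment
# such as
#     x += y
# is emitted as an ordinary rebinding, roughly
#     x = x.__iadd__(y)    # via inplace_op_table['__add__'] -> '__iadd__'
# so true in-place mutation semantics are not preserved.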
def visit_Delete(self, node):
assert len(node.targets) == 1
target = node.targets[0]
assert isinstance(target, ast.Subscript)
assert isinstance(target.slice, ast.Index)
name = self.visit(target.value)
value = self.visit(target.slice.value)
return [syntax.DeleteSubscript(name, value)]
def visit_If(self, node):
expr = syntax.Test(self.visit(node.test))
stmts = self.visit_child_list(node.body)
if node.orelse:
else_block = self.visit_child_list(node.orelse)
else:
else_block = []
return syntax.If(expr, stmts, else_block)
def visit_Break(self, node):
return syntax.Break()
def visit_Continue(self, node):
return syntax.Continue()
def visit_For(self, node):
assert not node.orelse
iter = self.visit(node.iter)
stmts = self.visit_child_list(node.body)
stmts.append(syntax.CollectGarbage(None))
if isinstance(node.target, ast.Name):
target = node.target.id
elif isinstance(node.target, ast.Tuple):
target = [t.id for t in node.target.elts]
else:
assert False
return syntax.For(target, iter, stmts)
def visit_While(self, node):
assert not node.orelse
test = self.visit(node.test)
test = syntax.If(syntax.Test(syntax.UnaryOp('__not__', test)),
[syntax.Break()], [])
stmts = [test] + self.visit_child_list(node.body)
stmts.append(syntax.CollectGarbage(None))
return syntax.While(stmts)
# XXX We are just flattening "with x as y:" into "y = x" (this works in some simple cases with open()).
def visit_With(self, node):
assert node.optional_vars
expr = self.visit(node.context_expr)
stmts = [syntax.Store(node.optional_vars.id, expr)]
stmts += self.visit_child_list(node.body)
return stmts
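# Sketch of the flattening described above:
#     with open('f.txt') as f:
#         body
# becomes, in effect,
#     f = open('f.txt')
#     body
# i.e. __enter__/__exit__ are never invoked, which is why this only works
# for simple cases such as open().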
def visit_Comprehension(self, node, comp_type):
assert len(node.generators) == 1
gen = node.generators[0]
assert len(gen.ifs) <= 1
if isinstance(gen.target, ast.Name):
target = (gen.target.id)
elif isinstance(gen.target, ast.Tuple):
target = [(t.id) for t in gen.target.elts]
else:
assert False
iter = self.visit(gen.iter)
cond = None
if gen.ifs:
cond = self.visit(gen.ifs[0])
if comp_type == 'dict':
expr = self.visit(node.key)
expr2 = self.visit(node.value)
else:
expr = self.visit(node.elt)
expr2 = None
return syntax.Comprehension(comp_type, target, iter, cond, expr, expr2)
def visit_ListComp(self, node):
return self.visit_Comprehension(node, 'list')
def visit_SetComp(self, node):
return self.visit_Comprehension(node, 'set')
def visit_DictComp(self, node):
return self.visit_Comprehension(node, 'dict')
def visit_GeneratorExp(self, node):
return self.visit_Comprehension(node, 'generator')
def visit_Return(self, node):
if node.value is not None:
expr = self.visit(node.value)
self.statements.append(syntax.CollectGarbage(expr))
return syntax.Return(expr)
else:
return syntax.Return(None)
def visit_Assert(self, node):
expr = self.visit(node.test)
return syntax.Assert(expr, node.lineno)
def visit_Raise(self, node):
assert not node.cause
expr = self.visit(node.exc)
return syntax.Raise(expr, node.lineno)
def visit_arguments(self, node):
assert not node.kwarg
args = [a.arg for a in node.args]
defaults = self.visit_child_list(node.defaults)
if node.kwonlyargs:
kwonlyargs = [a.arg for a in node.kwonlyargs]
kw_defaults = self.visit_child_list(node.kw_defaults)
else:
kwonlyargs, kw_defaults = None, []
return syntax.Arguments(args, defaults, node.vararg,
kwonlyargs, kw_defaults)
def visit_FunctionDef(self, node):
assert not self.in_function
decorators = set()
for decorator in node.decorator_list:
assert isinstance(decorator, ast.Name)
decorators.add(decorator.id)
is_builtin = False
if 'builtin' in decorators:
decorators.remove('builtin')
is_builtin = True
assert not decorators
# Set some state and recursively visit child nodes, then restore state
self.in_function = True
args = self.visit(node.args)
body = [args] + self.visit_child_list(node.body)
if not body or not isinstance(body[-1], syntax.Return):
body.append(syntax.Return(None))
self.in_function = False
exp_name = node.exp_name if 'exp_name' in dir(node) else None
return syntax.FunctionDef(node.name, body, exp_name, is_builtin)
def visit_ClassDef(self, node):
assert not node.bases
assert not node.keywords
assert not node.starargs
assert not node.kwargs
assert not node.decorator_list
assert not self.in_class
assert not self.in_function
for fn in node.body:
if isinstance(fn, ast.FunctionDef):
fn.exp_name = '_%s_%s' % (node.name, fn.name)
self.in_class = True
body = self.visit_child_list(node.body)
self.in_class = False
return syntax.ClassDef(node.name, body)
def gen_import(self, node, name, from_names=None):
if name in syntax.builtin_modules:
assert not from_names
module = syntax.SingletonRef('module_%s_singleton' % name)
else:
for d in (sys.path[0], '.'):
path = '%s/%s.py' % (d, name)
if os.path.exists(path):
break
else:
raise TranslateError(node, 'cannot find %s' % path)
stmts = transform(path, name == '__builtins__')
path = os.path.abspath(path)
module = syntax.ImportStatement(name, from_names, path, stmts)
return module
def visit_Import(self, node):
statements = []
for name in node.names:
module = self.gen_import(node, name.name)
name = name.asname or name.name
statements.append(syntax.Store(name, module))
return statements
def visit_ImportFrom(self, node):
assert not node.level
assert '.' not in node.module
# Empty list is a sentinel value for *
if any('*' in name.name for name in node.names):
assert len(node.names) == 1
from_names = []
else:
from_names = [(name.name, name.asname or name.name) for name in node.names]
return [self.gen_import(node, node.module, from_names=from_names)]
def visit_Expr(self, node):
return self.visit(node.value)
def visit_Module(self, node):
return self.visit_child_list(node.body)
def visit_Global(self, node):
return syntax.Global(node.names)
def visit_Pass(self, node): pass
def visit_Load(self, node): pass
def visit_Store(self, node): pass
def transform(path, builtin=False):
with open(path) as f:
text = f.read()
# XXX HACK! this seems rather necessary at the moment, so the syntax
# module can stay ast-module agnostic.
if not builtin:
text = 'from __builtins__ import *\n' + text
node = ast.parse(text)
return Transformer().visit(node)
def compile(input_path, output_path):
node = transform(input_path)
syntax.write_output(node, output_path)
| zwegner/pythonc | transform.py | Python | gpl-3.0 | 17,388 | ["VisIt"] | af399d42c3da82e89a17768d38910f046db1eca0843528e83cde60de84fc083f |
#!/usr/bin/python2
# vim: ts=4 : sts=4 : sw=4 : et :
import sqlite3
import datetime
import os.path
import sys
import wx
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
from wx.lib.mixins.listctrl import ColumnSorterMixin
import version
import survey
import data
import about
galaxy_names = [
"Thustra's Eye",
"In'Kar Border Region",
"Ransuul's Flaming Sword",
"Vulcan's Forge",
"Crown of Othon",
"Heart of Victorus",
"House Zanathar",
"Seven Ten",
"Dyrathon's Retreat",
"Fallen Legions of Muturon",
"Indigo Sea",
"Black Hole",
"Core",
"Muturon Encounter",
"Vreenox Eclipse",
"Andrometa Rising",
"Falla's Embrace",
"Shores of Hazeron",
"Veil of Targoss",
"Edge of the Rift",
]
orbit_zones = [
'Inferno Zone',
'Inner Zone',
'Habitable Zone',
'Outer Zone',
'Frigid Zone',
]
body_kinds = [
'Ringworld',
'Planet',
'Large Moon',
'Moon',
'Ring',
'Gas Giant',
'Star',
]
class AutoWidthListCtrl(wx.ListCtrl, ListCtrlAutoWidthMixin):
def __init__(self, parent):
wx.ListCtrl.__init__(self, parent, wx.ID_ANY, style=wx.LC_REPORT)
ListCtrlAutoWidthMixin.__init__(self)
class SortedListCtrl(AutoWidthListCtrl, ColumnSorterMixin):
def __init__(self, parent, data):
AutoWidthListCtrl.__init__(self, parent)
ColumnSorterMixin.__init__(self, len(data.values()[0]))
self.itemDataMap = data
def GetListCtrl(self):
return self
class ResultListCtrl(SortedListCtrl):
def __init__(self, parent, data):
SortedListCtrl.__init__(self, parent, data)
#build the columns
self.InsertColumn(0, 'name', width=140)
self.InsertColumn(1, 'tl', wx.LIST_FORMAT_RIGHT, 50)
self.InsertColumn(2, 'quality', wx.LIST_FORMAT_RIGHT, 60)
self.InsertColumn(3, 'prev', wx.LIST_FORMAT_RIGHT, 60)
self.InsertColumn(4, 'diameter', width=80)
self.InsertColumn(5, 'kind', width=80)
self.InsertColumn(6, 'type', width=80)
self.InsertColumn(7, 'zone', width=50)
self.InsertColumn(8, 'world', width=140)
self.InsertColumn(9, 'system', width=140)
self.InsertColumn(10, 'sector', width=140)
self.InsertColumn(11, 'galaxy', width=140)
self.InsertColumn(12, 'coords', width=80)
#insert the data
self.InsertData(data)
def InsertData(self, data):
items = data.items()
for key, i in items:
index = self.InsertStringItem(sys.maxint, i[0])
for k in range(1, len(i)):
self.SetStringItem(index, k, i[k])
self.SetItemData(index, key)
def SetData(self, data):
self.DeleteAllItems()
self.InsertData(data)
self.itemDataMap = data
class Menubar(wx.MenuBar):
def __init__(self, parent):
wx.MenuBar.__init__(self)
fileMenu = wx.Menu()
helpMenu = wx.Menu()
fitem = fileMenu.Append(wx.ID_OPEN, 'Define DB', 'Define Database')
parent.Bind(wx.EVT_MENU, parent.DefineDatabase, fitem)
fitem = fileMenu.Append(wx.ID_REDO, 'Reprocess DB', 'Reprocess Database')
parent.Bind(wx.EVT_MENU, parent.ReprocessDatabase, fitem)
fitem = fileMenu.Append(wx.ID_CLEAR, 'Clear DB', 'Clear database')
parent.Bind(wx.EVT_MENU, parent.ClearDatabase, fitem)
fitem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
parent.Bind(wx.EVT_MENU, parent.OnQuit, fitem)
hitem = helpMenu.Append(wx.ID_ABOUT, 'About', 'About application')
parent.Bind(wx.EVT_MENU, parent.ShowAbout, hitem)
self.Append(fileMenu, '&File')
self.Append(helpMenu, '&Help')
parent.SetMenuBar(self)
class Toolbar(wx.BoxSizer):
def __init__(self, parent, grandparent):
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
#create the buttons
self.add_button = wx.Button(parent, id=wx.ID_ADD, style=wx.BU_EXACTFIT)
#add the buttons to the widget
self.Add(self.add_button, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT)
#bind the buttons to actions
grandparent.Bind(wx.EVT_BUTTON, grandparent.AddFile, id=self.add_button.GetId())
class SearchControls(wx.BoxSizer):
def __init__(self, parent, grandparent):
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
resources = [
'Air',
'Animal Carcass',
'Antiflux Particles',
'Beans',
'Bolite',
'Borexino Precipitate',
'Cheese',
'Coal',
'Cryozine',
'Crystals',
'Eggs',
'Eludium',
'Fertilizer',
'Fish',
'Fruit',
'Gems',
'Grain',
'Grapes',
'Herbs',
'Hops',
'Hydrogen',
'Ice',
'Ioplasma',
'Log',
'Lumenite',
'Magmex',
'Milk',
'Minerals',
'Myrathane',
'Natural Gas',
'Nuts',
'Oil',
'Ore',
'Phlogiston',
'Plant Fiber',
'Polytaride',
'Radioactives',
'Spices',
'Stone',
'Sunlight',
'Type A Preons',
'Type B Preons',
'Type F Preons',
'Type G Preons',
'Type K Preons',
'Type M Preons',
'Type O Preons',
'Vegetable',
'Vegetation Density',
'Vulcanite',
'Water in the Environment',
]
name_l = wx.StaticText(parent, label="Name:")
self.name_field = wx.ComboBox(parent, style=wx.TE_PROCESS_ENTER, choices=resources)
tl_l = wx.StaticText(parent, label="Min TL:")
self.tl_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
galaxy_l = wx.StaticText(parent, label="Galaxy:")
self.galaxy_field = wx.ComboBox(parent, style=wx.TE_PROCESS_ENTER, choices=galaxy_names)
self.orbit_field = wx.ListBox(parent, style=wx.LB_EXTENDED, choices=orbit_zones)
self.body_field = wx.ListBox(parent, style=wx.LB_EXTENDED, choices=body_kinds)
planet_l = wx.StaticText(parent, label="Planet:")
self.planet_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
system_l = wx.StaticText(parent, label="System:")
self.system_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
sector_l = wx.StaticText(parent, label="Sector:")
self.sector_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
minsec_l = wx.StaticText(parent, label="Min Sec:")
self.minsecx_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
self.minsecy_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
self.minsecz_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
maxsec_l = wx.StaticText(parent, label="Max Sec:")
self.maxsecx_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
self.maxsecy_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
self.maxsecz_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
center_l = wx.StaticText(parent, label="Center Coords:")
self.centerx_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
self.centery_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
self.centerz_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
radius_l = wx.StaticText(parent, label="Search Radius:")
self.radius_field = wx.TextCtrl(parent, style=wx.TE_PROCESS_ENTER)
self.reset_button = wx.Button(parent, id=wx.ID_CLEAR)
self.search_button = wx.Button(parent, id=wx.ID_FIND)
vbox_l = wx.BoxSizer(wx.VERTICAL)
vbox_r = wx.BoxSizer(wx.VERTICAL)
hbox_bodies = wx.BoxSizer(wx.HORIZONTAL)
hbox_buttons = wx.BoxSizer(wx.HORIZONTAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
vbox_coords_ls = wx.BoxSizer(wx.VERTICAL)
vbox_coords = wx.BoxSizer(wx.VERTICAL)
vbox_radius = wx.BoxSizer(wx.VERTICAL)
hbox_minsec = wx.BoxSizer(wx.HORIZONTAL)
hbox_maxsec = wx.BoxSizer(wx.HORIZONTAL)
hbox_center = wx.BoxSizer(wx.HORIZONTAL)
hbox_radius = wx.BoxSizer(wx.HORIZONTAL)
self.Add(vbox_l, proportion=3, flag=wx.EXPAND)
self.Add(vbox_r, proportion=1, flag=wx.EXPAND)
vbox_l.Add(hbox1, proportion=0, flag=wx.EXPAND)
vbox_l.Add(hbox2, proportion=0, flag=wx.EXPAND)
vbox_l.Add(hbox3, proportion=0, flag=wx.EXPAND)
vbox_l.Add(hbox4, proportion=1, flag=wx.EXPAND)
vbox_r.Add(hbox_bodies, proportion=1, flag=wx.EXPAND)
hbox3.Add(vbox_coords_ls, proportion=0, flag=wx.LEFT, border=5)
hbox3.Add(vbox_coords, proportion=1)
hbox3.Add(vbox_radius, proportion=2)
hbox4.Add(hbox_buttons, proportion=0, flag=wx.EXPAND)
vbox_coords.Add(hbox_minsec, proportion=0, flag=wx.EXPAND)
vbox_coords.Add(hbox_maxsec, proportion=0, flag=wx.EXPAND)
vbox_radius.Add(hbox_center, proportion=0, flag=wx.EXPAND)
vbox_radius.Add(hbox_radius, proportion=0, flag=wx.EXPAND)
hbox1.Add(name_l, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox1.Add(self.name_field, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox1.Add(tl_l, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox1.Add(self.tl_field, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox1.Add(galaxy_l, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox1.Add(self.galaxy_field, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox2.Add(planet_l, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox2.Add(self.planet_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox2.Add(system_l, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox2.Add(self.system_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox2.Add(sector_l, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox2.Add(self.sector_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
vbox_coords_ls.Add(minsec_l, proportion=1, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=5)
vbox_coords_ls.Add(maxsec_l, proportion=1, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=5)
hbox_minsec.Add(self.minsecx_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_minsec.Add(self.minsecy_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_minsec.Add(self.minsecz_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_maxsec.Add(self.maxsecx_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_maxsec.Add(self.maxsecy_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_maxsec.Add(self.maxsecz_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_center.Add(center_l, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_center.Add(self.centerx_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_center.Add(self.centery_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_center.Add(self.centerz_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_radius.Add(radius_l, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_radius.Add(self.radius_field, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_buttons.Add(self.reset_button, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_buttons.Add(self.search_button, proportion=0, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, border=5)
hbox_bodies.Add(self.orbit_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT|wx.EXPAND, border=5)
hbox_bodies.Add(self.body_field, proportion=1, flag=wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT|wx.EXPAND, border=5)
#bind the search controls
grandparent.Bind(wx.EVT_BUTTON, self.OnReset, id=self.reset_button.GetId())
grandparent.Bind(wx.EVT_BUTTON, grandparent.OnSearch, id=self.search_button.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.name_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.tl_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.galaxy_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.planet_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.system_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.sector_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.minsecx_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.minsecy_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.minsecz_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.maxsecx_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.maxsecy_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.maxsecz_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.centerx_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.centery_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.centerz_field.GetId())
grandparent.Bind(wx.EVT_TEXT_ENTER, grandparent.OnSearch, id=self.radius_field.GetId())
def OnReset(self, e):
self.name_field.SetValue("")
self.tl_field.SetValue("")
self.galaxy_field.SetValue("")
self.orbit_field.SetSelection(wx.NOT_FOUND)
self.body_field.SetSelection(wx.NOT_FOUND)
self.planet_field.SetValue("")
self.system_field.SetValue("")
self.sector_field.SetValue("")
self.minsecx_field.SetValue("")
self.minsecy_field.SetValue("")
self.minsecz_field.SetValue("")
self.maxsecx_field.SetValue("")
self.maxsecy_field.SetValue("")
self.maxsecz_field.SetValue("")
self.centerx_field.SetValue("")
self.centery_field.SetValue("")
self.centerz_field.SetValue("")
self.radius_field.SetValue("")
class Galactiscan(wx.Frame):
def __init__(self, *args, **kwargs):
super(Galactiscan, self).__init__(*args, **kwargs)
self.InitUI()
def OnSearch(self, e):
self.status.SetStatusText("Searching...")
name = self.search_controls.name_field.GetValue()
tl = self.search_controls.tl_field.GetValue()
galaxy = self.search_controls.galaxy_field.GetValue()
planet = self.search_controls.planet_field.GetValue()
system = self.search_controls.system_field.GetValue()
sector = self.search_controls.sector_field.GetValue()
minsecx = self.search_controls.minsecx_field.GetValue()
minsecy = self.search_controls.minsecy_field.GetValue()
minsecz = self.search_controls.minsecz_field.GetValue()
maxsecx = self.search_controls.maxsecx_field.GetValue()
maxsecy = self.search_controls.maxsecy_field.GetValue()
maxsecz = self.search_controls.maxsecz_field.GetValue()
centerx = self.search_controls.centerx_field.GetValue()
centery = self.search_controls.centery_field.GetValue()
centerz = self.search_controls.centerz_field.GetValue()
radius = self.search_controls.radius_field.GetValue()
def values_from_indices(l, indices):
ret = []
for i in indices:
ret.append(l[i])
return ret
orbits = values_from_indices(orbit_zones, self.search_controls.orbit_field.GetSelections())
bodies = values_from_indices(body_kinds, self.search_controls.body_field.GetSelections())
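#only issue a query when at least one of the text filters has content;
#the string concatenation below is a compact "any field filled in" check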
if name+tl+planet+system+sector != '':
rows = data.find_resources(exactname=name, mintl=tl, orbit_zones=orbits, body_kinds=bodies,
planet=planet, system=system, sector=sector, galaxy=galaxy,
minsecx=minsecx, minsecy=minsecy, minsecz=minsecz,
maxsecx=maxsecx, maxsecy=maxsecy, maxsecz=maxsecz,
centerx=centerx, centery=centery, centerz=centerz,
radius=radius,
)
self.list.SetData(data.format_as_dict(rows))
self.status.SetStatusText("%d resources found" % len(rows))
def DefineDatabase(self, e):
last_path = os.path.abspath(data.get_database_path())
last_dir, last_file = os.path.split(last_path)
dialog = wx.FileDialog(self, message="Please select the file you wish to use.",
style=wx.FD_SAVE,
defaultDir=last_dir,
defaultFile=last_file,
wildcard='Database files (*.sqlite3)|*.sqlite3|All files|*',
)
if dialog.ShowModal() == wx.ID_OK:
path = dialog.GetPath()
data.set_database_path(path)
self.status.SetStatusText("Database set to %s" % path)
else:
self.status.SetStatusText("Database unchanged (%s)" % last_path)
def AddFile(self, e):
last_path = os.path.abspath(data.get_last_starmap_path())
last_dir, last_file = os.path.split(last_path)
dialog = wx.FileDialog(self, message="Please select the files you wish to process.",
style=wx.FD_OPEN|wx.FD_MULTIPLE|wx.FD_FILE_MUST_EXIST,
defaultDir=last_dir,
defaultFile=last_file,
wildcard='XML files (*.xml)|*.xml|All files|*',
)
if dialog.ShowModal() == wx.ID_OK:
paths = dialog.GetPaths()
data.set_last_starmap_path(paths[-1])
self.status.SetStatusText("Now processing %s files..." % len(paths))
count = data.add_files(paths)
self.status.SetStatusText("%s surveys added" % count)
else:
self.status.SetStatusText("No surveys added")
def ReprocessDatabase(self, e):
#TODO: This is very slow; make it asynchronous and add a progress meter.
self.status.SetStatusText("Reprocessing database...")
count = data.add_files_from_internal_raws()
if count > 0:
self.status.SetStatusText("%s surveys added" % count)
else:
self.status.SetStatusText("No surveys added")
def ClearDatabase(self, e):
self.status.SetStatusText("Clearing database...")
data.drop_tables()
self.status.SetStatusText("Database cleared")
#def OnPaste(self, e):
# if not wx.TheClipboard.Open():
# #maybe it is already open, so close it and try again
# wx.TheClipboard.Close()
# if not wx.TheClipboard.Open():
# #give up
# self.status.SetStatusText("Could not open clipboard")
# return
# if wx.TheClipboard.IsSupported(wx.DataFormat(wx.DF_TEXT)):
# self.status.SetStatusText("Processing clipboard...")
# data_object = wx.TextDataObject()
# wx.TheClipboard.GetData(data_object)
# count = data.add_text(data_object.GetText())
# self.status.SetStatusText("%s surveys added from clipboard" % count)
# else:
# self.status.SetStatusText("Text is not supported by the clipboard")
# wx.TheClipboard.Close()
def OnResize(self, e):
#save the new dimensions
wx.Config.Get().WriteInt('/window/width', e.GetSize()[0])
wx.Config.Get().WriteInt('/window/height', e.GetSize()[1])
#allow normal event code to run
e.Skip()
def ShowAbout(self, e):
self.about.Show(True)
def InitUI(self):
self.SetTitle(version.fancy_name)
if wx.Config.Get().HasEntry('/window/width'):
size = (wx.Config.Get().ReadInt('/window/width'), wx.Config.Get().ReadInt('/window/height'))
else:
size = (1000, 400)
self.SetSize(size)
self.Bind(wx.EVT_SIZE, self.OnResize, self)
#set up the menus
self.menubar = Menubar(self)
#set up the about window
self.about = about.AboutWindow(self)
#set up the panel
panel = wx.Panel(self)
main_vbox = wx.BoxSizer(wx.VERTICAL)
panel.SetSizer(main_vbox)
#add the toolbar
self.toolbar = Toolbar(panel, self)
main_vbox.Add(self.toolbar, proportion=0, flag=wx.TOP|wx.BOTTOM|wx.EXPAND, border=0)
#the main viewing area
stuff = {
0 : ('', '', '', '', '', '', '', '', '', '', '', '', ''),
}
self.list = ResultListCtrl(panel, stuff)
main_vbox.Add(self.list, proportion=1, flag=wx.EXPAND)
#add the search controls
self.search_controls = SearchControls(panel, self)
main_vbox.Add(self.search_controls, proportion=0, flag=wx.TOP|wx.BOTTOM|wx.EXPAND, border=0)
#the status bar
self.status = wx.StatusBar(panel)
self.status.SetFieldsCount(1)
self.status.SetStatusStyles([wx.SB_FLAT])
self.status.SetStatusText("Welcome to %s" % (version.fancy_string))
main_vbox.Add(self.status, proportion=0, flag=wx.EXPAND, border=0)
self.Show(True)
def OnQuit(self, e):
self.Close()
def main():
Galactiscan(None)
| pizzasgood/galactiscan | gui.py | Python | gpl-3.0 | 22,558 | ["Galaxy"] | 5e21048435b5e88b7a7355998f35d07985259468b79ad2c8f2fd7d673b0a9f93 |
# cmdutil.py - help for command processing in mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, nullrev, short
from i18n import _
import os, sys, errno, re, tempfile
import util, scmutil, templater, patch, error, templatekw, revlog, copies
import match as matchmod
import subrepo, context, repair, graphmod, revset, phases, obsolete, pathutil
import changelog
import bookmarks
import lock as lockmod
def parsealiases(cmd):
return cmd.lstrip("^").split("|")
def findpossible(cmd, table, strict=False):
"""
Return cmd -> (aliases, command table entry)
for each matching command.
Return debug commands (or their aliases) only if no normal command matches.
"""
choice = {}
debugchoice = {}
if cmd in table:
# short-circuit exact matches, "log" alias beats "^log|history"
keys = [cmd]
else:
keys = table.keys()
for e in keys:
aliases = parsealiases(e)
found = None
if cmd in aliases:
found = cmd
elif not strict:
for a in aliases:
if a.startswith(cmd):
found = a
break
if found is not None:
if aliases[0].startswith("debug") or found.startswith("debug"):
debugchoice[found] = (aliases, table[e])
else:
choice[found] = (aliases, table[e])
if not choice and debugchoice:
choice = debugchoice
return choice
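# Illustration with a hypothetical table containing "^log|history" and
# "locate": findpossible("log") matches only the log entry (exact alias),
# findpossible("lo") prefix-matches both entries when strict is False,
# and findpossible("lo", strict=True) matches neither.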
def findcmd(cmd, table, strict=True):
"""Return (aliases, command table entry) for command string."""
choice = findpossible(cmd, table, strict)
if cmd in choice:
return choice[cmd]
if len(choice) > 1:
clist = choice.keys()
clist.sort()
raise error.AmbiguousCommand(cmd, clist)
if choice:
return choice.values()[0]
raise error.UnknownCommand(cmd)
def findrepo(p):
while not os.path.isdir(os.path.join(p, ".hg")):
oldp, p = p, os.path.dirname(p)
if p == oldp:
return None
return p
def bailifchanged(repo):
if repo.dirstate.p2() != nullid:
raise util.Abort(_('outstanding uncommitted merge'))
modified, added, removed, deleted = repo.status()[:4]
if modified or added or removed or deleted:
raise util.Abort(_('uncommitted changes'))
ctx = repo[None]
for s in sorted(ctx.substate):
if ctx.sub(s).dirty():
raise util.Abort(_("uncommitted changes in subrepo %s") % s)
def logmessage(ui, opts):
""" get the log message according to -m and -l option """
message = opts.get('message')
logfile = opts.get('logfile')
if message and logfile:
raise util.Abort(_('options --message and --logfile are mutually '
'exclusive'))
if not message and logfile:
try:
if logfile == '-':
message = ui.fin.read()
else:
message = '\n'.join(util.readfile(logfile).splitlines())
except IOError, inst:
raise util.Abort(_("can't read commit message '%s': %s") %
(logfile, inst.strerror))
return message
def loglimit(opts):
"""get the log limit according to option -l/--limit"""
limit = opts.get('limit')
if limit:
try:
limit = int(limit)
except ValueError:
raise util.Abort(_('limit must be a positive integer'))
if limit <= 0:
raise util.Abort(_('limit must be positive'))
else:
limit = None
return limit
def makefilename(repo, pat, node, desc=None,
total=None, seqno=None, revwidth=None, pathname=None):
node_expander = {
'H': lambda: hex(node),
'R': lambda: str(repo.changelog.rev(node)),
'h': lambda: short(node),
'm': lambda: re.sub('[^\w]', '_', str(desc))
}
expander = {
'%': lambda: '%',
'b': lambda: os.path.basename(repo.root),
}
try:
if node:
expander.update(node_expander)
if node:
expander['r'] = (lambda:
str(repo.changelog.rev(node)).zfill(revwidth or 0))
if total is not None:
expander['N'] = lambda: str(total)
if seqno is not None:
expander['n'] = lambda: str(seqno)
if total is not None and seqno is not None:
expander['n'] = lambda: str(seqno).zfill(len(str(total)))
if pathname is not None:
expander['s'] = lambda: os.path.basename(pathname)
expander['d'] = lambda: os.path.dirname(pathname) or '.'
expander['p'] = lambda: pathname
newname = []
patlen = len(pat)
i = 0
while i < patlen:
c = pat[i]
if c == '%':
i += 1
c = pat[i]
c = expander[c]()
newname.append(c)
i += 1
return ''.join(newname)
except KeyError, inst:
raise util.Abort(_("invalid format spec '%%%s' in output filename") %
inst.args[0])
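# Example of the format specs handled above (hypothetical values): with
# pat = 'export-%r-%b.patch', revwidth = 3, a node at revision 42 and a
# repository rooted at /src/myrepo, the result is 'export-042-myrepo.patch'
# ('%r' is the zero-padded revision, '%b' the basename of the repo root);
# an unrecognized spec falls through to the "invalid format spec" abort.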
def makefileobj(repo, pat, node=None, desc=None, total=None,
seqno=None, revwidth=None, mode='wb', modemap=None,
pathname=None):
writable = mode not in ('r', 'rb')
if not pat or pat == '-':
fp = writable and repo.ui.fout or repo.ui.fin
if util.safehasattr(fp, 'fileno'):
return os.fdopen(os.dup(fp.fileno()), mode)
else:
# if this fp can't be duped properly, return
# a dummy object that can be closed
class wrappedfileobj(object):
noop = lambda x: None
def __init__(self, f):
self.f = f
def __getattr__(self, attr):
if attr == 'close':
return self.noop
else:
return getattr(self.f, attr)
return wrappedfileobj(fp)
if util.safehasattr(pat, 'write') and writable:
return pat
if util.safehasattr(pat, 'read') and 'r' in mode:
return pat
fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
if modemap is not None:
mode = modemap.get(fn, mode)
if mode == 'wb':
modemap[fn] = 'ab'
return open(fn, mode)
def openrevlog(repo, cmd, file_, opts):
"""opens the changelog, manifest, a filelog or a given revlog"""
cl = opts['changelog']
mf = opts['manifest']
msg = None
if cl and mf:
msg = _('cannot specify --changelog and --manifest at the same time')
elif cl or mf:
if file_:
msg = _('cannot specify filename with --changelog or --manifest')
elif not repo:
msg = _('cannot specify --changelog or --manifest '
'without a repository')
if msg:
raise util.Abort(msg)
r = None
if repo:
if cl:
r = repo.changelog
elif mf:
r = repo.manifest
elif file_:
filelog = repo.file(file_)
if len(filelog):
r = filelog
if not r:
if not file_:
raise error.CommandError(cmd, _('invalid arguments'))
if not os.path.isfile(file_):
raise util.Abort(_("revlog '%s' not found") % file_)
r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
file_[:-2] + ".i")
return r
def copy(ui, repo, pats, opts, rename=False):
# called with the repo lock held
#
# hgsep => pathname that uses "/" to separate directories
# ossep => pathname that uses os.sep to separate directories
cwd = repo.getcwd()
targets = {}
after = opts.get("after")
dryrun = opts.get("dry_run")
wctx = repo[None]
def walkpat(pat):
srcs = []
badstates = after and '?' or '?r'
m = scmutil.match(repo[None], [pat], opts, globbed=True)
for abs in repo.walk(m):
state = repo.dirstate[abs]
rel = m.rel(abs)
exact = m.exact(abs)
if state in badstates:
if exact and state == '?':
ui.warn(_('%s: not copying - file is not managed\n') % rel)
if exact and state == 'r':
ui.warn(_('%s: not copying - file has been marked for'
' remove\n') % rel)
continue
# abs: hgsep
# rel: ossep
srcs.append((abs, rel, exact))
return srcs
# abssrc: hgsep
# relsrc: ossep
# otarget: ossep
def copyfile(abssrc, relsrc, otarget, exact):
abstarget = pathutil.canonpath(repo.root, cwd, otarget)
if '/' in abstarget:
# We cannot normalize abstarget itself; that would prevent
# case-only renames, like a => A.
abspath, absname = abstarget.rsplit('/', 1)
abstarget = repo.dirstate.normalize(abspath) + '/' + absname
reltarget = repo.pathto(abstarget, cwd)
target = repo.wjoin(abstarget)
src = repo.wjoin(abssrc)
state = repo.dirstate[abstarget]
scmutil.checkportable(ui, abstarget)
# check for collisions
prevsrc = targets.get(abstarget)
if prevsrc is not None:
ui.warn(_('%s: not overwriting - %s collides with %s\n') %
(reltarget, repo.pathto(abssrc, cwd),
repo.pathto(prevsrc, cwd)))
return
# check for overwrites
exists = os.path.lexists(target)
samefile = False
if exists and abssrc != abstarget:
if (repo.dirstate.normalize(abssrc) ==
repo.dirstate.normalize(abstarget)):
if not rename:
ui.warn(_("%s: can't copy - same file\n") % reltarget)
return
exists = False
samefile = True
if not after and exists or after and state in 'mn':
if not opts['force']:
ui.warn(_('%s: not overwriting - file exists\n') %
reltarget)
return
if after:
if not exists:
if rename:
ui.warn(_('%s: not recording move - %s does not exist\n') %
(relsrc, reltarget))
else:
ui.warn(_('%s: not recording copy - %s does not exist\n') %
(relsrc, reltarget))
return
elif not dryrun:
try:
if exists:
os.unlink(target)
targetdir = os.path.dirname(target) or '.'
if not os.path.isdir(targetdir):
os.makedirs(targetdir)
if samefile:
tmp = target + "~hgrename"
os.rename(src, tmp)
os.rename(tmp, target)
else:
util.copyfile(src, target)
srcexists = True
except IOError, inst:
if inst.errno == errno.ENOENT:
ui.warn(_('%s: deleted in working copy\n') % relsrc)
srcexists = False
else:
ui.warn(_('%s: cannot copy - %s\n') %
(relsrc, inst.strerror))
return True # report a failure
if ui.verbose or not exact:
if rename:
ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
else:
ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
targets[abstarget] = abssrc
# fix up dirstate
scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
dryrun=dryrun, cwd=cwd)
if rename and not dryrun:
if not after and srcexists and not samefile:
util.unlinkpath(repo.wjoin(abssrc))
wctx.forget([abssrc])
# pat: ossep
# dest ossep
# srcs: list of (hgsep, hgsep, ossep, bool)
# return: function that takes hgsep and returns ossep
def targetpathfn(pat, dest, srcs):
if os.path.isdir(pat):
abspfx = pathutil.canonpath(repo.root, cwd, pat)
abspfx = util.localpath(abspfx)
if destdirexists:
striplen = len(os.path.split(abspfx)[0])
else:
striplen = len(abspfx)
if striplen:
striplen += len(os.sep)
res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
elif destdirexists:
res = lambda p: os.path.join(dest,
os.path.basename(util.localpath(p)))
else:
res = lambda p: dest
return res
# pat: ossep
# dest ossep
# srcs: list of (hgsep, hgsep, ossep, bool)
# return: function that takes hgsep and returns ossep
def targetpathafterfn(pat, dest, srcs):
if matchmod.patkind(pat):
# a mercurial pattern
res = lambda p: os.path.join(dest,
os.path.basename(util.localpath(p)))
else:
abspfx = pathutil.canonpath(repo.root, cwd, pat)
if len(abspfx) < len(srcs[0][0]):
# A directory. Either the target path contains the last
# component of the source path or it does not.
def evalpath(striplen):
score = 0
for s in srcs:
t = os.path.join(dest, util.localpath(s[0])[striplen:])
if os.path.lexists(t):
score += 1
return score
abspfx = util.localpath(abspfx)
striplen = len(abspfx)
if striplen:
striplen += len(os.sep)
if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
score = evalpath(striplen)
striplen1 = len(os.path.split(abspfx)[0])
if striplen1:
striplen1 += len(os.sep)
if evalpath(striplen1) > score:
striplen = striplen1
res = lambda p: os.path.join(dest,
util.localpath(p)[striplen:])
else:
# a file
if destdirexists:
res = lambda p: os.path.join(dest,
os.path.basename(util.localpath(p)))
else:
res = lambda p: dest
return res
pats = scmutil.expandpats(pats)
if not pats:
raise util.Abort(_('no source or destination specified'))
if len(pats) == 1:
raise util.Abort(_('no destination specified'))
dest = pats.pop()
destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
if not destdirexists:
if len(pats) > 1 or matchmod.patkind(pats[0]):
raise util.Abort(_('with multiple sources, destination must be an '
'existing directory'))
if util.endswithsep(dest):
raise util.Abort(_('destination %s is not a directory') % dest)
tfn = targetpathfn
if after:
tfn = targetpathafterfn
copylist = []
for pat in pats:
srcs = walkpat(pat)
if not srcs:
continue
copylist.append((tfn(pat, dest, srcs), srcs))
if not copylist:
raise util.Abort(_('no files to copy'))
errors = 0
for targetpath, srcs in copylist:
for abssrc, relsrc, exact in srcs:
if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
errors += 1
if errors:
ui.warn(_('(consider using --after)\n'))
return errors != 0
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
runargs=None, appendpid=False):
'''Run a command as a service.'''
def writepid(pid):
if opts['pid_file']:
mode = appendpid and 'a' or 'w'
fp = open(opts['pid_file'], mode)
fp.write(str(pid) + '\n')
fp.close()
if opts['daemon'] and not opts['daemon_pipefds']:
# Signal child process startup with file removal
lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
os.close(lockfd)
try:
if not runargs:
runargs = util.hgcmd() + sys.argv[1:]
runargs.append('--daemon-pipefds=%s' % lockpath)
# Don't pass --cwd to the child process, because we've already
# changed directory.
for i in xrange(1, len(runargs)):
if runargs[i].startswith('--cwd='):
del runargs[i]
break
elif runargs[i].startswith('--cwd'):
del runargs[i:i + 2]
break
def condfn():
return not os.path.exists(lockpath)
pid = util.rundetached(runargs, condfn)
if pid < 0:
raise util.Abort(_('child process failed to start'))
writepid(pid)
finally:
try:
os.unlink(lockpath)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if parentfn:
return parentfn(pid)
else:
return
if initfn:
initfn()
if not opts['daemon']:
writepid(os.getpid())
if opts['daemon_pipefds']:
lockpath = opts['daemon_pipefds']
try:
os.setsid()
except AttributeError:
pass
os.unlink(lockpath)
util.hidewindow()
sys.stdout.flush()
sys.stderr.flush()
nullfd = os.open(os.devnull, os.O_RDWR)
logfilefd = nullfd
if logfile:
logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
os.dup2(nullfd, 0)
os.dup2(logfilefd, 1)
os.dup2(logfilefd, 2)
if nullfd not in (0, 1, 2):
os.close(nullfd)
if logfile and logfilefd not in (0, 1, 2):
os.close(logfilefd)
if runfn:
return runfn()
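# Illustrative sketch (not part of the original module): a command that
# exposes --daemon, --daemon-pipefds and --pid-file options (the keys read
# above) would typically delegate to service() along these lines, where
# "run" is a hypothetical callable supplied by the caller:
#
#   def mycommand(ui, repo, **opts):
#       def run():
#           pass  # long-running work goes here
#       cmdutil.service(opts, runfn=run)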
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
opts=None):
'''export changesets as hg patches.'''
total = len(revs)
revwidth = max([len(str(rev)) for rev in revs])
filemode = {}
def single(rev, seqno, fp):
ctx = repo[rev]
node = ctx.node()
parents = [p.node() for p in ctx.parents() if p]
branch = ctx.branch()
if switch_parent:
parents.reverse()
prev = (parents and parents[0]) or nullid
shouldclose = False
if not fp and len(template) > 0:
desc_lines = ctx.description().rstrip().split('\n')
desc = desc_lines[0] #Commit always has a first line.
fp = makefileobj(repo, template, node, desc=desc, total=total,
seqno=seqno, revwidth=revwidth, mode='wb',
modemap=filemode)
if fp != template:
shouldclose = True
if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
repo.ui.note("%s\n" % fp.name)
if not fp:
write = repo.ui.write
else:
def write(s, **kw):
fp.write(s)
write("# HG changeset patch\n")
write("# User %s\n" % ctx.user())
write("# Date %d %d\n" % ctx.date())
write("# %s\n" % util.datestr(ctx.date()))
if branch and branch != 'default':
write("# Branch %s\n" % branch)
write("# Node ID %s\n" % hex(node))
write("# Parent %s\n" % hex(prev))
if len(parents) > 1:
write("# Parent %s\n" % hex(parents[1]))
write(ctx.description().rstrip())
write("\n\n")
for chunk, label in patch.diffui(repo, prev, node, opts=opts):
write(chunk, label=label)
if shouldclose:
fp.close()
for seqno, rev in enumerate(revs):
single(rev, seqno + 1, fp)
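# Illustrative sketch (not part of the original module): exporting the two
# most recent revisions to files named after their short hashes could look
# roughly like this; the %h placeholder in the template is expanded by
# makefileobj():
#
#   revs = [len(repo) - 2, len(repo) - 1]
#   export(repo, revs, template='hg-%h.patch')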
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
changes=None, stat=False, fp=None, prefix='',
listsubrepos=False):
'''show diff or diffstat.'''
if fp is None:
write = ui.write
else:
def write(s, **kw):
fp.write(s)
if stat:
diffopts = diffopts.copy(context=0)
width = 80
if not ui.plain():
width = ui.termwidth()
chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
prefix=prefix)
for chunk, label in patch.diffstatui(util.iterlines(chunks),
width=width,
git=diffopts.git):
write(chunk, label=label)
else:
for chunk, label in patch.diffui(repo, node1, node2, match,
changes, diffopts, prefix=prefix):
write(chunk, label=label)
if listsubrepos:
ctx1 = repo[node1]
ctx2 = repo[node2]
for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
tempnode2 = node2
try:
if node2 is not None:
tempnode2 = ctx2.substate[subpath][1]
except KeyError:
# A subrepo that existed in node1 was deleted between node1 and
# node2 (inclusive). Thus, ctx2's substate won't contain that
# subpath. The best we can do is to ignore it.
tempnode2 = None
submatch = matchmod.narrowmatcher(subpath, match)
sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
stat=stat, fp=fp, prefix=prefix)
class changeset_printer(object):
'''show changeset information when templating not requested.'''
def __init__(self, ui, repo, patch, diffopts, buffered):
self.ui = ui
self.repo = repo
self.buffered = buffered
self.patch = patch
self.diffopts = diffopts
self.header = {}
self.hunk = {}
self.lastheader = None
self.footer = None
def flush(self, rev):
if rev in self.header:
h = self.header[rev]
if h != self.lastheader:
self.lastheader = h
self.ui.write(h)
del self.header[rev]
if rev in self.hunk:
self.ui.write(self.hunk[rev])
del self.hunk[rev]
return 1
return 0
def close(self):
if self.footer:
self.ui.write(self.footer)
def show(self, ctx, copies=None, matchfn=None, **props):
if self.buffered:
self.ui.pushbuffer()
self._show(ctx, copies, matchfn, props)
self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
else:
self._show(ctx, copies, matchfn, props)
def _show(self, ctx, copies, matchfn, props):
'''show a single changeset or file revision'''
changenode = ctx.node()
rev = ctx.rev()
if self.ui.quiet:
self.ui.write("%d:%s\n" % (rev, short(changenode)),
label='log.node')
return
log = self.repo.changelog
date = util.datestr(ctx.date())
hexfunc = self.ui.debugflag and hex or short
parents = [(p, hexfunc(log.node(p)))
for p in self._meaningful_parentrevs(log, rev)]
# i18n: column positioning for "hg log"
self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
label='log.changeset changeset.%s' % ctx.phasestr())
branch = ctx.branch()
# don't show the default branch name
if branch != 'default':
# i18n: column positioning for "hg log"
self.ui.write(_("branch: %s\n") % branch,
label='log.branch')
for bookmark in self.repo.nodebookmarks(changenode):
# i18n: column positioning for "hg log"
self.ui.write(_("bookmark: %s\n") % bookmark,
label='log.bookmark')
for tag in self.repo.nodetags(changenode):
# i18n: column positioning for "hg log"
self.ui.write(_("tag: %s\n") % tag,
label='log.tag')
if self.ui.debugflag and ctx.phase():
# i18n: column positioning for "hg log"
self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
label='log.phase')
for parent in parents:
# i18n: column positioning for "hg log"
self.ui.write(_("parent: %d:%s\n") % parent,
label='log.parent changeset.%s' % ctx.phasestr())
if self.ui.debugflag:
mnode = ctx.manifestnode()
# i18n: column positioning for "hg log"
self.ui.write(_("manifest: %d:%s\n") %
(self.repo.manifest.rev(mnode), hex(mnode)),
label='ui.debug log.manifest')
# i18n: column positioning for "hg log"
self.ui.write(_("user: %s\n") % ctx.user(),
label='log.user')
# i18n: column positioning for "hg log"
self.ui.write(_("date: %s\n") % date,
label='log.date')
if self.ui.debugflag:
files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
for key, value in zip([# i18n: column positioning for "hg log"
_("files:"),
# i18n: column positioning for "hg log"
_("files+:"),
# i18n: column positioning for "hg log"
_("files-:")], files):
if value:
self.ui.write("%-12s %s\n" % (key, " ".join(value)),
label='ui.debug log.files')
elif ctx.files() and self.ui.verbose:
# i18n: column positioning for "hg log"
self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
label='ui.note log.files')
if copies and self.ui.verbose:
copies = ['%s (%s)' % c for c in copies]
# i18n: column positioning for "hg log"
self.ui.write(_("copies: %s\n") % ' '.join(copies),
label='ui.note log.copies')
extra = ctx.extra()
if extra and self.ui.debugflag:
for key, value in sorted(extra.items()):
# i18n: column positioning for "hg log"
self.ui.write(_("extra: %s=%s\n")
% (key, value.encode('string_escape')),
label='ui.debug log.extra')
description = ctx.description().strip()
if description:
if self.ui.verbose:
self.ui.write(_("description:\n"),
label='ui.note log.description')
self.ui.write(description,
label='ui.note log.description')
self.ui.write("\n\n")
else:
# i18n: column positioning for "hg log"
self.ui.write(_("summary: %s\n") %
description.splitlines()[0],
label='log.summary')
self.ui.write("\n")
self.showpatch(changenode, matchfn)
def showpatch(self, node, matchfn):
if not matchfn:
matchfn = self.patch
if matchfn:
stat = self.diffopts.get('stat')
diff = self.diffopts.get('patch')
diffopts = patch.diffopts(self.ui, self.diffopts)
prev = self.repo.changelog.parents(node)[0]
if stat:
diffordiffstat(self.ui, self.repo, diffopts, prev, node,
match=matchfn, stat=True)
if diff:
if stat:
self.ui.write("\n")
diffordiffstat(self.ui, self.repo, diffopts, prev, node,
match=matchfn, stat=False)
self.ui.write("\n")
def _meaningful_parentrevs(self, log, rev):
"""Return list of meaningful (or all if debug) parentrevs for rev.
For merges (two non-nullrev revisions) both parents are meaningful.
Otherwise the first parent revision is considered meaningful if it
is not the preceding revision.
"""
parents = log.parentrevs(rev)
if not self.ui.debugflag and parents[1] == nullrev:
if parents[0] >= rev - 1:
parents = []
else:
parents = [parents[0]]
return parents
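# For example, in a purely linear history each revision's first parent is the
# immediately preceding revision, so the method above returns [] and "hg log"
# omits the "parent:" lines; they are shown for merges, for changesets whose
# first parent is not rev - 1, and always when --debug is in effect.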
class changeset_templater(changeset_printer):
'''format changeset information.'''
def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
defaulttempl = {
'parent': '{rev}:{node|formatnode} ',
'manifest': '{rev}:{node|formatnode}',
'file_copy': '{name} ({source})',
'extra': '{key}={value|stringescape}'
}
# filecopy is preserved for compatibility reasons
defaulttempl['filecopy'] = defaulttempl['file_copy']
self.t = templater.templater(mapfile, {'formatnode': formatnode},
cache=defaulttempl)
self.cache = {}
def use_template(self, t):
'''set template string to use'''
self.t.cache['changeset'] = t
def _meaningful_parentrevs(self, ctx):
"""Return list of meaningful (or all if debug) parentrevs for rev.
"""
parents = ctx.parents()
if len(parents) > 1:
return parents
if self.ui.debugflag:
return [parents[0], self.repo['null']]
if parents[0].rev() >= ctx.rev() - 1:
return []
return parents
def _show(self, ctx, copies, matchfn, props):
'''show a single changeset or file revision'''
showlist = templatekw.showlist
# showparents() behaviour depends on ui trace level which
# causes unexpected behaviours at templating level and makes
# it harder to extract it in a standalone function. Its
# behaviour cannot be changed so leave it here for now.
def showparents(**args):
ctx = args['ctx']
parents = [[('rev', p.rev()), ('node', p.hex())]
for p in self._meaningful_parentrevs(ctx)]
return showlist('parent', parents, **args)
props = props.copy()
props.update(templatekw.keywords)
props['parents'] = showparents
props['templ'] = self.t
props['ctx'] = ctx
props['repo'] = self.repo
props['revcache'] = {'copies': copies}
props['cache'] = self.cache
# find correct templates for current mode
tmplmodes = [
(True, None),
(self.ui.verbose, 'verbose'),
(self.ui.quiet, 'quiet'),
(self.ui.debugflag, 'debug'),
]
types = {'header': '', 'footer':'', 'changeset': 'changeset'}
for mode, postfix in tmplmodes:
for type in types:
cur = postfix and ('%s_%s' % (type, postfix)) or type
if mode and cur in self.t:
types[type] = cur
try:
# write header
if types['header']:
h = templater.stringify(self.t(types['header'], **props))
if self.buffered:
self.header[ctx.rev()] = h
else:
if self.lastheader != h:
self.lastheader = h
self.ui.write(h)
# write changeset metadata, then patch if requested
key = types['changeset']
self.ui.write(templater.stringify(self.t(key, **props)))
self.showpatch(ctx.node(), matchfn)
if types['footer']:
if not self.footer:
self.footer = templater.stringify(self.t(types['footer'],
**props))
except KeyError, inst:
msg = _("%s: no key named '%s'")
raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
except SyntaxError, inst:
raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
def show_changeset(ui, repo, opts, buffered=False):
"""show one changeset using template or regular display.
Display format will be the first non-empty hit of:
1. option 'template'
2. option 'style'
3. [ui] setting 'logtemplate'
4. [ui] setting 'style'
If all of these values are either the unset or the empty string,
regular display via changeset_printer() is done.
"""
# options
patch = None
if opts.get('patch') or opts.get('stat'):
patch = scmutil.matchall(repo)
tmpl = opts.get('template')
style = None
if not tmpl:
style = opts.get('style')
# ui settings
if not (tmpl or style):
tmpl = ui.config('ui', 'logtemplate')
if tmpl:
try:
tmpl = templater.parsestring(tmpl)
except SyntaxError:
tmpl = templater.parsestring(tmpl, quoted=False)
else:
style = util.expandpath(ui.config('ui', 'style', ''))
if not (tmpl or style):
return changeset_printer(ui, repo, patch, opts, buffered)
mapfile = None
if style and not tmpl:
mapfile = style
if not os.path.split(mapfile)[0]:
mapname = (templater.templatepath('map-cmdline.' + mapfile)
or templater.templatepath(mapfile))
if mapname:
mapfile = mapname
try:
t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
except SyntaxError, inst:
raise util.Abort(inst.args[0])
if tmpl:
t.use_template(tmpl)
return t
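# Illustrative sketch (not part of the original module): the precedence above
# means an explicit --template always wins.  For instance, with
#
#   [ui]
#   logtemplate = {rev}:{node|short} {desc|firstline}\n
#
# in the hgrc, a plain "hg log" goes through changeset_templater using that
# template, "hg log --template '{rev}\n'" overrides it, and if both
# --template and --style are passed the style is ignored.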
def finddate(ui, repo, date):
"""Find the tipmost changeset that matches the given date spec"""
df = util.matchdate(date)
m = scmutil.matchall(repo)
results = {}
def prep(ctx, fns):
d = ctx.date()
if df(d[0]):
results[ctx.rev()] = d
for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
rev = ctx.rev()
if rev in results:
ui.status(_("found revision %s from %s\n") %
(rev, util.datestr(results[rev])))
return str(rev)
raise util.Abort(_("revision matching date not found"))
def increasingwindows(start, end, windowsize=8, sizelimit=512):
if start < end:
while start < end:
yield start, min(windowsize, end - start)
start += windowsize
if windowsize < sizelimit:
windowsize *= 2
else:
while start > end:
yield start, min(windowsize, start - end - 1)
start -= windowsize
if windowsize < sizelimit:
windowsize *= 2
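# Illustrative example (not part of the original module): the window length
# doubles on each iteration until it reaches sizelimit, so walking 100
# revisions forward yields quickly growing (start, length) windows:
#
#   >>> list(increasingwindows(0, 100))
#   [(0, 8), (8, 16), (24, 32), (56, 44)]
#
# The final window is clipped so that the ranges exactly cover 0..99.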
class FileWalkError(Exception):
pass
def walkfilerevs(repo, match, follow, revs, fncache):
'''Walks the file history for the matched files.
Returns the changeset revs that are involved in the file history.
Throws FileWalkError if the file history can't be walked using
filelogs alone.
'''
wanted = set()
copies = []
minrev, maxrev = min(revs), max(revs)
def filerevgen(filelog, last):
"""
Only files, no patterns. Check the history of each file.
Examines filelog entries within minrev, maxrev linkrev range
Returns an iterator yielding (linkrev, parentlinkrevs, copied)
tuples in backwards order
"""
cl_count = len(repo)
revs = []
for j in xrange(0, last + 1):
linkrev = filelog.linkrev(j)
if linkrev < minrev:
continue
# only yield rev for which we have the changelog, it can
# happen while doing "hg log" during a pull or commit
if linkrev >= cl_count:
break
parentlinkrevs = []
for p in filelog.parentrevs(j):
if p != nullrev:
parentlinkrevs.append(filelog.linkrev(p))
n = filelog.node(j)
revs.append((linkrev, parentlinkrevs,
follow and filelog.renamed(n)))
return reversed(revs)
def iterfiles():
pctx = repo['.']
for filename in match.files():
if follow:
if filename not in pctx:
raise util.Abort(_('cannot follow file not in parent '
'revision: "%s"') % filename)
yield filename, pctx[filename].filenode()
else:
yield filename, None
for filename_node in copies:
yield filename_node
for file_, node in iterfiles():
filelog = repo.file(file_)
if not len(filelog):
if node is None:
# A zero count may be a directory or deleted file, so
# try to find matching entries on the slow path.
if follow:
raise util.Abort(
_('cannot follow nonexistent file: "%s"') % file_)
raise FileWalkError("Cannot walk via filelog")
else:
continue
if node is None:
last = len(filelog) - 1
else:
last = filelog.rev(node)
# keep track of all ancestors of the file
ancestors = set([filelog.linkrev(last)])
# iterate from latest to oldest revision
for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
if not follow:
if rev > maxrev:
continue
else:
# Note that last might not be the first interesting
# rev to us:
# if the file has been changed after maxrev, we'll
# have linkrev(last) > maxrev, and we still need
# to explore the file graph
if rev not in ancestors:
continue
# XXX insert 1327 fix here
if flparentlinkrevs:
ancestors.update(flparentlinkrevs)
fncache.setdefault(rev, []).append(file_)
wanted.add(rev)
if copied:
copies.append(copied)
return wanted
def walkchangerevs(repo, match, opts, prepare):
'''Iterate over files and the revs in which they changed.
Callers most commonly need to iterate backwards over the history
in which they are interested. Doing so has awful (quadratic-looking)
performance, so we use iterators in a "windowed" way.
We walk a window of revisions in the desired order. Within the
window, we first walk forwards to gather data, then in the desired
order (usually backwards) to display it.
This function returns an iterator yielding contexts. Before
yielding each context, the iterator will first call the prepare
function on each context in the window in forward order.'''
follow = opts.get('follow') or opts.get('follow_first')
if opts.get('rev'):
revs = scmutil.revrange(repo, opts.get('rev'))
elif follow:
revs = repo.revs('reverse(:.)')
else:
revs = list(repo)
revs.reverse()
if not revs:
return []
wanted = set()
slowpath = match.anypats() or (match.files() and opts.get('removed'))
fncache = {}
change = repo.changectx
# First step is to fill wanted, the set of revisions that we want to yield.
# When it does not induce extra cost, we also fill fncache for revisions in
# wanted: a cache of filenames that were changed (ctx.files()) and that
# match the file filtering conditions.
if not slowpath and not match.files():
# No files, no patterns. Display all revs.
wanted = set(revs)
if not slowpath and match.files():
# We only have to read through the filelog to find wanted revisions
try:
wanted = walkfilerevs(repo, match, follow, revs, fncache)
except FileWalkError:
slowpath = True
# We decided to fall back to the slowpath because at least one
# of the paths was not a file. Check to see if at least one of them
# existed in history, otherwise simply return
for path in match.files():
if path == '.' or path in repo.store:
break
else:
return []
if slowpath:
# We have to read the changelog to match filenames against
# changed files
if follow:
raise util.Abort(_('can only follow copies/renames for explicit '
'filenames'))
# The slow path checks files modified in every changeset.
# This is really slow on large repos, so compute the set lazily.
class lazywantedset(object):
def __init__(self):
self.set = set()
self.revs = set(revs)
# No need to worry about locality here because it will be accessed
# in the same order as the increasing window below.
def __contains__(self, value):
if value in self.set:
return True
elif not value in self.revs:
return False
else:
self.revs.discard(value)
ctx = change(value)
matches = filter(match, ctx.files())
if matches:
fncache[value] = matches
self.set.add(value)
return True
return False
def discard(self, value):
self.revs.discard(value)
self.set.discard(value)
wanted = lazywantedset()
class followfilter(object):
def __init__(self, onlyfirst=False):
self.startrev = nullrev
self.roots = set()
self.onlyfirst = onlyfirst
def match(self, rev):
def realparents(rev):
if self.onlyfirst:
return repo.changelog.parentrevs(rev)[0:1]
else:
return filter(lambda x: x != nullrev,
repo.changelog.parentrevs(rev))
if self.startrev == nullrev:
self.startrev = rev
return True
if rev > self.startrev:
# forward: all descendants
if not self.roots:
self.roots.add(self.startrev)
for parent in realparents(rev):
if parent in self.roots:
self.roots.add(rev)
return True
else:
# backwards: all parents
if not self.roots:
self.roots.update(realparents(self.startrev))
if rev in self.roots:
self.roots.remove(rev)
self.roots.update(realparents(rev))
return True
return False
# it might be worthwhile to do this in the iterator if the rev range
# is descending and the prune args are all within that range
for rev in opts.get('prune', ()):
rev = repo[rev].rev()
ff = followfilter()
stop = min(revs[0], revs[-1])
for x in xrange(rev, stop - 1, -1):
if ff.match(x):
wanted.discard(x)
# Choose a small initial window if we will probably only visit a
# few commits.
limit = loglimit(opts)
windowsize = 8
if limit:
windowsize = min(limit, windowsize)
# Now that wanted is correctly initialized, we can iterate over the
# revision range, yielding only revisions in wanted.
def iterate():
if follow and not match.files():
ff = followfilter(onlyfirst=opts.get('follow_first'))
def want(rev):
return ff.match(rev) and rev in wanted
else:
def want(rev):
return rev in wanted
for i, window in increasingwindows(0, len(revs), windowsize):
nrevs = [rev for rev in revs[i:i + window] if want(rev)]
for rev in sorted(nrevs):
fns = fncache.get(rev)
ctx = change(rev)
if not fns:
def fns_generator():
for f in ctx.files():
if match(f):
yield f
fns = fns_generator()
prepare(ctx, fns)
for rev in nrevs:
yield change(rev)
return iterate()
def _makegraphfilematcher(repo, pats, followfirst):
# When displaying a revision with --patch --follow FILE, we have
# to know which file of the revision must be diffed. With
# --follow, we want the names of the ancestors of FILE in the
# revision, stored in "fcache". "fcache" is populated by
# reproducing the graph traversal already done by --follow revset
# and relating linkrevs to file names (which is not "correct" but
# good enough).
fcache = {}
fcacheready = [False]
pctx = repo['.']
wctx = repo[None]
def populate():
for fn in pats:
for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
for c in i:
fcache.setdefault(c.linkrev(), set()).add(c.path())
def filematcher(rev):
if not fcacheready[0]:
# Lazy initialization
fcacheready[0] = True
populate()
return scmutil.match(wctx, fcache.get(rev, []), default='path')
return filematcher
def _makegraphlogrevset(repo, pats, opts, revs):
"""Return (expr, filematcher) where expr is a revset string built
from log options and file patterns or None. If --stat or --patch
are not passed filematcher is None. Otherwise it is a callable
taking a revision number and returning a match objects filtering
the files to be detailed when displaying the revision.
"""
opt2revset = {
'no_merges': ('not merge()', None),
'only_merges': ('merge()', None),
'_ancestors': ('ancestors(%(val)s)', None),
'_fancestors': ('_firstancestors(%(val)s)', None),
'_descendants': ('descendants(%(val)s)', None),
'_fdescendants': ('_firstdescendants(%(val)s)', None),
'_matchfiles': ('_matchfiles(%(val)s)', None),
'date': ('date(%(val)r)', None),
'branch': ('branch(%(val)r)', ' or '),
'_patslog': ('filelog(%(val)r)', ' or '),
'_patsfollow': ('follow(%(val)r)', ' or '),
'_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
'keyword': ('keyword(%(val)r)', ' or '),
'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
'user': ('user(%(val)r)', ' or '),
}
opts = dict(opts)
# follow or not follow?
follow = opts.get('follow') or opts.get('follow_first')
followfirst = opts.get('follow_first') and 1 or 0
# --follow with FILE behaviour depends on revs...
startrev = revs[0]
followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0
# branch and only_branch are really aliases and must be handled at
# the same time
opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
# pats/include/exclude are passed to match.match() directly in
# _matchfiles() revset but walkchangerevs() builds its matcher with
# scmutil.match(). The difference is input pats are globbed on
# platforms without shell expansion (windows).
pctx = repo[None]
match, pats = scmutil.matchandpats(pctx, pats, opts)
slowpath = match.anypats() or (match.files() and opts.get('removed'))
if not slowpath:
for f in match.files():
if follow and f not in pctx:
raise util.Abort(_('cannot follow file not in parent '
'revision: "%s"') % f)
filelog = repo.file(f)
if not filelog:
# A zero count may be a directory or deleted file, so
# try to find matching entries on the slow path.
if follow:
raise util.Abort(
_('cannot follow nonexistent file: "%s"') % f)
slowpath = True
# We decided to fall back to the slowpath because at least one
# of the paths was not a file. Check to see if at least one of them
# existed in history - in that case, we'll continue down the
# slowpath; otherwise, we can turn off the slowpath
if slowpath:
for path in match.files():
if path == '.' or path in repo.store:
break
else:
slowpath = False
if slowpath:
# See walkchangerevs() slow path.
#
if follow:
raise util.Abort(_('can only follow copies/renames for explicit '
'filenames'))
# pats/include/exclude cannot be represented as separate
# revset expressions as their filtering logic applies at file
# level. For instance "-I a -X b" matches a revision touching
# "a" and "b" while "file(a) and not file(b)" does
# not. Besides, filesets are evaluated against the working
# directory.
matchargs = ['r:', 'd:relpath']
for p in pats:
matchargs.append('p:' + p)
for p in opts.get('include', []):
matchargs.append('i:' + p)
for p in opts.get('exclude', []):
matchargs.append('x:' + p)
matchargs = ','.join(('%r' % p) for p in matchargs)
opts['_matchfiles'] = matchargs
else:
if follow:
fpats = ('_patsfollow', '_patsfollowfirst')
fnopats = (('_ancestors', '_fancestors'),
('_descendants', '_fdescendants'))
if pats:
# follow() revset interprets its file argument as a
# manifest entry, so use match.files(), not pats.
opts[fpats[followfirst]] = list(match.files())
else:
opts[fnopats[followdescendants][followfirst]] = str(startrev)
else:
opts['_patslog'] = list(pats)
filematcher = None
if opts.get('patch') or opts.get('stat'):
if follow:
filematcher = _makegraphfilematcher(repo, pats, followfirst)
else:
filematcher = lambda rev: match
expr = []
for op, val in opts.iteritems():
if not val:
continue
if op not in opt2revset:
continue
revop, andor = opt2revset[op]
if '%(val)' not in revop:
expr.append(revop)
else:
if not isinstance(val, list):
e = revop % {'val': val}
else:
e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
expr.append(e)
if expr:
expr = '(' + ' and '.join(expr) + ')'
else:
expr = None
return expr, filematcher
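# Illustrative example (not part of the original module): for an invocation
# like "hg log -G -k bug -u alice -u bob" the options are translated via
# opt2revset into a revset expression roughly of the form
#
#   ((keyword('bug')) and (user('alice') or user('bob')))
#
# (the relative order of the and-ed terms follows dict iteration order), and
# filematcher remains None unless --patch or --stat was requested.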
def getgraphlogrevs(repo, pats, opts):
"""Return (revs, expr, filematcher) where revs is an iterable of
revision numbers, expr is a revset string built from log options
and file patterns or None, and used to filter 'revs'. If --stat or
--patch are not passed filematcher is None. Otherwise it is a
callable taking a revision number and returning a match objects
filtering the files to be detailed when displaying the revision.
"""
if not len(repo):
return [], None, None
limit = loglimit(opts)
# Default --rev value depends on --follow but --follow behaviour
# depends on revisions resolved from --rev...
follow = opts.get('follow') or opts.get('follow_first')
possiblyunsorted = False # whether revs might need sorting
if opts.get('rev'):
revs = scmutil.revrange(repo, opts['rev'])
# Don't sort here because _makegraphlogrevset might depend on the
# order of revs
possiblyunsorted = True
else:
if follow and len(repo) > 0:
revs = repo.revs('reverse(:.)')
else:
revs = list(repo.changelog)
revs.reverse()
if not revs:
return [], None, None
expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
if possiblyunsorted:
revs.sort(reverse=True)
if expr:
# Revset matchers often operate faster on revisions in changelog
# order, because most filters deal with the changelog.
revs.reverse()
matcher = revset.match(repo.ui, expr)
# Revset matches can reorder revisions. "A or B" typically returns the
# revision matching A then the revision matching B. Sort again to fix that.
revs = matcher(repo, revs)
revs.sort(reverse=True)
if limit is not None:
revs = revs[:limit]
return revs, expr, filematcher
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
filematcher=None):
seen, state = [], graphmod.asciistate()
for rev, type, ctx, parents in dag:
char = 'o'
if ctx.node() in showparents:
char = '@'
elif ctx.obsolete():
char = 'x'
copies = None
if getrenamed and ctx.rev():
copies = []
for fn in ctx.files():
rename = getrenamed(fn, ctx.rev())
if rename:
copies.append((fn, rename[0]))
revmatchfn = None
if filematcher is not None:
revmatchfn = filematcher(ctx.rev())
displayer.show(ctx, copies=copies, matchfn=revmatchfn)
lines = displayer.hunk.pop(rev).split('\n')
if not lines[-1]:
del lines[-1]
displayer.flush(rev)
edges = edgefn(type, char, lines, seen, rev, parents)
for type, char, lines, coldata in edges:
graphmod.ascii(ui, state, type, char, lines, coldata)
displayer.close()
def graphlog(ui, repo, *pats, **opts):
# Parameters are identical to log command ones
revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
revdag = graphmod.dagwalker(repo, revs)
getrenamed = None
if opts.get('copies'):
endrev = None
if opts.get('rev'):
endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
displayer = show_changeset(ui, repo, opts, buffered=True)
showparents = [ctx.node() for ctx in repo[None].parents()]
displaygraph(ui, revdag, displayer, showparents,
graphmod.asciiedges, getrenamed, filematcher)
def checkunsupportedgraphflags(pats, opts):
for op in ["newest_first"]:
if op in opts and opts[op]:
raise util.Abort(_("-G/--graph option is incompatible with --%s")
% op.replace("_", "-"))
def graphrevs(repo, nodes, opts):
limit = loglimit(opts)
nodes.reverse()
if limit is not None:
nodes = nodes[:limit]
return graphmod.nodes(repo, nodes)
def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
join = lambda f: os.path.join(prefix, f)
bad = []
oldbad = match.bad
match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
names = []
wctx = repo[None]
cca = None
abort, warn = scmutil.checkportabilityalert(ui)
if abort or warn:
cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
for f in repo.walk(match):
exact = match.exact(f)
if exact or not explicitonly and f not in repo.dirstate:
if cca:
cca(f)
names.append(f)
if ui.verbose or not exact:
ui.status(_('adding %s\n') % match.rel(join(f)))
for subpath in sorted(wctx.substate):
sub = wctx.sub(subpath)
try:
submatch = matchmod.narrowmatcher(subpath, match)
if listsubrepos:
bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
False))
else:
bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
True))
except error.LookupError:
ui.status(_("skipping missing subrepository: %s\n")
% join(subpath))
if not dryrun:
rejected = wctx.add(names, prefix)
bad.extend(f for f in rejected if f in match.files())
return bad
def forget(ui, repo, match, prefix, explicitonly):
join = lambda f: os.path.join(prefix, f)
bad = []
oldbad = match.bad
match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
wctx = repo[None]
forgot = []
s = repo.status(match=match, clean=True)
forget = sorted(s[0] + s[1] + s[3] + s[6])
if explicitonly:
forget = [f for f in forget if match.exact(f)]
for subpath in sorted(wctx.substate):
sub = wctx.sub(subpath)
try:
submatch = matchmod.narrowmatcher(subpath, match)
subbad, subforgot = sub.forget(ui, submatch, prefix)
bad.extend([subpath + '/' + f for f in subbad])
forgot.extend([subpath + '/' + f for f in subforgot])
except error.LookupError:
ui.status(_("skipping missing subrepository: %s\n")
% join(subpath))
if not explicitonly:
for f in match.files():
if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
if f not in forgot:
if os.path.exists(match.rel(join(f))):
ui.warn(_('not removing %s: '
'file is already untracked\n')
% match.rel(join(f)))
bad.append(f)
for f in forget:
if ui.verbose or not match.exact(f):
ui.status(_('removing %s\n') % match.rel(join(f)))
rejected = wctx.forget(forget, prefix)
bad.extend(f for f in rejected if f in match.files())
forgot.extend(forget)
return bad, forgot
def duplicatecopies(repo, rev, fromrev):
'''reproduce copies from fromrev to rev in the dirstate'''
for dst, src in copies.pathcopies(repo[fromrev], repo[rev]).iteritems():
# copies.pathcopies returns backward renames, so dst might not
# actually be in the dirstate
if repo.dirstate[dst] in "nma":
repo.dirstate.copy(src, dst)
def commit(ui, repo, commitfunc, pats, opts):
'''commit the specified files or all outstanding changes'''
date = opts.get('date')
if date:
opts['date'] = util.parsedate(date)
message = logmessage(ui, opts)
# extract addremove carefully -- this function can be called from a command
# that doesn't support addremove
if opts.get('addremove'):
scmutil.addremove(repo, pats, opts)
return commitfunc(ui, repo, message,
scmutil.match(repo[None], pats, opts), opts)
def amend(ui, repo, commitfunc, old, extra, pats, opts):
ui.note(_('amending changeset %s\n') % old)
base = old.p1()
wlock = lock = newid = None
try:
wlock = repo.wlock()
lock = repo.lock()
tr = repo.transaction('amend')
try:
# See if we got a message from -m or -l, if not, open the editor
# with the message of the changeset to amend
message = logmessage(ui, opts)
# ensure logfile does not conflict with later enforcement of the
# message. potential logfile content has been processed by
# `logmessage` anyway.
opts.pop('logfile')
# First, do a regular commit to record all changes in the working
# directory (if there are any)
ui.callhooks = False
currentbookmark = repo._bookmarkcurrent
try:
repo._bookmarkcurrent = None
opts['message'] = 'temporary amend commit for %s' % old
node = commit(ui, repo, commitfunc, pats, opts)
finally:
repo._bookmarkcurrent = currentbookmark
ui.callhooks = True
ctx = repo[node]
# Participating changesets:
#
# node/ctx o - new (intermediate) commit that contains changes
# | from working dir to go into amending commit
# | (or a workingctx if there were no changes)
# |
# old o - changeset to amend
# |
# base o - parent of amending changeset
# Update extra dict from amended commit (e.g. to preserve graft
# source)
extra.update(old.extra())
# Also update it from the intermediate commit or from the wctx
extra.update(ctx.extra())
if len(old.parents()) > 1:
# ctx.files() isn't reliable for merges, so fall back to the
# slower repo.status() method
files = set([fn for st in repo.status(base, old)[:3]
for fn in st])
else:
files = set(old.files())
# Second, we use either the commit we just did, or if there were no
# changes the parent of the working directory as the version of the
# files in the final amend commit
if node:
ui.note(_('copying changeset %s to %s\n') % (ctx, base))
user = ctx.user()
date = ctx.date()
# Recompute copies (avoid recording a -> b -> a)
copied = copies.pathcopies(base, ctx)
# Prune files which were reverted by the updates: if old
# introduced file X and our intermediate commit, node,
# renamed that file, then those two files are the same and
# we can discard X from our list of files. Likewise if X
# was deleted, it's no longer relevant
files.update(ctx.files())
def samefile(f):
if f in ctx.manifest():
a = ctx.filectx(f)
if f in base.manifest():
b = base.filectx(f)
return (not a.cmp(b)
and a.flags() == b.flags())
else:
return False
else:
return f not in base.manifest()
files = [f for f in files if not samefile(f)]
def filectxfn(repo, ctx_, path):
try:
fctx = ctx[path]
flags = fctx.flags()
mctx = context.memfilectx(fctx.path(), fctx.data(),
islink='l' in flags,
isexec='x' in flags,
copied=copied.get(path))
return mctx
except KeyError:
raise IOError
else:
ui.note(_('copying changeset %s to %s\n') % (old, base))
# Use version of files as in the old cset
def filectxfn(repo, ctx_, path):
try:
return old.filectx(path)
except KeyError:
raise IOError
user = opts.get('user') or old.user()
date = opts.get('date') or old.date()
editmsg = False
if not message:
editmsg = True
message = old.description()
pureextra = extra.copy()
extra['amend_source'] = old.hex()
new = context.memctx(repo,
parents=[base.node(), old.p2().node()],
text=message,
files=files,
filectxfn=filectxfn,
user=user,
date=date,
extra=extra)
if editmsg:
new._text = commitforceeditor(repo, new, [])
newdesc = changelog.stripdesc(new.description())
if ((not node)
and newdesc == old.description()
and user == old.user()
and date == old.date()
and pureextra == old.extra()):
# nothing changed. continuing here would create a new node
# anyway because of the amend_source noise.
#
# This is not what we expect from amend.
return old.node()
ph = repo.ui.config('phases', 'new-commit', phases.draft)
try:
repo.ui.setconfig('phases', 'new-commit', old.phase())
newid = repo.commitctx(new)
finally:
repo.ui.setconfig('phases', 'new-commit', ph)
if newid != old.node():
# Reroute the working copy parent to the new changeset
repo.setparents(newid, nullid)
# Move bookmarks from old parent to amend commit
bms = repo.nodebookmarks(old.node())
if bms:
marks = repo._bookmarks
for bm in bms:
marks[bm] = newid
marks.write()
# commit the whole amend process
if obsolete._enabled and newid != old.node():
# mark the new changeset as successor of the rewritten one
new = repo[newid]
obs = [(old, (new,))]
if node:
obs.append((ctx, ()))
obsolete.createmarkers(repo, obs)
tr.close()
finally:
tr.release()
if (not obsolete._enabled) and newid != old.node():
# Strip the intermediate commit (if there was one) and the amended
# commit
if node:
ui.note(_('stripping intermediate changeset %s\n') % ctx)
ui.note(_('stripping amended changeset %s\n') % old)
repair.strip(ui, repo, old.node(), topic='amend-backup')
finally:
if newid is None:
repo.dirstate.invalidate()
lockmod.release(lock, wlock)
return newid
def commiteditor(repo, ctx, subs):
if ctx.description():
return ctx.description()
return commitforceeditor(repo, ctx, subs)
def commitforceeditor(repo, ctx, subs):
edittext = []
modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
if ctx.description():
edittext.append(ctx.description())
edittext.append("")
edittext.append("") # Empty line between message and comments.
edittext.append(_("HG: Enter commit message."
" Lines beginning with 'HG:' are removed."))
edittext.append(_("HG: Leave message empty to abort commit."))
edittext.append("HG: --")
edittext.append(_("HG: user: %s") % ctx.user())
if ctx.p2():
edittext.append(_("HG: branch merge"))
if ctx.branch():
edittext.append(_("HG: branch '%s'") % ctx.branch())
if bookmarks.iscurrent(repo):
edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
edittext.extend([_("HG: subrepo %s") % s for s in subs])
edittext.extend([_("HG: added %s") % f for f in added])
edittext.extend([_("HG: changed %s") % f for f in modified])
edittext.extend([_("HG: removed %s") % f for f in removed])
if not added and not modified and not removed:
edittext.append(_("HG: no files changed"))
edittext.append("")
# run editor in the repository root
olddir = os.getcwd()
os.chdir(repo.root)
text = repo.ui.edit("\n".join(edittext), ctx.user())
text = re.sub("(?m)^HG:.*(\n|$)", "", text)
os.chdir(olddir)
if not text.strip():
raise util.Abort(_("empty commit message"))
return text
def commitstatus(repo, node, branch, bheads=None, opts={}):
ctx = repo[node]
parents = ctx.parents()
if (not opts.get('amend') and bheads and node not in bheads and not
[x for x in parents if x.node() in bheads and x.branch() == branch]):
repo.ui.status(_('created new head\n'))
# The message is not printed for initial roots. For the other
# changesets, it is printed in the following situations:
#
# Par column: for the 2 parents with ...
# N: null or no parent
# B: parent is on another named branch
# C: parent is a regular non head changeset
# H: parent was a branch head of the current branch
# Msg column: whether we print "created new head" message
# In the following, it is assumed that there already exists some
# initial branch heads of the current branch, otherwise nothing is
# printed anyway.
#
# Par Msg Comment
# N N y additional topo root
#
# B N y additional branch root
# C N y additional topo head
# H N n usual case
#
# B B y weird additional branch root
# C B y branch merge
# H B n merge with named branch
#
# C C y additional head from merge
# C H n merge with a head
#
# H H n head merge: head count decreases
if not opts.get('close_branch'):
for r in parents:
if r.closesbranch() and r.branch() == branch:
repo.ui.status(_('reopening closed branch head %d\n') % r)
if repo.ui.debugflag:
repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
elif repo.ui.verbose:
repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
def revert(ui, repo, ctx, parents, *pats, **opts):
parent, p2 = parents
node = ctx.node()
mf = ctx.manifest()
if node == parent:
pmf = mf
else:
pmf = None
# need all matching names in dirstate and manifest of target rev,
# so have to walk both. do not print errors if files exist in one
# but not other.
names = {}
wlock = repo.wlock()
try:
# walk dirstate.
m = scmutil.match(repo[None], pats, opts)
m.bad = lambda x, y: False
for abs in repo.walk(m):
names[abs] = m.rel(abs), m.exact(abs)
# walk target manifest.
def badfn(path, msg):
if path in names:
return
if path in ctx.substate:
return
path_ = path + '/'
for f in names:
if f.startswith(path_):
return
ui.warn("%s: %s\n" % (m.rel(path), msg))
m = scmutil.match(ctx, pats, opts)
m.bad = badfn
for abs in ctx.walk(m):
if abs not in names:
names[abs] = m.rel(abs), m.exact(abs)
# get the list of subrepos that must be reverted
targetsubs = sorted(s for s in ctx.substate if m(s))
m = scmutil.matchfiles(repo, names)
changes = repo.status(match=m)[:4]
modified, added, removed, deleted = map(set, changes)
# if f is a rename, also revert the source
cwd = repo.getcwd()
for f in added:
src = repo.dirstate.copied(f)
if src and src not in names and repo.dirstate[src] == 'r':
removed.add(src)
names[src] = (repo.pathto(src, cwd), True)
def removeforget(abs):
if repo.dirstate[abs] == 'a':
return _('forgetting %s\n')
return _('removing %s\n')
revert = ([], _('reverting %s\n'))
add = ([], _('adding %s\n'))
remove = ([], removeforget)
undelete = ([], _('undeleting %s\n'))
disptable = (
# dispatch table:
# file state
# action if in target manifest
# action if not in target manifest
# make backup if in target manifest
# make backup if not in target manifest
(modified, revert, remove, True, True),
(added, revert, remove, True, False),
(removed, undelete, None, True, False),
(deleted, revert, remove, False, False),
)
for abs, (rel, exact) in sorted(names.items()):
mfentry = mf.get(abs)
target = repo.wjoin(abs)
def handle(xlist, dobackup):
xlist[0].append(abs)
if (dobackup and not opts.get('no_backup') and
os.path.lexists(target) and
abs in ctx and repo[None][abs].cmp(ctx[abs])):
bakname = "%s.orig" % rel
ui.note(_('saving current version of %s as %s\n') %
(rel, bakname))
if not opts.get('dry_run'):
util.rename(target, bakname)
if ui.verbose or not exact:
msg = xlist[1]
if not isinstance(msg, basestring):
msg = msg(abs)
ui.status(msg % rel)
for table, hitlist, misslist, backuphit, backupmiss in disptable:
if abs not in table:
continue
# file has changed in dirstate
if mfentry:
handle(hitlist, backuphit)
elif misslist is not None:
handle(misslist, backupmiss)
break
else:
if abs not in repo.dirstate:
if mfentry:
handle(add, True)
elif exact:
ui.warn(_('file not managed: %s\n') % rel)
continue
# file has not changed in dirstate
if node == parent:
if exact:
ui.warn(_('no changes needed to %s\n') % rel)
continue
if pmf is None:
# only need parent manifest in this unlikely case,
# so do not read by default
pmf = repo[parent].manifest()
if abs in pmf and mfentry:
# if version of file is same in parent and target
# manifests, do nothing
if (pmf[abs] != mfentry or
pmf.flags(abs) != mf.flags(abs)):
handle(revert, False)
else:
handle(remove, False)
if not opts.get('dry_run'):
def checkout(f):
fc = ctx[f]
repo.wwrite(f, fc.data(), fc.flags())
audit_path = pathutil.pathauditor(repo.root)
for f in remove[0]:
if repo.dirstate[f] == 'a':
repo.dirstate.drop(f)
continue
audit_path(f)
try:
util.unlinkpath(repo.wjoin(f))
except OSError:
pass
repo.dirstate.remove(f)
normal = None
if node == parent:
# We're reverting to our parent. If possible, we'd like status
# to report the file as clean. We have to use normallookup for
# merges to avoid losing information about merged/dirty files.
if p2 != nullid:
normal = repo.dirstate.normallookup
else:
normal = repo.dirstate.normal
for f in revert[0]:
checkout(f)
if normal:
normal(f)
for f in add[0]:
checkout(f)
repo.dirstate.add(f)
normal = repo.dirstate.normallookup
if node == parent and p2 == nullid:
normal = repo.dirstate.normal
for f in undelete[0]:
checkout(f)
normal(f)
copied = copies.pathcopies(repo[parent], ctx)
for f in add[0] + undelete[0] + revert[0]:
if f in copied:
repo.dirstate.copy(copied[f], f)
if targetsubs:
# Revert the subrepos on the revert list
for sub in targetsubs:
ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
finally:
wlock.release()
def command(table):
'''returns a function object bound to table which can be used as
a decorator for populating table as a command table'''
def cmd(name, options=(), synopsis=None):
def decorator(func):
if synopsis:
table[name] = func, list(options), synopsis
else:
table[name] = func, list(options)
return func
return decorator
return cmd
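# Illustrative sketch (not part of the original module): an extension
# typically uses this factory to populate its command table, e.g.
#
#   cmdtable = {}
#   command = cmdutil.command(cmdtable)
#
#   @command('hello', [('g', 'greeting', 'Hello', 'greeting to use')],
#            'hg hello [-g TEXT]')
#   def hello(ui, repo, **opts):
#       ui.write('%s, world!\n' % opts['greeting'])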
# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()
# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
('graftstate', True, False, _('graft in progress'),
_("use 'hg graft --continue' or 'hg update' to abort")),
('updatestate', True, False, _('last update was interrupted'),
_("use 'hg update' to get a consistent checkout"))
]
def checkunfinished(repo, commit=False):
'''Look for an unfinished multistep operation, like graft, and abort
if found. It's probably good to check this right before
bailifchanged().
'''
for f, clearable, allowcommit, msg, hint in unfinishedstates:
if commit and allowcommit:
continue
if repo.vfs.exists(f):
raise util.Abort(msg, hint=hint)
def clearunfinished(repo):
'''Check for unfinished operations (as above), and clear the ones
that are clearable.
'''
for f, clearable, allowcommit, msg, hint in unfinishedstates:
if not clearable and repo.vfs.exists(f):
raise util.Abort(msg, hint=hint)
for f, clearable, allowcommit, msg, hint in unfinishedstates:
if clearable and repo.vfs.exists(f):
util.unlink(repo.join(f))
| spraints/for-example | mercurial/cmdutil.py | Python | gpl-2.0 | 80,076 | ["VisIt"] | 296d766851cff8d127f259673cb18544aa4af79c42b6048ea8ebafe27dab937e |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for controller.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow.compat.v1 as tf
from tunas import basic_specs
from tunas import controller
from tunas import schema
class ControllerTest(tf.test.TestCase):
def assertOneHot(self, array):
self.assertLen(array.shape, 1)
argmax = np.argmax(array)
self.assertEqual(array[argmax], 1)
array_copy = np.copy(array)
array_copy[argmax] = 0
self.assertAllEqual(array_copy, [0]*len(array_copy))
def test_independent_sample_basic(self):
structure = {
'filters': schema.OneOf([48], basic_specs.FILTERS_TAG),
'opA': schema.OneOf(['foo', 'bar', 'baz'], basic_specs.OP_TAG),
'opB': schema.OneOf(['blah', 'yatta'], basic_specs.OP_TAG),
'other': schema.OneOf(['W', 'X', 'Y', 'Z'], 'some_other_tag'),
}
rl_structure, dist_info = controller.independent_sample(structure)
self.assertItemsEqual(structure.keys(), rl_structure.keys())
self.assertEqual(
{k: v.choices for (k, v) in structure.items()},
{k: v.choices for (k, v) in rl_structure.items()})
self.assertEqual(
{k: v.tag for (k, v) in structure.items()},
{k: v.tag for (k, v) in rl_structure.items()})
self.assertEqual(rl_structure['opA'].mask.shape, [3])
self.assertEqual(rl_structure['opB'].mask.shape, [2])
self.assertEqual(rl_structure['filters'].mask.shape, [1])
self.assertEqual(rl_structure['other'].mask.shape, [4])
self.evaluate(tf.global_variables_initializer())
self.assertEqual(dist_info['entropy'].shape, [])
self.assertEqual(dist_info['entropy'].dtype, tf.float32)
# Initially, all the logits are zero, so the entropy of a distribution with
# N possible choices is log(N). We sum up the entropies of four different
# distributions, for opA, opB, filters, and other.
self.assertAlmostEqual(
self.evaluate(dist_info['entropy']),
math.log(1) + math.log(2) + math.log(3) + math.log(4))
self.assertEqual(dist_info['sample_log_prob'].shape, [])
self.assertEqual(dist_info['sample_log_prob'].dtype, tf.float32)
self.assertAlmostEqual(
self.evaluate(dist_info['sample_log_prob']),
math.log(1) + math.log(1/2) + math.log(1/3) + math.log(1/4))
# The controller will visit the elements of 'structure' in sorted order
# (based on their keys). So op_indices_0 will correspond to opA, and
# op_indices_1 will correspond to opB. All variables are initialized to 0.
self.assertItemsEqual(
dist_info['logits_by_tag'].keys(),
['op_indices_0',
'op_indices_1',
'filters_indices_0',
'some_other_tag_0'])
self.assertEqual(dist_info['logits_by_tag']['filters_indices_0'].shape, [1])
self.assertEqual(dist_info['logits_by_tag']['op_indices_0'].shape, [3])
self.assertEqual(dist_info['logits_by_tag']['op_indices_1'].shape, [2])
self.assertEqual(dist_info['logits_by_tag']['some_other_tag_0'].shape, [4])
# Repeat, but with logits grouped by path instead of tag.
self.assertItemsEqual(
dist_info['logits_by_path'],
['filters', 'opA', 'opB', 'other'])
self.assertEqual(dist_info['logits_by_path']['filters'].shape, [1])
self.assertEqual(dist_info['logits_by_path']['opA'].shape, [3])
self.assertEqual(dist_info['logits_by_path']['opB'].shape, [2])
self.assertEqual(dist_info['logits_by_path']['other'].shape, [4])
def test_independent_sample_increase_ops_probability_1(self):
structure = schema.OneOf(['foo', 'bar', 'baz'], basic_specs.OP_TAG)
rl_structure, dist_info = controller.independent_sample(
structure, increase_ops_probability=1.0)
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(self.evaluate(rl_structure.mask), [1/3, 1/3, 1/3])
self.assertEqual(self.evaluate(dist_info['sample_log_prob']), 0)
def test_independent_sample_increase_ops_probability_0(self):
structure = schema.OneOf(['foo', 'bar', 'baz'], basic_specs.OP_TAG)
rl_structure, dist_info = controller.independent_sample(
structure, increase_ops_probability=0.0)
self.evaluate(tf.global_variables_initializer())
self.assertOneHot(self.evaluate(rl_structure.mask))
self.assertAlmostEqual(
self.evaluate(dist_info['sample_log_prob']),
math.log(1/3))
def test_independent_sample_increase_ops_does_not_affect_filters(self):
structure = schema.OneOf([4, 8, 12], basic_specs.FILTERS_TAG)
rl_structure, dist_info = controller.independent_sample(
structure, increase_ops_probability=1.0)
self.evaluate(tf.global_variables_initializer())
self.assertOneHot(self.evaluate(rl_structure.mask))
self.assertAlmostEqual(
self.evaluate(dist_info['sample_log_prob']),
math.log(1/3))
def test_independent_sample_increase_filters_probability_1(self):
# Make sure that increase_filters does the right thing when the choices do
# not appear in sorted order.
structure = schema.OneOf([4, 12, 8], basic_specs.FILTERS_TAG)
rl_structure, dist_info = controller.independent_sample(
structure, increase_filters_probability=1.0)
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(self.evaluate(rl_structure.mask), [0, 1, 0])
self.assertEqual(self.evaluate(dist_info['sample_log_prob']), 0)
def test_independent_sample_increase_filters_probability_1_big_space(self):
# Use a large enough number of choices that we're unlikely to select the
# right one by random chance.
structure = schema.OneOf(list(range(100)), basic_specs.FILTERS_TAG)
rl_structure, dist_info = controller.independent_sample(
structure, increase_filters_probability=1.0)
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(self.evaluate(rl_structure.mask), [0]*99 + [1])
self.assertEqual(self.evaluate(dist_info['sample_log_prob']), 0)
def test_independent_sample_increase_filters_probability_0(self):
structure = schema.OneOf([4, 12, 8], basic_specs.FILTERS_TAG)
rl_structure, dist_info = controller.independent_sample(
structure, increase_filters_probability=0.0)
self.evaluate(tf.global_variables_initializer())
self.assertOneHot(self.evaluate(rl_structure.mask))
self.assertAlmostEqual(
self.evaluate(dist_info['sample_log_prob']),
math.log(1/3))
def test_independent_sample_increase_ops_does_not_affect_ops(self):
structure = schema.OneOf([42, 64], basic_specs.OP_TAG)
rl_structure, dist_info = controller.independent_sample(
structure, increase_filters_probability=1.0)
self.evaluate(tf.global_variables_initializer())
self.assertOneHot(self.evaluate(rl_structure.mask))
self.assertAlmostEqual(
self.evaluate(dist_info['sample_log_prob']),
math.log(1/2))
def test_independent_sample_hierarchical(self):
structure = schema.OneOf(
[
schema.OneOf(['a', 'b', 'c'], basic_specs.OP_TAG),
schema.OneOf(['d', 'e', 'f', 'g'], basic_specs.OP_TAG),
], basic_specs.OP_TAG)
rl_structure, dist_info = controller.independent_sample(
structure, increase_ops_probability=0, increase_filters_probability=0,
hierarchical=True)
tensors = {
'outer_mask': rl_structure.mask,
'entropy': dist_info['entropy'],
'sample_log_prob': dist_info['sample_log_prob'],
}
self.evaluate(tf.global_variables_initializer())
for _ in range(10):
values = self.evaluate(tensors)
if np.all(values['outer_mask'] == np.array([1, 0])):
self.assertAlmostEqual(values['entropy'], math.log(2) + math.log(3))
self.assertAlmostEqual(
values['sample_log_prob'], math.log(1/2) + math.log(1/3))
elif np.all(values['outer_mask'] == np.array([0, 1])):
self.assertAlmostEqual(values['entropy'], math.log(2) + math.log(4))
self.assertAlmostEqual(
values['sample_log_prob'], math.log(1/2) + math.log(1/4))
else:
self.fail('Unexpected outer_mask: %s' % values['outer_mask'])
def test_independent_sample_not_hierarchical(self):
structure = schema.OneOf(
[
schema.OneOf(['a', 'b', 'c'], basic_specs.OP_TAG),
schema.OneOf(['d', 'e', 'f', 'g'], basic_specs.OP_TAG),
], basic_specs.OP_TAG)
unused_rl_structure, dist_info = controller.independent_sample(
structure, increase_ops_probability=0, increase_filters_probability=0,
hierarchical=False)
tensors = {
'entropy': dist_info['entropy'],
'sample_log_prob': dist_info['sample_log_prob'],
}
self.evaluate(tf.global_variables_initializer())
for _ in range(10):
values = self.evaluate(tensors)
self.assertAlmostEqual(
values['entropy'], math.log(2) + math.log(3) + math.log(4))
self.assertAlmostEqual(
values['sample_log_prob'],
math.log(1/2) + math.log(1/3) + math.log(1/4))
def test_independent_sample_temperature(self):
structure = schema.OneOf(['foo', 'bar', 'baz'], basic_specs.OP_TAG)
temperature = tf.placeholder_with_default(
tf.constant(5.0, tf.float32), shape=(), name='temperature')
rl_structure, dist_info = controller.independent_sample(
structure, temperature=temperature)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
# Samples should be valid even when the temperature is set to a value
# other than 1.
self.assertOneHot(sess.run(rl_structure.mask))
# Before training, the sample log-probability and entropy shouldn't be
# affected by the temperature, since the probabilities are initialized
# to a uniform distribution.
self.assertAlmostEqual(
sess.run(dist_info['sample_log_prob']), math.log(1/3))
self.assertAlmostEqual(sess.run(dist_info['entropy']), math.log(3))
# The gradients should be multiplied by (1 / temperature).
# The OneOf has three possible choices. The gradient for the selected one
# will be positive, while the gradients for the other two will be
# negative. Since the selected choice can change between steps, we compare
# the max, which should always give us gradients w.r.t. the selected one.
trainable_vars = tf.trainable_variables()
self.assertLen(trainable_vars, 1)
grad_tensors = tf.gradients(dist_info['sample_log_prob'], trainable_vars)
grad1 = np.max(sess.run(grad_tensors[0], {temperature: 1.0}))
grad5 = np.max(sess.run(grad_tensors[0], {temperature: 5.0}))
self.assertAlmostEqual(grad1 / 5, grad5)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
|
google-research/google-research
|
tunas/controller_test.py
|
Python
|
apache-2.0
| 11,472
|
[
"VisIt"
] |
83cdb3056ac553a61487d2bc2e741d107985b4a0f95d7e9997a4ad6b282a4f7c
|
# import h5py
import numpy as np
# from pprint import pprint
# from termcolor import colored as c, cprint
# from IPython.display import FileLink
# from IPython import display
# from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
color_palette = ['#507ba6', '#f08e39', '#e0595c', '#79b8b3', '#5ca153',
'#edc854', '#af7ba2', '#fe9fa9', '#9c7561', '#bbb0ac']
from scipy.signal import savgol_filter
def smooth(samples, window_length=101, polyorder=3):
return savgol_filter(samples, window_length=window_length, polyorder=polyorder)
def smooth_ranges(samples, scale, floor=1, **kwargs):
centroids = np.mean(samples, axis=1)
smoothed = np.transpose(np.vstack((
np.minimum(smooth(samples[:, 0] - centroids, **kwargs), - floor),
np.maximum(smooth(samples[:, 1] - centroids, **kwargs), floor)
)))
return smoothed * scale + np.expand_dims(centroids, 1)
def smooth_ranges_2d(xranges, yranges, aspect_ratio='equal', *args, **kwargs):
smoothed_x = smooth_ranges(xranges, *args, **kwargs)
smoothed_y = smooth_ranges(yranges, *args, **kwargs)
if aspect_ratio == 'equal':
x_centroids = np.mean(smoothed_x, axis=1)
y_centroids = np.mean(smoothed_y, axis=1)
half_spread = np.maximum(np.abs(smoothed_x[:, 0] - x_centroids),
np.abs(smoothed_y[:, 0] - y_centroids))
return np.transpose(np.vstack((
x_centroids - half_spread, x_centroids + half_spread
))), np.transpose(np.vstack((
y_centroids - half_spread, y_centroids + half_spread
)))
else:
return smoothed_x, smoothed_y
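# Hypothetical usage sketch (not part of the original file): smooth per-frame
# (min, max) axis ranges so animated plot limits drift gently instead of
# jumping. The shapes, the random data and the 1.1 padding factor are assumptions.
def _demo_smooth_ranges():
    rng = np.random.RandomState(0)
    centers = np.cumsum(rng.randn(300))            # drifting centroid per frame
    spread = 1.0 + np.abs(rng.randn(300))          # noisy half-width per frame
    xranges = np.stack([centers - spread, centers + spread], axis=1)  # (N, 2) [min, max]
    yranges = xranges[::-1].copy()
    # Pad by 10% and force a square (equal-aspect) window for every frame.
    return smooth_ranges_2d(xranges, yranges, scale=1.1, window_length=51)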
def plot_deep_features(deep_features, labels, **kwargs):
bins = {}
for logit, deep_feature in zip(labels, deep_features):
label = np.argmax(logit)
# print(label)
try:
bins[str(label)].append(list(deep_feature))
except KeyError:
bins[str(label)] = [list(deep_feature)]
fig = plt.figure(figsize=(5, 5))
for numeral in map(str, range(10)):
try:
features = np.array(bins[numeral])
except KeyError:
print(numeral + " does not exist")
features = []
try:
x, y = np.transpose(features)
except ValueError:
x, y = [], []
plt.scatter(
x,
y,
s=1,
color=color_palette[int(numeral)],
label=numeral
)
plt.legend(loc=(1.05, 0.1), frameon=False)
if 'title' in kwargs:
title = kwargs['title']
else:
title = 'MNIST LeNet++ with 2 Deep Features (PReLU)'
plt.title(title)
plt.xlabel('activation of hidden neuron 1')
plt.ylabel('activation of hidden neuron 2')
if 'xlim' in kwargs:
plt.xlim(kwargs['xlim'][0], kwargs['xlim'][1])
if 'ylim' in kwargs:
plt.ylim(kwargs['ylim'][0], kwargs['ylim'][1])
return fig
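# Hypothetical usage sketch (not part of the original file): scatter-plot 2-D
# deep features for a batch of one-hot labelled samples. Random data stands in
# for real network activations; all shapes are assumptions.
def _demo_plot_deep_features():
    rng = np.random.RandomState(1)
    deep_features = rng.randn(500, 2)                   # (N, 2) hidden activations
    labels = np.eye(10)[rng.randint(0, 10, size=500)]   # (N, 10) one-hot logits
    fig = plot_deep_features(deep_features, labels,
                             title='demo features', xlim=(-3, 3), ylim=(-3, 3))
    fig.savefig('demo_features.png', dpi=150, bbox_inches='tight')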
def text(message, loc, xrange, yrange,
horizontalalignment='right',
verticalalignment='bottom', **kwargs):
x = xrange[0] * (1 - loc[0]) + xrange[1] * loc[0]
y = yrange[0] * (1 - loc[1]) + yrange[1] * loc[1]
plt.text(x, y, message, horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment, **kwargs)
def plot(save=False, **kwargs):
if 'frame_prefix' in kwargs:
frame_prefix = kwargs['frame_prefix']
else:
frame_prefix = 'frame'
if save and 'frame_index' in kwargs:
fname = './figures/animation/' + frame_prefix + "_" + str(1000 + kwargs['frame_index'])[-3:] + '.png'
plt.savefig(fname, dpi=300, bbox_inches='tight')
plt.show()
# plt.close(fig)
elif save:
fname = './figures/animation/' + kwargs.get('title', 'figure') + '.png'
plt.savefig(fname, dpi=300, bbox_inches='tight')
plt.show()
# plt.close(fig)
else:
plt.show()
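# Hypothetical usage sketch (not part of the original file): render and save a
# single animation frame; assumes the ./figures/animation/ directory exists.
def _demo_save_frame(deep_features, labels, frame_index=0):
    plot_deep_features(deep_features, labels, title='demo features')
    plot(save=True, frame_prefix='demo', frame_index=frame_index)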
|
kinshuk4/MoocX
|
misc/deep_learning_notes/Proj_Centroid_Loss_LeNet/LeNet_plus_centerloss/analysis.py
|
Python
|
mit
| 3,979
|
[
"NEURON"
] |
160a2d9fdac477c4af4e2530a0acf01fe8f6ee3b3e3ef338bd6881f52d55b4bc
|
# Some helper functions for analysing DUT-8
import numpy as np
import sys
from ase import Atoms, neighborlist
import os
import pickle
def buildNL(mol, path='./', radii=None, save=True):
#create nl
if radii is None:
radii = {}
radii[ 'H'] = 0.30
radii[ 'C'] = 0.77
radii[ 'N'] = 0.70
radii[ 'O'] = 0.66
radii['Ni'] = 1.24
nAtoms = len(mol)
if (not os.path.isfile(os.path.join(path, 'neighborList.pickle'))) or (not save):
#create a list of cutoffs
cutOff = []
for j in range(0,nAtoms):
cutOff.append(radii[mol[j].symbol])
#initiate neighborlist
neighborList = neighborlist.NeighborList(cutOff,self_interaction=False,bothways=True)
neighborList.update(mol)
if save:
with open(os.path.join(path, 'neighborList.pickle'),'wb') as f:
pickle.dump(neighborList,f)
elif save:
with open(os.path.join(path, 'neighborList.pickle'),'rb') as f:
neighborList = pickle.load(f)
print("Bond Map created")
return neighborList
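# Hypothetical usage sketch (not part of the original file): build (and cache) a
# neighbor list for a structure read from disk. The file name is an assumption;
# the structure may only contain H, C, N, O and Ni, the elements covered by the
# default radii above.
def _demo_build_neighborlist():
    from ase.io import read
    mol = read('dut8.cif')                    # hypothetical structure file
    nl = buildNL(mol, path='./', save=True)
    indices, offsets = nl.get_neighbors(0)    # atoms bonded to atom 0
    return indices, offsets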
def getBenzNiAngleList(ana, imI):
"""Make a list of all occurences"""
if isinstance(imI, int):
imI = [imI]
dihedralList = []
for i in imI:
dihedralList.append([])
nAtoms = len(ana.images[i])
molecule = ana.images[i]
bondList = ana.all_bonds[i]
relDihedrals = ana.get_dihedrals('Ni', 'Ni', 'O', 'C')[i]
for dihed in relDihedrals:
nextC = [ idx for idx in bondList[dihed[-1]] if molecule[idx].symbol == 'C']
assert len(nextC) == 1
attachedCs = [ idx for idx in bondList[nextC[0]] if (molecule[idx].symbol == 'C') and (idx != dihed[-1])]
assert len(attachedCs) == 2
dists = [ molecule.get_distance(dihed[0], idx, mic=True) for idx in attachedCs ]
if dists[0] < dists[1]:
dihedralList[-1].append((dihed[0], dihed[1], attachedCs[1], attachedCs[0]))
else:
dihedralList[-1].append((dihed[0], dihed[1], attachedCs[0], attachedCs[1]))
return dihedralList
def getAlphaList(ana, imI):
"""Make a list of all occurences"""
from itertools import combinations
allIdx = ana._get_symbol_idxs(imI, 'Ni')
nAtoms = len(ana.images[imI])
molecule = ana.images[imI]
bondList = ana.all_bonds[imI]
dihedrals = ana.get_dihedrals('Ni', 'O', 'C', 'C')[0]
alphaList = []
for niIdx in allIdx:
relDihedrals = [ d for d in dihedrals if d[0] == niIdx ]
assert len(relDihedrals) == 4
s = set([d[-1] for d in relDihedrals])
if s not in alphaList:
alphaList.append(s)
r = []
for l in alphaList:
r.extend(list(combinations(list(l), 2)))
return [r]
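# Hypothetical usage sketch (not part of the original file): both helpers above
# expect an ase.geometry.analysis.Analysis object built from a trajectory. The
# trajectory file name is an assumption.
def _demo_dihedral_lists():
    from ase.io import read
    from ase.geometry.analysis import Analysis
    images = read('dut8_md.traj', index=':')   # hypothetical MD trajectory
    ana = Analysis(images)
    benz_ni = getBenzNiAngleList(ana, 0)       # dihedral tuples for image 0
    alpha_pairs = getAlphaList(ana, 0)         # pairs of carbon indices per Ni site
    return benz_ni, alpha_pairs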
def checkBonds(mol, bondList, cluster=False):
for iAtom, atom in enumerate(mol):
bondedIdx = bondList[iAtom]
bondedSym = [ mol[idx].symbol for idx in bondedIdx ]
if atom.symbol == 'C':
check = [ False, False, False, False ]
if (set(bondedSym) == set(['C','H'])) and (len(bondedIdx) == 3):
check[0] = True
elif (set(bondedSym) == set(['C','H','N'])) and (len(bondedIdx) == 4):
check[1] = True
elif (set(bondedSym) == set(['C'])) and (len(bondedIdx) == 3):
check[2] = True
elif (set(bondedSym) == set(['C','O'])) and (len(bondedIdx) == 3):
check[3] = True
if not any(check):
raise RuntimeError("Atom {:} ({:}) bonded to {:}, these are {:}".format(iAtom, atom.symbol,str(bondedIdx),str(bondedSym)))
elif atom.symbol == 'H':
if ((set(bondedSym) != set(['C'])) and (set(bondedSym) != set(['N']))) or (len(bondedIdx) != 1):
raise RuntimeError("Atom {:} ({:}) bonded to {:}, these are {:}".format(iAtom, atom.symbol,str(bondedIdx),str(bondedSym)))
elif atom.symbol == 'N':
if cluster:
if ((set(bondedSym) != set(['C','Ni'])) and (set(bondedSym) != set(['H','Ni'])) and (set(bondedSym) != set(['C']))) or (not len(bondedIdx) in [4,3]):
raise RuntimeError("Atom {:} ({:}) bonded to {:}, these are {:}".format(iAtom, atom.symbol,str(bondedIdx),str(bondedSym)))
else:
if ((set(bondedSym) != set(['C','Ni'])) and (set(bondedSym) != set(['H','Ni']))) or (len(bondedIdx) != 4):
raise RuntimeError("Atom {:} ({:}) bonded to {:}, these are {:}".format(iAtom, atom.symbol,str(bondedIdx),str(bondedSym)))
elif atom.symbol == 'O':
if (set(bondedSym) != set(['C','Ni'])) or (len(bondedIdx) != 2):
raise RuntimeError("Atom {:} ({:}) bonded to {:}, these are {:}".format(iAtom, atom.symbol,str(bondedIdx),str(bondedSym)))
elif atom.symbol == 'Ni':
if (set(bondedSym) != set(['Ni','O','N'])) or (len(bondedIdx) != 6):
raise RuntimeError("Atom {:} ({:}) bonded to {:}, these are {:}".format(iAtom, atom.symbol,str(bondedIdx),str(bondedSym)))
return True
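# Hypothetical usage sketch (not part of the original file): sanity-check the
# connectivity of a structure using the neighbor list built by buildNL(). The
# file name is an assumption; checkBonds raises RuntimeError on bad connectivity.
def _demo_check_bonds():
    from ase.io import read
    mol = read('dut8.cif')                    # hypothetical structure file
    nl = buildNL(mol, save=False)
    bondList = [nl.get_neighbors(i)[0] for i in range(len(mol))]
    return checkBonds(mol, bondList, cluster=False)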
|
patrickmelix/Python4ChemistryTools
|
moffunctions.py
|
Python
|
mit
| 5,110
|
[
"ASE"
] |
47546c5daeac3a03f84120bc0ddece4a2046247563164f7065c416f28fe696bc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Zappa CLI
Deploy arbitrary Python programs as serverless Zappa applications.
"""
from __future__ import unicode_literals
from __future__ import division
import argcomplete
import argparse
import base64
import pkgutil
import botocore
import click
import collections
import hjson as json
import inspect
import importlib
import logging
import os
import pkg_resources
import random
import re
import requests
import slugify
import string
import sys
import tempfile
import time
import toml
import yaml
import zipfile
from click.exceptions import ClickException
from dateutil import parser
from datetime import datetime,timedelta
from zappa import Zappa, logger, API_GATEWAY_REGIONS
from util import (check_new_version_available, detect_django_settings,
detect_flask_apps, parse_s3_url, human_size)
CUSTOM_SETTINGS = [
'assume_policy',
'attach_policy',
'aws_region',
'delete_local_zip',
'delete_s3_zip',
'exclude',
'http_methods',
'integration_response_codes',
'method_header_types',
'method_response_codes',
'parameter_depth',
'role_name',
'touch',
]
##
# Main Input Processing
##
class ZappaCLI(object):
"""
ZappaCLI object is responsible for loading the settings,
handling the input arguments and executing the calls to the core library.
"""
# CLI
vargs = None
command = None
command_env = None
# Zappa settings
zappa = None
zappa_settings = None
load_credentials = True
# Specific settings
api_stage = None
app_function = None
aws_region = None
debug = None
prebuild_script = None
project_name = None
profile_name = None
lambda_arn = None
lambda_name = None
lambda_description = None
s3_bucket_name = None
settings_file = None
zip_path = None
handler_path = None
vpc_config = None
memory_size = None
use_apigateway = None
lambda_handler = None
django_settings = None
manage_roles = True
exception_handler = None
environment_variables = None
authorizer = None
stage_name_env_pattern = re.compile('^[a-zA-Z0-9_]+$')
def __init__(self):
self._stage_config_overrides = {} # change using self.override_stage_config_setting(key, val)
@property
def stage_config(self):
"""
A shortcut property for settings of a stage.
"""
def get_stage_setting(stage, extended_stages=None):
if extended_stages is None:
extended_stages = []
if stage in extended_stages:
raise RuntimeError(stage + " has already been extended to these settings. "
"There is a circular extends within the settings file.")
extended_stages.append(stage)
try:
stage_settings = dict(self.zappa_settings[stage].copy())
except KeyError:
raise ClickException("Cannot extend settings for undefined environment '" + stage + "'.")
extends_stage = self.zappa_settings[stage].get('extends', None)
if not extends_stage:
return stage_settings
extended_settings = get_stage_setting(stage=extends_stage, extended_stages=extended_stages)
extended_settings.update(stage_settings)
return extended_settings
settings = get_stage_setting(stage=self.api_stage)
# Backwards compatibility for the delete_zip setting, which was renamed to the more explicit delete_local_zip.
if u'delete_zip' in settings:
settings[u'delete_local_zip'] = settings.get(u'delete_zip')
settings.update(self.stage_config_overrides)
return settings
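# Hypothetical settings sketch (not from this repo) showing how `extends`
# composes stages: `prod` inherits everything from `dev` and overrides only
# the keys it declares, which is exactly what get_stage_setting() resolves above.
#
#   {
#       "dev":  {"app_function": "my_app.app", "s3_bucket": "my-zappa-bucket"},
#       "prod": {"extends": "dev", "debug": false}
#   }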
@property
def stage_config_overrides(self):
"""
Returns zappa_settings we forcefully override for the current stage
set by `self.override_stage_config_setting(key, value)`
"""
return getattr(self, '_stage_config_overrides', {}).get(self.api_stage, {})
def override_stage_config_setting(self, key, val):
"""
Forcefully override a setting set by zappa_settings (for the current stage only)
:param key: settings key
:param val: value
"""
self._stage_config_overrides = getattr(self, '_stage_config_overrides', {})
self._stage_config_overrides.setdefault(self.api_stage, {})[key] = val
def handle(self, argv=None):
"""
Main function.
Parses command, load settings and dispatches accordingly.
"""
desc = ('Zappa - Deploy Python applications to AWS Lambda'
' and API Gateway.\n')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'-v', '--version', action='version',
version=pkg_resources.get_distribution("zappa").version,
help='Print the zappa version'
)
parser.add_argument(
'-a', '--app_function', help='The WSGI application function.'
)
parser.add_argument(
'-s', '--settings_file', help='The path to a Zappa settings file.'
)
env_parser = argparse.ArgumentParser(add_help=False)
group = env_parser.add_mutually_exclusive_group()
all_help = ('Execute this command for all of our defined '
'Zappa environments.')
group.add_argument('--all', action='store_true', help=all_help)
group.add_argument('command_env', nargs='?')
##
# Certify
##
subparsers = parser.add_subparsers(title='subcommands', dest='command')
cert_parser = subparsers.add_parser(
'certify', parents=[env_parser],
help='Create and install SSL certificate'
)
cert_parser.add_argument(
'--no-cleanup', action='store_true',
help=("Don't remove certificate files from /tmp during certify."
" Dangerous.")
)
##
# Deploy
##
subparsers.add_parser(
'deploy', parents=[env_parser], help='Deploy application.'
)
subparsers.add_parser('init', help='Initialize Zappa app.')
##
# Package
##
package_parser = subparsers.add_parser(
'package', parents=[env_parser], help='Build the application zip package locally.'
)
##
# Invocation
##
invoke_parser = subparsers.add_parser(
'invoke', parents=[env_parser],
help='Invoke remote function.'
)
invoke_parser.add_argument(
'--raw', action='store_true',
help=('When invoking remotely, invoke this python as a string,'
' not as a modular path.')
)
invoke_parser.add_argument('command_rest')
##
# Manage
##
manage_parser = subparsers.add_parser(
'manage',
help='Invoke remote Django manage.py commands.'
)
rest_help = ("Command in the form of <env> <command>. <env> is not "
"required if --all is specified")
manage_parser.add_argument('--all', action='store_true', help=all_help)
manage_parser.add_argument('command_rest', nargs='+', help=rest_help)
##
# Rollback
##
def positive_int(s):
""" Ensure an arg is positive """
i = int(s)
if i < 0:
msg = "This argument must be positive (got {})".format(s)
raise argparse.ArgumentTypeError(msg)
return i
rollback_parser = subparsers.add_parser(
'rollback', parents=[env_parser],
help='Rollback deployed code to a previous version.'
)
rollback_parser.add_argument(
'-n', '--num-rollback', type=positive_int, default=0,
help='The number of versions to rollback.'
)
##
# Scheduling
##
subparsers.add_parser(
'schedule', parents=[env_parser],
help='Schedule functions to occur at regular intervals.'
)
##
# Status
##
status_parser = subparsers.add_parser(
'status', parents=[env_parser],
help='Show deployment status and event schedules.'
)
status_parser.add_argument(
'--json', action='store_true',
help='Returns status in JSON format.'
) # https://github.com/Miserlou/Zappa/issues/407
##
# Log Tailing
##
tail_parser = subparsers.add_parser(
'tail', parents=[env_parser], help='Tail deployment logs.'
)
tail_parser.add_argument(
'--no-color', action='store_true',
help="Don't color log tail output."
)
tail_parser.add_argument(
'--http', action='store_true',
help='Only show HTTP requests in tail output.'
)
tail_parser.add_argument(
'--non-http', action='store_true',
help='Only show non-HTTP requests in tail output.'
)
tail_parser.add_argument(
'--since', type=str, default="100000s",
help="Only show lines since a certain timeframe."
)
tail_parser.add_argument(
'--filter', type=str, default="",
help="Apply a filter pattern to the logs."
)
##
# Undeploy
##
undeploy_parser = subparsers.add_parser(
'undeploy', parents=[env_parser], help='Undeploy application.'
)
undeploy_parser.add_argument(
'--remove-logs', action='store_true',
help=('Removes log groups of api gateway and lambda task'
' during the undeployment.'),
)
undeploy_parser.add_argument(
'-y', '--yes', action='store_true', help='Auto confirm yes.'
)
##
# Unschedule
##
subparsers.add_parser('unschedule', parents=[env_parser],
help='Unschedule functions.')
##
# Updating
##
subparsers.add_parser(
'update', parents=[env_parser], help='Update deployed application.'
)
argcomplete.autocomplete(parser)
args = parser.parse_args(argv)
self.vargs = vars(args)
# Parse the input
# NOTE(rmoe): Special case for manage command
# The manage command can't have both command_env and command_rest
# arguments. Since they are both positional arguments argparse can't
# differentiate the two. This causes problems when used with --all.
# (e.g. "manage --all showmigrations admin" argparse thinks --all has
# been specified AND that command_env='showmigrations')
# By having command_rest collect everything but --all we can split it
# apart here instead of relying on argparse.
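# Illustrative (assumed) invocations and how they are split up here:
#   zappa manage dev showmigrations admin
#       -> command_env='dev',  command_rest=['showmigrations', 'admin']
#   zappa manage --all showmigrations admin
#       -> command_env=None,   command_rest=['showmigrations', 'admin']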
if args.command == 'manage' and not self.vargs.get('all'):
self.command_env = self.vargs['command_rest'].pop(0)
else:
self.command_env = self.vargs.get('command_env')
self.command = args.command
# We don't have any settings yet, so make those first!
# (Settings-based interactions will fail
# before a project has been initialized.)
if self.command == 'init':
self.init()
return
# Make sure there isn't a new version available
if not self.vargs.get('json'):
self.check_for_update()
# Load and Validate Settings File
self.load_settings_file(self.vargs.get('settings_file'))
# Should we execute this for all environments, or just one?
all_environments = self.vargs.get('all')
environments = []
if all_environments: # All envs!
environments = self.zappa_settings.keys()
else: # Just one env.
if not self.command_env:
# If there's only one environment defined in the settings,
# use that as the default.
if len(self.zappa_settings.keys()) == 1:
environments.append(self.zappa_settings.keys()[0])
else:
parser.error("Please supply an environment to interact with.")
else:
environments.append(self.command_env)
for environment in environments:
try:
self.dispatch_command(self.command, environment)
except ClickException as e:
# Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
e.show()
sys.exit(e.exit_code)
def dispatch_command(self, command, environment):
"""
Given a command to execute and environment,
execute that command.
"""
self.api_stage = environment
if command not in ['status', 'manage']:
click.echo("Calling " + click.style(command, fg="green", bold=True) + " for environment " +
click.style(self.api_stage, bold=True) + ".." )
# Explicitly define the app function.
if self.vargs['app_function'] is not None:
self.app_function = self.vargs['app_function']
# Load our settings, based on api_stage.
try:
self.load_settings(self.vargs['settings_file'])
except ValueError as e:
print("Error: {}".format(e.message))
sys.exit(-1)
self.callback('settings')
# Hand it off
if command == 'deploy': # pragma: no cover
self.deploy()
if command == 'package': # pragma: no cover
self.package()
elif command == 'update': # pragma: no cover
self.update()
elif command == 'rollback': # pragma: no cover
self.rollback(self.vargs['num_rollback'])
elif command == 'invoke': # pragma: no cover
if not self.vargs.get('command_rest'):
print("Please enter the function to invoke.")
return
self.invoke(self.vargs['command_rest'], raw_python=self.vargs['raw'])
elif command == 'manage': # pragma: no cover
if not self.vargs.get('command_rest'):
print("Please enter the management command to invoke.")
return
if not self.django_settings:
print("This command is for Django projects only!")
print("If this is a Django project, please define django_settings in your zappa_settings.")
return
command_tail = self.vargs.get('command_rest')
if len(command_tail) > 1:
command = " ".join(command_tail) # ex: zappa manage dev "shell --version"
else:
command = command_tail[0] # ex: zappa manage dev "shell --version"
self.invoke(command, command="manage")
elif command == 'tail': # pragma: no cover
self.tail(
colorize=(not self.vargs['no_color']),
http=self.vargs['http'],
non_http=self.vargs['non_http'],
since=self.vargs['since'],
filter_pattern=self.vargs['filter'],
)
elif command == 'undeploy': # pragma: no cover
self.undeploy(
noconfirm=self.vargs['yes'],
remove_logs=self.vargs['remove_logs']
)
elif command == 'schedule': # pragma: no cover
self.schedule()
elif command == 'unschedule': # pragma: no cover
self.unschedule()
elif command == 'status': # pragma: no cover
self.status(return_json=self.vargs['json'])
elif command == 'certify': # pragma: no cover
self.certify(no_cleanup=self.vargs['no_cleanup'])
##
# The Commands
##
def package(self):
"""
Only build the package
"""
# force not to delete the local zip
self.override_stage_config_setting('delete_local_zip', False)
# Execute the prebuild script
if self.prebuild_script:
self.execute_prebuild_script()
# Create the Lambda Zip
self.create_package()
self.callback('zip')
size = human_size(os.path.getsize(self.zip_path))
click.echo(click.style("Package created", fg="green", bold=True) + ": " + click.style(self.zip_path, bold=True) + " (" + size + ")")
def deploy(self):
"""
Package your project, upload it to S3, register the Lambda function
and create the API Gateway routes.
"""
# Execute the prebuild script
if self.prebuild_script:
self.execute_prebuild_script()
# Make sure this isn't already deployed.
deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
if len(deployed_versions) > 0:
raise ClickException("This application is " + click.style("already deployed", fg="red") +
" - did you mean to call " + click.style("update", bold=True) + "?")
# Make sure the necessary IAM execution roles are available
if self.manage_roles:
try:
self.zappa.create_iam_roles()
except botocore.client.ClientError:
raise ClickException(
click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!\n" +
"You may " + click.style("lack the necessary AWS permissions", bold=True) +
" to automatically manage a Zappa execution role.\n" +
"To fix this, see here: " +
click.style("https://github.com/Miserlou/Zappa#using-custom-aws-iam-roles-and-policies", bold=True)
+ '\n')
# Create the Lambda Zip
self.create_package()
self.callback('zip')
# Upload it to S3
success = self.zappa.upload_to_s3(
self.zip_path, self.s3_bucket_name)
if not success: # pragma: no cover
raise ClickException("Unable to upload to S3. Quitting.")
# If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
if self.stage_config.get('slim_handler', False):
# https://github.com/Miserlou/Zappa/issues/510
success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name)
if not success: # pragma: no cover
raise ClickException("Unable to upload handler to S3. Quitting.")
# Copy the project zip to the current project zip
current_project_name = '{0!s}_current_project.zip'.format(self.project_name)
success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name,
bucket_name=self.s3_bucket_name)
if not success: # pragma: no cover
raise ClickException("Unable to copy the zip to be the current project. Quitting.")
handler_file = self.handler_path
else:
handler_file = self.zip_path
# Fixes https://github.com/Miserlou/Zappa/issues/613
try:
self.lambda_arn = self.zappa.get_lambda_function(
function_name=self.lambda_name)
except botocore.client.ClientError:
# Register the Lambda function with that zip as the source
# You'll also need to define the path to your lambda_handler code.
self.lambda_arn = self.zappa.create_lambda_function(
bucket=self.s3_bucket_name,
s3_key=handler_file,
function_name=self.lambda_name,
handler=self.lambda_handler,
description=self.lambda_description,
vpc_config=self.vpc_config,
timeout=self.timeout_seconds,
memory_size=self.memory_size
)
# Schedule events for this deployment
self.schedule()
endpoint_url = ''
deployment_string = click.style("Deployment complete", fg="green", bold=True) + "!"
if self.use_apigateway:
# Create and configure the API Gateway
template = self.zappa.create_stack_template(self.lambda_arn,
self.lambda_name,
self.api_key_required,
self.integration_content_type_aliases,
self.iam_authorization,
self.authorizer,
self.cors)
self.zappa.update_stack(self.lambda_name, self.s3_bucket_name, wait=True)
# Deploy the API!
api_id = self.zappa.get_api_id(self.lambda_name)
endpoint_url = self.deploy_api_gateway(api_id)
deployment_string = deployment_string + ": {}".format(endpoint_url)
# Create/link API key
if self.api_key_required:
if self.api_key is None:
self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage)
else:
self.zappa.add_api_stage_to_api_key(api_key=self.api_key, api_id=api_id, stage_name=self.api_stage)
if self.stage_config.get('touch', True):
requests.get(endpoint_url)
# Finally, delete the local copy of our zip package
if self.stage_config.get('delete_local_zip', True):
self.remove_local_zip()
# Remove the project zip from S3.
self.remove_uploaded_zip()
self.callback('post')
click.echo(deployment_string)
def update(self):
"""
Repackage and update the function code.
"""
# Execute the prebuild script
if self.prebuild_script:
self.execute_prebuild_script()
# Temporary version check
try:
updated_time = 1472581018
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
conf = function_response['Configuration']
last_updated = parser.parse(conf['LastModified'])
last_updated_unix = time.mktime(last_updated.timetuple())
except Exception as e:
click.echo(click.style("Warning!", fg="red") + " Couldn't get function " + self.lambda_name +
" in " + self.zappa.aws_region + " - have you deployed yet?")
sys.exit(-1)
if last_updated_unix <= updated_time:
click.echo(click.style("Warning!", fg="red") +
" You may have upgraded Zappa since deploying this application. You will need to " +
click.style("redeploy", bold=True) + " for this deployment to work properly!")
# Make sure the necessary IAM execution roles are available
if self.manage_roles:
try:
self.zappa.create_iam_roles()
except botocore.client.ClientError:
click.echo(click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!")
click.echo("You may " + click.style("lack the necessary AWS permissions", bold=True) +
" to automatically manage a Zappa execution role.")
click.echo("To fix this, see here: " +
click.style("https://github.com/Miserlou/Zappa#using-custom-aws-iam-roles-and-policies",
bold=True))
sys.exit(-1)
# Create the Lambda Zip,
self.create_package()
self.callback('zip')
# Upload it to S3
success = self.zappa.upload_to_s3(self.zip_path, self.s3_bucket_name)
if not success: # pragma: no cover
raise ClickException("Unable to upload project to S3. Quitting.")
# If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
if self.stage_config.get('slim_handler', False):
# https://github.com/Miserlou/Zappa/issues/510
success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name)
if not success: # pragma: no cover
raise ClickException("Unable to upload handler to S3. Quitting.")
# Copy the project zip to the current project zip
current_project_name = '{0!s}_current_project.zip'.format(self.project_name)
success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name,
bucket_name=self.s3_bucket_name)
if not success: # pragma: no cover
raise ClickException("Unable to copy the zip to be the current project. Quitting.")
handler_file = self.handler_path
else:
handler_file = self.zip_path
# Register the Lambda function with that zip as the source
# You'll also need to define the path to your lambda_handler code.
self.lambda_arn = self.zappa.update_lambda_function(
self.s3_bucket_name, handler_file, self.lambda_name)
# Remove the uploaded zip from S3, because it is now registered..
self.remove_uploaded_zip()
# Update the configuration, in case there are changes.
self.lambda_arn = self.zappa.update_lambda_configuration(lambda_arn=self.lambda_arn,
function_name=self.lambda_name,
handler=self.lambda_handler,
description=self.lambda_description,
vpc_config=self.vpc_config,
timeout=self.timeout_seconds,
memory_size=self.memory_size)
# Finally, delete the local copy of our zip package
if self.stage_config.get('delete_local_zip', True):
self.remove_local_zip()
if self.use_apigateway:
self.zappa.create_stack_template(self.lambda_arn,
self.lambda_name,
self.api_key_required,
self.integration_content_type_aliases,
self.iam_authorization,
self.authorizer,
self.cors)
self.zappa.update_stack(self.lambda_name, self.s3_bucket_name, wait=True, update_only=True)
api_id = self.zappa.get_api_id(self.lambda_name)
endpoint_url = self.deploy_api_gateway(api_id)
if self.stage_config.get('domain', None):
endpoint_url = self.stage_config.get('domain')
else:
endpoint_url = None
self.schedule()
self.callback('post')
if endpoint_url and 'https://' not in endpoint_url:
endpoint_url = 'https://' + endpoint_url
deployed_string = "Your updated Zappa deployment is " + click.style("live", fg='green', bold=True) + "!"
if self.use_apigateway:
deployed_string = deployed_string + ": " + click.style("{}".format(endpoint_url), bold=True)
api_url = None
if endpoint_url and 'amazonaws.com' not in endpoint_url:
api_url = self.zappa.get_api_url(
self.lambda_name,
self.api_stage)
if endpoint_url != api_url:
deployed_string = deployed_string + " (" + api_url + ")"
if self.stage_config.get('touch', True):
if api_url:
requests.get(api_url)
elif endpoint_url:
requests.get(endpoint_url)
click.echo(deployed_string)
def rollback(self, revision):
"""
Rolls back the currently deployed Lambda code to a previous revision.
"""
print("Rolling back..")
self.zappa.rollback_lambda_function_version(
self.lambda_name, versions_back=revision)
print("Done!")
def tail(self, since, filter_pattern, limit=10000, keep_open=True, colorize=True, http=False, non_http=False):
"""
Tail this function's logs.
if keep_open, do so repeatedly, printing any new logs
"""
try:
from util import string_to_timestamp
since_stamp = string_to_timestamp(since)
last_since = since_stamp
while True:
new_logs = self.zappa.fetch_logs(
self.lambda_name,
start_time=since_stamp,
limit=limit,
filter_pattern=filter_pattern,
)
new_logs = [ e for e in new_logs if e['timestamp'] > last_since ]
self.print_logs(new_logs, colorize, http, non_http)
if not keep_open:
break
if new_logs:
last_since = new_logs[-1]['timestamp']
time.sleep(1)
except KeyboardInterrupt: # pragma: no cover
# Die gracefully
try:
sys.exit(0)
except SystemExit:
os._exit(130)
def undeploy(self, noconfirm=False, remove_logs=False):
"""
Tear down an existing deployment.
"""
if not noconfirm: # pragma: no cover
confirm = raw_input("Are you sure you want to undeploy? [y/n] ")
if confirm != 'y':
return
if self.use_apigateway:
if remove_logs:
self.zappa.remove_api_gateway_logs(self.lambda_name)
domain_name = self.stage_config.get('domain', None)
# Only remove the api key when not specified
if self.api_key_required and self.api_key is None:
api_id = self.zappa.get_api_id(self.lambda_name)
self.zappa.remove_api_key(api_id, self.api_stage)
gateway_id = self.zappa.undeploy_api_gateway(
self.lambda_name,
domain_name=domain_name
)
self.unschedule() # removes event triggers, including warm up event.
self.zappa.delete_lambda_function(self.lambda_name)
if remove_logs:
self.zappa.remove_lambda_function_logs(self.lambda_name)
click.echo(click.style("Done", fg="green", bold=True) + "!")
def schedule(self):
"""
Given a list of functions and a schedule to execute them,
set up their regular execution.
"""
events = self.stage_config.get('events', [])
if events:
if not isinstance(events, list): # pragma: no cover
print("Events must be supplied as a list.")
return
for event in events:
self.collision_warning(event.get('function'))
if self.stage_config.get('keep_warm', True):
if not events:
events = []
keep_warm_rate = self.stage_config.get('keep_warm_expression', "rate(4 minutes)")
events.append({'name': 'zappa-keep-warm',
'function': 'handler.keep_warm_callback',
'expression': keep_warm_rate,
'description': 'Zappa Keep Warm - {}'.format(self.lambda_name)})
if self.stage_config.get('lets_encrypt_expression'):
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
conf = function_response['Configuration']
timeout = conf['Timeout']
if timeout < 60:
click.echo(click.style("Unable to schedule certificate autorenewer!", fg="red", bold=True) +
" Please redeploy with a " + click.style("timeout_seconds", bold=True) + " greater than 60!")
else:
events.append({'name': 'zappa-le-certify',
'function': 'handler.certify_callback',
'expression': self.stage_config.get('lets_encrypt_expression'),
'description': 'Zappa LE Certificate Renewer - {}'.format(self.lambda_name)})
if events:
try:
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
click.echo(click.style("Function does not exist", fg="yellow") + ", please " +
click.style("deploy", bold=True) + "first. Ex:" +
click.style("zappa deploy {}.".format(self.api_stage), bold=True))
sys.exit(-1)
print("Scheduling..")
self.zappa.schedule_events(
lambda_arn=function_response['Configuration']['FunctionArn'],
lambda_name=self.lambda_name,
events=events
)
def unschedule(self):
"""
Given a list of scheduled functions,
tear down their regular execution.
"""
# Run even if events are not defined to remove previously existing ones (thus default to []).
events = self.stage_config.get('events', [])
if not isinstance(events, list): # pragma: no cover
print("Events must be supplied as a list.")
return
function_arn = None
try:
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
function_arn = function_response['Configuration']['FunctionArn']
except botocore.exceptions.ClientError as e: # pragma: no cover
raise ClickException("Function does not exist, you should deploy first. Ex: zappa deploy {}. "
"Proceeding to unschedule CloudWatch based events.".format(self.api_stage))
print("Unscheduling..")
self.zappa.unschedule_events(
lambda_name=self.lambda_name,
lambda_arn=function_arn,
events=events,
)
def invoke(self, function_name, raw_python=False, command=None):
"""
Invoke a remote function.
"""
# There are three likely scenarios for 'command' here:
# command, which is a modular function path
# raw_command, which is a string of python to execute directly
# manage, which is a Django-specific management command invocation
key = command if command is not None else 'command'
if raw_python:
command = {'raw_command': function_name}
else:
command = {key: function_name}
# Can't use hjson
import json as json
response = self.zappa.invoke_lambda_function(
self.lambda_name,
json.dumps(command),
invocation_type='RequestResponse',
)
if 'LogResult' in response:
print(base64.b64decode(response['LogResult']))
else:
print(response)
def status(self, return_json=False):
"""
Describe the status of the current deployment.
"""
def tabular_print(title, value):
"""
Convenience function for printing formatted table items.
"""
click.echo('%-*s%s' % (32, click.style("\t" + title, fg='green') + ':', str(value)))
return
# Lambda Env Details
lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
if not lambda_versions:
raise ClickException(click.style("No Lambda %s detected in %s - have you deployed yet?" %
(self.lambda_name, self.zappa.aws_region), fg='red'))
status_dict = collections.OrderedDict()
status_dict["Lambda Versions"] = len(lambda_versions)
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
conf = function_response['Configuration']
self.lambda_arn = conf['FunctionArn']
status_dict["Lambda Name"] = self.lambda_name
status_dict["Lambda ARN"] = self.lambda_arn
status_dict["Lambda Role ARN"] = conf['Role']
status_dict["Lambda Handler"] = conf['Handler']
status_dict["Lambda Code Size"] = conf['CodeSize']
status_dict["Lambda Version"] = conf['Version']
status_dict["Lambda Last Modified"] = conf['LastModified']
status_dict["Lambda Memory Size"] = conf['MemorySize']
status_dict["Lambda Timeout"] = conf['Timeout']
status_dict["Lambda Runtime"] = conf['Runtime']
if 'VpcConfig' in conf.keys():
status_dict["Lambda VPC ID"] = conf.get('VpcConfig', {}).get('VpcId', 'Not assigned')
else:
status_dict["Lambda VPC ID"] = None
# Calculated statistics
try:
function_invocations = self.zappa.cloudwatch.get_metric_statistics(
Namespace='AWS/Lambda',
MetricName='Invocations',
StartTime=datetime.utcnow()-timedelta(days=1),
EndTime=datetime.utcnow(),
Period=1440,
Statistics=['Sum'],
Dimensions=[{'Name': 'FunctionName',
'Value': '{}'.format(self.lambda_name)}]
)['Datapoints'][0]['Sum']
except Exception as e:
function_invocations = 0
try:
function_errors = self.zappa.cloudwatch.get_metric_statistics(
Namespace='AWS/Lambda',
MetricName='Errors',
StartTime=datetime.utcnow()-timedelta(days=1),
EndTime=datetime.utcnow(),
Period=1440,
Statistics=['Sum'],
Dimensions=[{'Name': 'FunctionName',
'Value': '{}'.format(self.lambda_name)}]
)['Datapoints'][0]['Sum']
except Exception as e:
function_errors = 0
try:
error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100)
except Exception:
error_rate = "Error calculating"
status_dict["Invocations (24h)"] = int(function_invocations)
status_dict["Errors (24h)"] = int(function_errors)
status_dict["Error Rate (24h)"] = error_rate
# URLs
if self.use_apigateway:
api_url = self.zappa.get_api_url(
self.lambda_name,
self.api_stage)
status_dict["API Gateway URL"] = api_url
# Api Keys
api_id = self.zappa.get_api_id(self.lambda_name)
for api_key in self.zappa.get_api_keys(api_id, self.api_stage):
status_dict["API Gateway x-api-key"] = api_key
# There literally isn't a better way to do this.
# AWS provides no way to tie an API Gateway domain name to its Lambda function.
domain_url = self.stage_config.get('domain', None)
if domain_url:
status_dict["Domain URL"] = 'https://' + domain_url
else:
status_dict["Domain URL"] = "None Supplied"
# Scheduled Events
event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn)
status_dict["Num. Event Rules"] = len(event_rules)
if len(event_rules) > 0:
status_dict['Events'] = []
for rule in event_rules:
event_dict = {}
rule_name = rule['Name']
event_dict["Event Rule Name"] = rule_name
event_dict["Event Rule Schedule"] = rule.get(u'ScheduleExpression', None)
event_dict["Event Rule State"] = rule.get(u'State', None).title()
event_dict["Event Rule ARN"] = rule.get(u'Arn', None)
status_dict['Events'].append(event_dict)
if return_json:
# Putting the status in machine readable format
# https://github.com/Miserlou/Zappa/issues/407
print(json.dumpsJSON(status_dict))
else:
click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ")
for k, v in status_dict.items():
if k == 'Events':
# Events are a list of dicts
for event in v:
for item_k, item_v in event.items():
tabular_print(item_k, item_v)
else:
tabular_print(k, v)
# TODO: S3/SQS/etc. type events?
return True
def check_stage_name(self, stage_name):
"""
Make sure the stage name matches the AWS-allowed pattern
(calls to apigateway_client.create_deployment will fail with the error
message "ClientError: An error occurred (BadRequestException) when
calling the CreateDeployment operation: Stage name only allows
a-zA-Z0-9_" if the pattern does not match)
"""
if self.stage_name_env_pattern.match(stage_name):
return True
raise ValueError("AWS requires stage name to match a-zA-Z0-9_")
def check_environment(self, environment):
"""
Make sure the environment contains only strings
(since putenv needs a string)
"""
non_strings = []
for k,v in environment.iteritems():
if not isinstance(v, basestring):
non_strings.append(k)
if non_strings:
raise ValueError("The following environment variables are not strings: {}".format(", ".join(non_strings)))
else:
return True
def init(self, settings_file="zappa_settings.json"):
"""
Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.
This should probably be broken up into a few separate components once it's stable.
Testing these raw_inputs requires monkeypatching with mock, which isn't pretty.
"""
# Ensure that we don't already have a zappa_settings file.
if os.path.isfile(settings_file):
raise ClickException("This project is " + click.style("already initialized", fg="red", bold=True) + "!")
# Ensure P2 until Lambda supports it.
if sys.version_info >= (3,0): # pragma: no cover
raise ClickException("Zappa curently only works with Python 2, until AWS Lambda adds Python 3 support.")
# Ensure inside virtualenv.
if not (hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)): # pragma: no cover
raise ClickException(
"Zappa must be run inside of a virtual environment!\n"
"Learn more about virtual environments here: http://docs.python-guide.org/en/latest/dev/virtualenvs/")
# Explain system.
click.echo(click.style(u"""\n███████╗ █████╗ ██████╗ ██████╗ █████╗
╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗
███╔╝ ███████║██████╔╝██████╔╝███████║
███╔╝ ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║
███████╗██║ ██║██║ ██║ ██║ ██║
╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝\n""", fg='green', bold=True))
click.echo(click.style("Welcome to ", bold=True) + click.style("Zappa", fg='green', bold=True) + click.style("!\n", bold=True))
click.echo(click.style("Zappa", bold=True) + " is a system for running server-less Python web applications"
" on AWS Lambda and AWS API Gateway.")
click.echo("This `init` command will help you create and configure your new Zappa deployment.")
click.echo("Let's get started!\n")
# Create Env
while True:
click.echo("Your Zappa configuration can support multiple production environments, like '" +
click.style("dev", bold=True) + "', '" + click.style("staging", bold=True) + "', and '" +
click.style("production", bold=True) + "'.")
env = raw_input("What do you want to call this environment (default 'dev'): ") or "dev"
try:
self.check_stage_name(env)
break
except ValueError:
click.echo(click.style("Environment names must match a-zA-Z0-9_", fg='red'))
# Create Bucket
click.echo("\nYour Zappa deployments will need to be uploaded to a " + click.style("private S3 bucket", bold=True) + ".")
click.echo("If you don't have a bucket yet, we'll create one for you too.")
default_bucket = "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9))
bucket = raw_input("What do you want call your bucket? (default '%s'): " % default_bucket) or default_bucket
# TODO actually create bucket.
# Detect Django/Flask
try: # pragma: no cover
import django
has_django = True
except ImportError as e:
has_django = False
try: # pragma: no cover
import flask
has_flask = True
except ImportError as e:
has_flask = False
print('')
# App-specific
if has_django: # pragma: no cover
click.echo("It looks like this is a " + click.style("Django", bold=True) + " application!")
click.echo("What is the " + click.style("module path", bold=True) + " to your projects's Django settings?")
django_settings = None
matches = detect_django_settings()
while django_settings in [None, '']:
if matches:
click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True))
django_settings = raw_input("Where are your project's settings? (default '%s'): " % matches[0]) or matches[0]
else:
click.echo("(This will likely be something like 'your_project.settings')")
django_settings = raw_input("Where are your project's settings?: ")
django_settings = django_settings.replace("'", "")
django_settings = django_settings.replace('"', "")
else:
matches = None
if has_flask:
click.echo("It looks like this is a " + click.style("Flask", bold=True) + " application.")
matches = detect_flask_apps()
click.echo("What's the " + click.style("modular path", bold=True) + " to your app's function?")
click.echo("This will likely be something like 'your_module.app'.")
app_function = None
while app_function in [None, '']:
if matches:
click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True))
app_function = raw_input("Where is your app's function? (default '%s'): " % matches[0]) or matches[0]
else:
app_function = raw_input("Where is your app's function?: ")
app_function = app_function.replace("'", "")
app_function = app_function.replace('"', "")
# TODO: Create VPC?
# Memory size? Time limit?
# Domain? LE keys? Region?
# 'Advanced Settings' mode?
# Globalize
click.echo("\nYou can optionally deploy to " + click.style("all available regions", bold=True) + " in order to provide fast global service.")
click.echo("If you are using Zappa for the first time, you probably don't want to do this!")
global_deployment = False
while True:
global_type = raw_input("Would you like to deploy this application to " + click.style("globally", bold=True) + "? (default 'n') [y/n/(p)rimary]: ")
if not global_type:
break
if global_type.lower() in ["y", "yes", "p", "primary"]:
global_deployment = True
break
if global_type.lower() in ["n", "no"]:
global_deployment = False
break
if global_deployment:
regions = API_GATEWAY_REGIONS
if global_type.lower() in ["p", "primary"]:
envs = [{env + '_' + region.replace('-', '_'): { 'aws_region': region}} for region in regions if '-1' in region]
else:
envs = [{env + '_' + region.replace('-', '_'): { 'aws_region': region}} for region in regions]
else:
region = None # assume system default
envs = [{env: {}}]
zappa_settings = {}
for each_env in envs:
# Honestly, this could be cleaner.
env_name = each_env.keys()[0]
env_dict = each_env[env_name]
env_bucket = bucket
if global_deployment:
env_bucket = bucket.replace('-', '_') + '_' + env_name
env_zappa_settings = {
env_name: {
's3_bucket': env_bucket,
}
}
if env_dict.has_key('aws_region'):
env_zappa_settings[env_name]['aws_region'] = env_dict.get('aws_region')
zappa_settings.update(env_zappa_settings)
if has_django:
zappa_settings[env_name]['django_settings'] = django_settings
else:
zappa_settings[env_name]['app_function'] = app_function
import json as json # hjson is fine for loading, not fine for writing.
zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4)
click.echo("\nOkay, here's your " + click.style("zappa_settings.js", bold=True) + ":\n")
click.echo(click.style(zappa_settings_json, fg="yellow", bold=False))
confirm = raw_input("\nDoes this look " + click.style("okay", bold=True, fg="green") + "? (default 'y') [y/n]: ") or 'yes'
if confirm[0] not in ['y', 'Y', 'yes', 'YES']:
click.echo("" + click.style("Sorry", bold=True, fg='red') + " to hear that! Please init again.")
return
# Write
with open("zappa_settings.json", "w") as zappa_settings_file:
zappa_settings_file.write(zappa_settings_json)
if global_deployment:
click.echo("\n" + click.style("Done", bold=True) + "! You can also " + click.style("deploy all", bold=True) + " by executing:\n")
click.echo(click.style("\t$ zappa deploy --all", bold=True))
click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n")
click.echo(click.style("\t$ zappa update --all", bold=True))
else:
click.echo("\n" + click.style("Done", bold=True) + "! Now you can " + click.style("deploy", bold=True) + " your Zappa application by executing:\n")
click.echo(click.style("\t$ zappa deploy %s" % env, bold=True))
click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n")
click.echo(click.style("\t$ zappa update %s" % env, bold=True))
click.echo("\nTo learn more, check out our project page on " + click.style("GitHub", bold=True) +
" here: " + click.style("https://github.com/Miserlou/Zappa", fg="cyan", bold=True))
click.echo("and stop by our " + click.style("Slack", bold=True) + " channel here: " +
click.style("http://bit.do/zappa", fg="cyan", bold=True))
click.echo("\nEnjoy!,")
click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
return
def certify(self, no_cleanup=False):
"""
Register or update a domain certificate for this env.
"""
# Give warning on --no-cleanup
if no_cleanup:
clean_up = False
click.echo(click.style("Warning!", fg="red", bold=True) + " You are calling certify with " +
click.style("--no-cleanup", bold=True) +
". Your certificate files will remain in the system temporary directory after this command executes!")
else:
clean_up = True
# Make sure this isn't already deployed.
deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
if len(deployed_versions) == 0:
raise ClickException("This application " + click.style("isn't deployed yet", fg="red") +
" - did you mean to call " + click.style("deploy", bold=True) + "?")
# Get install account_key to /tmp/account_key.pem
account_key_location = self.stage_config.get('lets_encrypt_key')
domain = self.stage_config.get('domain')
cert_location = self.stage_config.get('certificate', None)
cert_key_location = self.stage_config.get('certificate_key', None)
cert_chain_location = self.stage_config.get('certificate_chain', None)
if not domain:
raise ClickException("Can't certify a domain without " + click.style("domain", fg="red", bold=True) + " configured!")
if not cert_location:
if not account_key_location:
raise ClickException("Can't certify a domain without " + click.style("lets_encrypt_key", fg="red", bold=True) + " configured!")
if account_key_location.startswith('s3://'):
bucket, key_name = parse_s3_url(account_key_location)
self.zappa.s3_client.download_file(bucket, key_name, '/tmp/account.key')
else:
from shutil import copyfile
copyfile(account_key_location, '/tmp/account.key')
else:
if not cert_location or not cert_key_location or not cert_chain_location:
raise ClickException("Can't certify a domain without " +
click.style("certificate, certificate_key and certificate_chain", fg="red", bold=True) + " configured!")
# Read the supplied certificates.
with open(cert_location) as f:
certificate_body = f.read()
with open(cert_key_location) as f:
certificate_private_key = f.read()
with open(cert_chain_location) as f:
certificate_chain = f.read()
click.echo("Certifying domain " + click.style(domain, fg="green", bold=True) + "..")
# Get cert and update domain.
if not cert_location:
from letsencrypt import get_cert_and_update_domain, cleanup
cert_success = get_cert_and_update_domain(
self.zappa,
self.lambda_name,
self.api_stage,
domain,
clean_up
)
else:
if not self.zappa.get_domain_name(domain):
self.zappa.create_domain_name(
domain,
domain + "-Zappa-Cert",
certificate_body,
certificate_private_key,
certificate_chain,
self.lambda_name,
self.api_stage
)
print("Created a new domain name. Please note that it can take up to 40 minutes for this domain to be "
"created and propagated through AWS, but it requires no further work on your part.")
else:
self.zappa.update_domain_name(
domain,
domain + "-Zappa-Cert",
certificate_body,
certificate_private_key,
certificate_chain
)
cert_success = True
# Deliberately undocumented feature (for now, at least.)
# We are giving the user the ability to shoot themselves in the foot.
# _This is probably not a good idea._
# However, I am sick and tired of hitting the Let's Encrypt cert
# limit while testing.
if clean_up:
cleanup()
if cert_success:
click.echo("Certificate " + click.style("updated", fg="green", bold=True) + "!")
else:
click.echo(click.style("Failed", fg="red", bold=True) + " to generate or install certificate! :(")
click.echo("\n==============\n")
shamelessly_promote()
##
# Utility
##
def callback(self, position):
"""
Allows the execution of custom code between creation of the zip file and deployment to AWS.
:return: None
"""
callbacks = self.stage_config.get('callbacks', {})
callback = callbacks.get(position)
if callback:
(mod_path, cb_func_name) = callback.rsplit('.', 1)
try: # Prefer callback in working directory
if mod_path.count('.') >= 1: # Callback function is nested in a folder
(mod_folder_path, mod_name) = mod_path.rsplit('.', 1)
mod_folder_path_fragments = mod_folder_path.split('.')
working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
else:
mod_name = mod_path
working_dir = os.getcwd()
working_dir_importer = pkgutil.get_importer(working_dir)
module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)
except (ImportError, AttributeError):
try: # Callback func might be in virtualenv
module_ = importlib.import_module(mod_path)
except ImportError: # pragma: no cover
raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
"import {position} callback ".format(position=position),
bold=True) + 'module: "{mod_path}"'.format(mod_path=click.style(mod_path, bold=True)))
if not hasattr(module_, cb_func_name): # pragma: no cover
raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
"find {position} callback ".format(position=position), bold=True) + 'function: "{cb_func_name}" '.format(
cb_func_name=click.style(cb_func_name, bold=True)) + 'in module "{mod_path}"'.format(mod_path=mod_path))
cb_func = getattr(module_, cb_func_name)
cb_func(self) # Call the function passing self
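# Hypothetical settings sketch (not from this repo): wiring a 'zip' callback so
# custom code runs after packaging but before upload. my_callbacks.py and
# after_zip are assumed names.
#
#   "callbacks": {"zip": "my_callbacks.after_zip"}
#
# where my_callbacks.py defines `def after_zip(zappa_cli): ...` and receives
# this ZappaCLI instance as its single argument.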
def check_for_update(self):
"""
Print a warning if there's a new Zappa version available.
"""
try:
version = pkg_resources.require("zappa")[0].version
updateable = check_new_version_available(version)
if updateable:
click.echo(click.style("Important!", fg="yellow", bold=True) +
" A new version of " + click.style("Zappa", bold=True) + " is available!")
click.echo("Upgrade with: " + click.style("pip install zappa --upgrade", bold=True))
click.echo("Visit the project page on GitHub to see the latest changes: " +
click.style("https://github.com/Miserlou/Zappa", bold=True))
except Exception as e: # pragma: no cover
print(e)
return
def load_settings(self, settings_file=None, session=None):
"""
Load the local zappa_settings file.
An existing boto session can be supplied, though this is likely for testing purposes.
Returns the loaded Zappa object.
"""
# Ensure we're passed a valid settings file.
if not settings_file:
settings_file = self.get_json_or_yaml_settings()
if not os.path.isfile(settings_file):
raise ClickException("Please configure your zappa_settings file.")
# Load up file
self.load_settings_file(settings_file)
# Make sure that the environments are valid names:
for stage_name in self.zappa_settings.keys():
try:
self.check_stage_name(stage_name)
except ValueError:
raise ValueError("API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(stage_name))
# Make sure that this environment is in our settings
if self.api_stage not in self.zappa_settings.keys():
raise ClickException("Please define '{0!s}' in your Zappa settings.".format(self.api_stage))
# We need a working title for this project. Use one if supplied, else cwd dirname.
if 'project_name' in self.stage_config: # pragma: no cover
self.project_name = self.stage_config['project_name']
else:
self.project_name = slugify.slugify(os.getcwd().split(os.sep)[-1])[:15]
if len(self.project_name) > 15: # pragma: no cover
click.echo(click.style("Warning", fg="red", bold=True) + "! Your " + click.style("project_name", bold=True) +
" may be too long to deploy! Please make it <16 characters.")
# The name of the actual AWS Lambda function, ex, 'helloworld-dev'
# Django's slugify doesn't replace _, but this does.
self.lambda_name = slugify.slugify(self.project_name + '-' + self.api_stage)
# Load environment-specific settings
self.s3_bucket_name = self.stage_config.get('s3_bucket', "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9)))
self.vpc_config = self.stage_config.get('vpc_config', {})
self.memory_size = self.stage_config.get('memory_size', 512)
self.app_function = self.stage_config.get('app_function', None)
self.exception_handler = self.stage_config.get('exception_handler', None)
self.aws_region = self.stage_config.get('aws_region', None)
self.debug = self.stage_config.get('debug', True)
self.prebuild_script = self.stage_config.get('prebuild_script', None)
self.profile_name = self.stage_config.get('profile_name', None)
self.log_level = self.stage_config.get('log_level', "DEBUG")
self.domain = self.stage_config.get('domain', None)
self.timeout_seconds = self.stage_config.get('timeout_seconds', 30)
# Provide legacy support for `use_apigateway`, now `apigateway_enabled`.
# https://github.com/Miserlou/Zappa/issues/490
# https://github.com/Miserlou/Zappa/issues/493
self.use_apigateway = self.stage_config.get('use_apigateway', True)
if self.use_apigateway:
self.use_apigateway = self.stage_config.get('apigateway_enabled', True)
self.integration_content_type_aliases = self.stage_config.get('integration_content_type_aliases', {})
self.lambda_handler = self.stage_config.get('lambda_handler', 'handler.lambda_handler')
# DEPRECATED. https://github.com/Miserlou/Zappa/issues/456
self.remote_env_bucket = self.stage_config.get('remote_env_bucket', None)
self.remote_env_file = self.stage_config.get('remote_env_file', None)
self.remote_env = self.stage_config.get('remote_env', None)
self.settings_file = self.stage_config.get('settings_file', None)
self.django_settings = self.stage_config.get('django_settings', None)
self.manage_roles = self.stage_config.get('manage_roles', True)
self.api_key_required = self.stage_config.get('api_key_required', False)
self.api_key = self.stage_config.get('api_key')
self.iam_authorization = self.stage_config.get('iam_authorization', False)
self.cors = self.stage_config.get("cors", None)
self.lambda_description = self.stage_config.get('lambda_description', "Zappa Deployment")
self.environment_variables = self.stage_config.get('environment_variables', {})
self.check_environment(self.environment_variables)
self.authorizer = self.stage_config.get('authorizer', {})
self.zappa = Zappa( boto_session=session,
profile_name=self.profile_name,
aws_region=self.aws_region,
load_credentials=self.load_credentials
)
for setting in CUSTOM_SETTINGS:
if setting in self.stage_config:
setting_val = self.stage_config[setting]
# Read the policy file contents.
if setting.endswith('policy'):
with open(setting_val, 'r') as f:
setting_val = f.read()
setattr(self.zappa, setting, setting_val)
if self.app_function:
self.collision_warning(self.app_function)
if self.app_function[-3:] == '.py':
click.echo(click.style("Warning!", fg="red", bold=True) +
" Your app_function is pointing to a " + click.style("file and not a function", bold=True) +
"! It should probably be something like 'my_file.app', not 'my_file.py'!")
return self.zappa
def get_json_or_yaml_settings(self, settings_name="zappa_settings"):
"""
Return zappa_settings path as JSON or YAML (or TOML), as appropriate.
"""
zs_json = settings_name + ".json"
zs_yaml = settings_name + ".yml"
zs_toml = settings_name + ".toml"
# Must have at least one
if not os.path.isfile(zs_json) \
and not os.path.isfile(zs_yaml) \
and not os.path.isfile(zs_toml):
raise ClickException("Please configure a zappa_settings file.")
# Prefer JSON
if os.path.isfile(zs_json):
settings_file = zs_json
elif os.path.isfile(zs_toml):
settings_file = zs_toml
else:
settings_file = zs_yaml
return settings_file
def load_settings_file(self, settings_file=None):
"""
Load our settings file.
"""
if not settings_file:
settings_file = self.get_json_or_yaml_settings()
if not os.path.isfile(settings_file):
raise ClickException("Please configure your zappa_settings file.")
if '.yml' in settings_file:
with open(settings_file) as yaml_file:
try:
self.zappa_settings = yaml.load(yaml_file)
except ValueError: # pragma: no cover
raise ValueError("Unable to load the Zappa settings YAML. It may be malformed.")
elif '.toml' in settings_file:
with open(settings_file) as toml_file:
try:
self.zappa_settings = toml.load(toml_file)
except ValueError: # pragma: no cover
raise ValueError("Unable to load the Zappa settings TOML. It may be malformed.")
else:
with open(settings_file) as json_file:
try:
self.zappa_settings = json.load(json_file)
except ValueError: # pragma: no cover
raise ValueError("Unable to load the Zappa settings JSON. It may be malformed.")
def create_package(self):
"""
Ensure that the package can be properly configured,
and then create it.
"""
# Create the Lambda zip package (includes the project and the virtual environment)
# Also define the path to the handler file so it can be copied to the zip
# root for Lambda.
current_file = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
handler_file = os.path.join(current_file, 'handler.py')
# Create the zip file(s)
if self.stage_config.get('slim_handler', False):
# Create two zips. One with the application and the other with just the handler.
# https://github.com/Miserlou/Zappa/issues/510
self.zip_path = self.zappa.create_lambda_zip(
prefix=self.lambda_name,
use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
exclude=self.stage_config.get('exclude', [])
)
# Make sure the normal venv is not included in the handler's zip
exclude = self.stage_config.get('exclude', [])
cur_venv = self.zappa.get_current_venv()
exclude.append(cur_venv.split('/')[-1])
self.handler_path = self.zappa.create_lambda_zip(
prefix='handler_{0!s}'.format(self.lambda_name),
venv=self.zappa.create_handler_venv(),
handler_file=handler_file,
slim_handler=True,
exclude=exclude
)
else:
# Create a single zip that has the handler and application
self.zip_path = self.zappa.create_lambda_zip(
prefix=self.lambda_name,
handler_file=handler_file,
use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
exclude=self.stage_config.get(
'exclude',
# Exclude packages already built into the Python Lambda environment
# https://github.com/Miserlou/Zappa/issues/556
["boto3", "dateutil", "botocore", "s3transfer", "six.py", "jmespath", "concurrent"])
)
# Warn if this is too large for Lambda.
file_stats = os.stat(self.zip_path)
if file_stats.st_size > 52428800: # pragma: no cover
print('\n\nWarning: Application zip package is likely to be too large for AWS Lambda. '
'Try setting "slim_handler" to true in your Zappa settings file.\n\n')
# Throw custom settings into the zip that handles requests
if self.stage_config.get('slim_handler', False):
handler_zip = self.handler_path
else:
handler_zip = self.zip_path
with zipfile.ZipFile(handler_zip, 'a') as lambda_zip:
settings_s = "# Generated by Zappa\n"
if self.app_function:
if '.' not in self.app_function: # pragma: no cover
raise ClickException("Your " + click.style("app_function", fg='red', bold=True) + " value is not a modular path." +
" It needs to be in the format `" + click.style("your_module.your_app_object", bold=True) + "`.")
app_module, app_function = self.app_function.rsplit('.', 1)
settings_s = settings_s + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(app_module, app_function)
if self.exception_handler:
settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
else:
settings_s += "EXCEPTION_HANDLER=None\n"
if self.debug:
settings_s = settings_s + "DEBUG=True\n"
else:
settings_s = settings_s + "DEBUG=False\n"
settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))
# If we're on a domain, we don't need to define the /<<env>> in
# the WSGI PATH
if self.domain:
settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
else:
settings_s = settings_s + "DOMAIN=None\n"
# Pass through remote config bucket and path
if self.remote_env:
settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(
self.remote_env
)
# DEPRECATED. Use remote_env instead.
elif self.remote_env_bucket and self.remote_env_file:
settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
self.remote_env_bucket, self.remote_env_file
)
# Local envs
env_dict = {}
if self.aws_region:
env_dict['AWS_REGION'] = self.aws_region
env_dict.update(dict(self.environment_variables))
# Environment variable keys can't be Unicode
# https://github.com/Miserlou/Zappa/issues/604
try:
env_dict = dict((k.encode('ascii'), v) for (k, v) in env_dict.items())
except Exception: # pragma: no cover
raise ValueError("Environment variable keys must not be unicode.")
settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(
env_dict
)
# We can be environment-aware
settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))
if self.settings_file:
settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format((self.settings_file))
else:
settings_s = settings_s + "SETTINGS_FILE=None\n"
if self.django_settings:
settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format((self.django_settings))
else:
settings_s = settings_s + "DJANGO_SETTINGS=None\n"
# If slim handler, path to project zip
if self.stage_config.get('slim_handler', False):
settings_s += "ZIP_PATH='s3://{0!s}/{1!s}_current_project.zip'\n".format(self.s3_bucket_name, self.project_name)
# AWS Events function mapping
event_mapping = {}
events = self.stage_config.get('events', [])
for event in events:
arn = event.get('event_source', {}).get('arn')
function = event.get('function')
if arn and function:
event_mapping[arn] = function
settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)
# Authorizer config
authorizer_function = self.authorizer.get('function', None)
if authorizer_function:
settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)
# Copy our Django app into the root of our package.
# It doesn't work otherwise.
if self.django_settings:
base = __file__.rsplit(os.sep, 1)[0]
django_py = os.path.join(base, 'ext', 'django_zappa.py')
lambda_zip.write(django_py, 'django_zappa_app.py')
# Lambda requires a specific chmod
temp_settings = tempfile.NamedTemporaryFile(delete=False)
os.chmod(temp_settings.name, 0o644)
temp_settings.write(settings_s)
temp_settings.close()
lambda_zip.write(temp_settings.name, 'zappa_settings.py')
os.remove(temp_settings.name)
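# --- Illustrative sketch (not part of Zappa's source) ---
# Roughly the kind of zappa_settings.py text that create_package() assembles
# in settings_s and writes into the handler zip. All values are hypothetical.
_example_generated_settings = (
    "# Generated by Zappa\n"
    "APP_MODULE='my_app'\n"
    "APP_FUNCTION='app'\n"
    "EXCEPTION_HANDLER=None\n"
    "DEBUG=True\n"
    "LOG_LEVEL='DEBUG'\n"
    "DOMAIN=None\n"
    "API_STAGE='dev'\n"
    "PROJECT_NAME='my-project'\n"
)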
def remove_local_zip(self):
"""
Remove our local zip file.
"""
if self.stage_config.get('delete_local_zip', True):
try:
if os.path.isfile(self.zip_path):
os.remove(self.zip_path)
if self.handler_path and os.path.isfile(self.handler_path):
os.remove(self.handler_path)
except Exception as e: # pragma: no cover
sys.exit(-1)
def remove_uploaded_zip(self):
"""
Remove the local and S3 zip file after uploading and updating.
"""
# Remove the uploaded zip from S3, because it is now registered.
if self.stage_config.get('delete_s3_zip', True):
self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)
if self.stage_config.get('slim_handler', False):
# Remove the handler zip too, but keep the project zip on S3 since the slim handler uses it.
self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name)
def on_exit(self):
"""
Cleanup after the command finishes.
Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs.
"""
if self.zip_path:
self.remove_uploaded_zip()
self.remove_local_zip()
def print_logs(self, logs, colorize=True, http=False, non_http=False):
"""
Parse, filter and print logs to the console.
"""
for log in logs:
timestamp = log['timestamp']
message = log['message']
if "START RequestId" in message:
continue
if "REPORT RequestId" in message:
continue
if "END RequestId" in message:
continue
if not colorize:
if http:
if self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
elif non_http:
if not self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
else:
print("[" + str(timestamp) + "] " + message.strip())
else:
if http:
if self.is_http_log_entry(message.strip()):
click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()))
elif non_http:
if not self.is_http_log_entry(message.strip()):
click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()))
else:
click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()))
def is_http_log_entry(self, string):
"""
Determines if a log entry is an HTTP-formatted log string or not.
"""
# Debug event filter
if 'Zappa Event' in string:
return False
# IP address filter
for token in string.replace('\t', ' ').split(' '):
try:
if (token.count('.') == 3 and token.replace('.', '').isnumeric()):
return True
except Exception: # pragma: no cover
pass
return False
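# --- Illustrative sketch (not part of Zappa's source) ---
# Demonstrates the heuristic used by is_http_log_entry() above: a whitespace-
# separated token with exactly three dots and only digits between them (an
# IPv4-looking address) marks the line as an HTTP access-log entry. The sample
# strings are hypothetical.
def _looks_like_ipv4(token):
    return token.count('.') == 3 and token.replace('.', '').isnumeric()
assert any(_looks_like_ipv4(t) for t in "127.0.0.1 - - GET /status 200".split(' '))
assert not any(_looks_like_ipv4(t) for t in "Zappa Event: schedule fired".split(' '))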
def colorize_log_entry(self, string):
"""
Apply various heuristics to return a colorized version of a string.
If these fail, simply return the string in plaintext.
"""
final_string = string
try:
# First, do stuff in square brackets
inside_squares = re.findall(r'\[([^]]*)\]', string)
for token in inside_squares:
if token in ['CRITICAL', 'ERROR', 'WARNING', 'DEBUG', 'INFO', 'NOTSET']:
final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, fg='cyan', bold=True) + click.style("]", fg='cyan'))
else:
final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, bold=True) + click.style("]", fg='cyan'))
# Then do quoted strings
quotes = re.findall(r'"[^"]*"', string)
for token in quotes:
final_string = final_string.replace(token, click.style(token, fg="yellow"))
# And UUIDs
for token in final_string.replace('\t', ' ').split(' '):
try:
if token.count('-') == 4 and token.replace('-', '').isalnum():
final_string = final_string.replace(token, click.style(token, fg="magenta"))
except Exception: # pragma: no cover
pass
# And IP addresses
try:
if token.count('.') == 3 and token.replace('.', '').isnumeric():
final_string = final_string.replace(token, click.style(token, fg="red"))
except Exception: # pragma: no cover
pass
# And status codes
try:
if token in ['200']:
final_string = final_string.replace(token, click.style(token, fg="green"))
if token in ['400', '401', '403', '404', '405', '500']:
final_string = final_string.replace(token, click.style(token, fg="red"))
except Exception: # pragma: no cover
pass
# And Zappa Events
try:
if "Zappa Event:" in final_string:
final_string = final_string.replace("Zappa Event:", click.style("Zappa Event:", bold=True, fg="green"))
except Exception: # pragma: no cover
pass
# And dates
for token in final_string.split('\t'):
try:
is_date = parser.parse(token)
final_string = final_string.replace(token, click.style(token, fg="green"))
except Exception: # pragma: no cover
pass
final_string = final_string.replace('\t', ' ').replace('  ', ' ')
if final_string[0] != ' ':
final_string = ' ' + final_string
return final_string
except Exception as e: # pragma: no cover
return string
def execute_prebuild_script(self):
"""
Parse and execute the prebuild_script from the zappa_settings.
"""
(pb_mod_path, pb_func) = self.prebuild_script.rsplit('.', 1)
try: # Prefer prebuild script in working directory
if pb_mod_path.count('.') >= 1: # Prebuild script func is nested in a folder
(mod_folder_path, mod_name) = pb_mod_path.rsplit('.', 1)
mod_folder_path_fragments = mod_folder_path.split('.')
working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
else:
mod_name = pb_mod_path
working_dir = os.getcwd()
working_dir_importer = pkgutil.get_importer(working_dir)
module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)
except (ImportError, AttributeError):
try: # Prebuild func might be in virtualenv
module_ = importlib.import_module(pb_mod_path)
except ImportError: # pragma: no cover
raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
"import prebuild script ", bold=True) + 'module: "{pb_mod_path}"'.format(
pb_mod_path=click.style(pb_mod_path, bold=True)))
if not hasattr(module_, pb_func): # pragma: no cover
raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
"find prebuild script ", bold=True) + 'function: "{pb_func}" '.format(
pb_func=click.style(pb_func, bold=True)) + 'in module "{pb_mod_path}"'.format(
pb_mod_path=pb_mod_path))
prebuild_function = getattr(module_, pb_func)
prebuild_function() # Call the function
def collision_warning(self, item):
"""
Given a string, print a warning if this could
collide with a Zappa core package module.
Use for app functions and events.
"""
namespace_collisions = [
"zappa.", "wsgi.", "middleware.", "handler.", "util.", "letsencrypt.", "cli."
]
for namespace_collision in namespace_collisions:
if namespace_collision in item:
click.echo(click.style("Warning!", fg="red", bold=True) +
" You may have a namespace collision with " + click.style(item, bold=True) +
"! You may want to rename that file.")
def deploy_api_gateway(self, api_id):
cache_cluster_enabled = self.stage_config.get('cache_cluster_enabled', False)
cache_cluster_size = str(self.stage_config.get('cache_cluster_size', .5))
endpoint_url = self.zappa.deploy_api_gateway(
api_id=api_id,
stage_name=self.api_stage,
cache_cluster_enabled=cache_cluster_enabled,
cache_cluster_size=cache_cluster_size,
cloudwatch_log_level=self.stage_config.get('cloudwatch_log_level', 'OFF'),
cloudwatch_data_trace=self.stage_config.get('cloudwatch_data_trace', False),
cloudwatch_metrics_enabled=self.stage_config.get('cloudwatch_metrics_enabled', False),
)
return endpoint_url
####################################################################
# Main
####################################################################
def shamelessly_promote():
"""
Shamelessly promote our little community.
"""
click.echo("Need " + click.style("help", fg='green', bold=True) +
"? Found a " + click.style("bug", fg='green', bold=True) +
"? Let us " + click.style("know", fg='green', bold=True) + "! :D")
click.echo("File bug reports on " + click.style("GitHub", bold=True) + " here: "
+ click.style("https://github.com/Miserlou/Zappa", fg='cyan', bold=True))
click.echo("And join our " + click.style("Slack", bold=True) + " channel here: "
+ click.style("https://slack.zappa.io", fg='cyan', bold=True))
click.echo("Love!,")
click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
def handle(): # pragma: no cover
"""
Main program execution handler.
"""
try:
cli = ZappaCLI()
sys.exit(cli.handle())
except SystemExit as e: # pragma: no cover
cli.on_exit()
sys.exit(e.code)
except KeyboardInterrupt: # pragma: no cover
cli.on_exit()
sys.exit(130)
except Exception as e:
cli.on_exit()
click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(")
click.echo("\n==============\n")
import traceback
traceback.print_exc()
click.echo("\n==============\n")
shamelessly_promote()
sys.exit(-1)
if __name__ == '__main__': # pragma: no cover
handle()
|
parroyo/Zappa
|
zappa/cli.py
|
Python
|
mit
| 87,982
|
[
"VisIt"
] |
710c0823e03a4e6abc0a776da06a52b42cc6fc581b10446344905b97600979bd
|
r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`sphx_glr_tutorials_text_mathtext.py`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys
from six import unichr
from math import ceil
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
import pyparsing
from pyparsing import (Combine, Group, Optional, Forward,
Literal, OneOrMore, ZeroOrMore, ParseException, Empty,
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException,
FollowedBy, Regex, ParserElement, QuotedString, ParseBaseException)
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, maxdict
from matplotlib.ft2font import (FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT,
LOAD_NO_HINTING)
from matplotlib.font_manager import findfont, FontProperties, get_font
from matplotlib._mathtext_data import (latex_to_bakoma, latex_to_standard,
tex2uni, latex_to_cmex,
stix_virtual_fonts)
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol, math=True):
"""get_unicode_index(symbol, [bool]) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (e.g. r'\\pi'), or a
Type1 symbol name (e.g. 'phi').
If math is False, the current symbol should be treated as a non-math symbol.
"""
# for a non-math symbol, simply return its unicode index
if not math:
return ord(symbol)
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError(message)
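# --- Illustrative usage sketch (not part of matplotlib) ---
# The expected results follow directly from the branches of get_unicode_index()
# above: plain characters map through ord(), '-' is promoted to the Unicode
# MINUS SIGN, and TeX commands are looked up in tex2uni.
_unicode_index_examples = {
    'x': 0x78,       # ord('x')
    '-': 0x2212,     # MINUS SIGN, preferred over the ASCII hyphen-minus
    r'\pi': 0x3c0,   # tex2uni['pi'] -> GREEK SMALL LETTER PI
}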
def unichr_safe(index):
"""Return the Unicode character corresponding to the index,
or the replacement character if this is a narrow build of Python
and the requested character is outside the BMP."""
try:
return unichr(index)
except ValueError:
return unichr(0xFFFD)
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
- :meth:`render_rect_filled`
- :meth:`get_results`
And optionally, if you need to use a FreeType hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.width = 0
self.height = 0
self.depth = 0
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the FreeType hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendAgg(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
self.mode = 'bbox'
self.bbox = [0, 0, 0, 0]
MathtextBackend.__init__(self)
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
if self.mode != 'bbox':
self.image = FT2Image(ceil(w), ceil(h + max(d, 0)))
def render_glyph(self, ox, oy, info):
if self.mode == 'bbox':
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
else:
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.iceberg, info.glyph,
antialiased=rcParams['text.antialiased'])
def render_rect_filled(self, x1, y1, x2, y2):
if self.mode == 'bbox':
self._update_bbox(x1, y1, x2, y2)
else:
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box, used_characters):
self.mode = 'bbox'
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self.mode = 'render'
self.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
result = (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
used_characters)
self.image = None
return result
def get_hinting_type(self):
from matplotlib.backends import backend_agg
return backend_agg.get_hinting_flag()
class MathtextBackendBitmap(MathtextBackendAgg):
def get_results(self, box, used_characters):
ox, oy, width, height, depth, image, characters = \
MathtextBackendAgg.get_results(self, box, used_characters)
return image, depth
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = six.moves.cStringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
used_characters)
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
used_characters)
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
self.svg_glyphs.append(
(info.font, info.fontsize, info.num, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
used_characters)
class MathtextBackendPath(MathtextBackend):
"""
Store information to write a mathtext rendering to the text path
machinery.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = info.num
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, self.height-y2 , x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr_safe(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathtextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g., '1', 'x' or '\\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi, math=True):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g., '1', 'x' or '\\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
*math*: whether sym is a math character
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi, math)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
result = self.mathtext_backend.get_results(box, self.get_used_characters())
self.destroy()
return result
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = get_font(filename)
self._fonts['default'] = default_font
self._fonts['regular'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None and os.path.exists(basename):
cached_font = get_font(basename)
self._fonts[basename] = cached_font
self._fonts[cached_font.postscript_name] = cached_font
self._fonts[cached_font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, font, glyph, fontsize, dpi):
if font.postscript_name == 'Cmex10':
return ((glyph.height/64.0/2.0) + (fontsize/3.0 * dpi/72.0))
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi, math=True):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize, math)
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, fontname, fontsize, dpi):
font = self._get_font(fontname)
font.set_size(fontsize, dpi)
pclt = font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(fontname, rcParams['mathtext.default'], 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too unreliable, so it
# is now hardcoded.
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
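# Worked example (illustrative only): with the hardcoded rule above, a 12 pt
# font rendered at 72 dpi gets an underline/rule thickness of
# (0.75 / 12.0) * 12 * 72 / 72.0 == 0.75 pixels.
assert ((0.75 / 12.0) * 12 * 72) / 72.0 == 0.75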
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, val in six.iteritems(self._fontmap):
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
symbol_name = None
font = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
font = self._get_font(basename)
elif len(sym) == 1:
slanted = (fontname == "it")
font = self._get_font(fontname)
if font is not None:
num = ord(sym)
if font is not None:
gid = font.get_char_index(num)
if gid != 0:
symbol_name = font.get_glyph_name(gid)
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize, math)
return font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
# font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
for alias, target in [(r'\leftparen', '('),
(r'\rightparent', ')'),
(r'\leftbrace', '{'),
(r'\rightbrace', '}'),
(r'\leftbracket', '['),
(r'\rightbracket', ']'),
(r'\{', '{'),
(r'\}', '}'),
(r'\[', '['),
(r'\]', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym, math)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
new_fontname = fontname
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
font = self._get_font(new_fontname)
if font is not None:
glyphindex = font.get_char_index(uniindex)
if glyphindex != 0:
found_symbol = True
if not found_symbol:
if self.cm_fallback:
if isinstance(self.cm_fallback, BakomaFonts):
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
if (fontname in ('it', 'regular') and
isinstance(self.cm_fallback, StixFonts)):
return self.cm_fallback._get_glyph(
'rm', font_class, sym, fontsize)
else:
return self.cm_fallback._get_glyph(
fontname, font_class, sym, fontsize)
else:
if fontname in ('it', 'regular') and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s' [U+%x]" %
(new_fontname,
sym.encode('ascii', 'backslashreplace').decode('ascii'),
uniindex),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = font.get_char_index(uniindex)
slanted = False
symbol_name = font.get_glyph_name(glyphindex)
return font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class DejaVuFonts(UnicodeFonts):
use_cmex = False
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if isinstance(self, DejaVuSerifFonts):
self.cm_fallback = StixFonts(*args, **kwargs)
else:
self.cm_fallback = StixSansFonts(*args, **kwargs)
self.bakoma = BakomaFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
# Include Stix sized alternatives for glyphs
self._fontmap.update({
1 : 'STIXSizeOneSym',
2 : 'STIXSizeTwoSym',
3 : 'STIXSizeThreeSym',
4 : 'STIXSizeFourSym',
5 : 'STIXSizeFiveSym'})
for key, name in six.iteritems(self._fontmap):
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
""" Override prime symbol to use Bakoma """
if sym == r'\prime':
return self.bakoma._get_glyph(fontname,
font_class, sym, fontsize, math)
else:
# check whether the glyph is available in the display font
uniindex = get_unicode_index(sym)
font = self._get_font('ex')
if font is not None:
glyphindex = font.get_char_index(uniindex)
if glyphindex != 0:
return super(DejaVuFonts, self)._get_glyph('ex',
font_class, sym, fontsize, math)
# otherwise return regular glyph
return super(DejaVuFonts, self)._get_glyph(fontname,
font_class, sym, fontsize, math)
class DejaVuSerifFonts(DejaVuFonts):
"""
A font handling class for the DejaVu Serif fonts
If a glyph is not found it will fall back to STIX Serif
"""
_fontmap = { 'rm' : 'DejaVu Serif',
'it' : 'DejaVu Serif:italic',
'bf' : 'DejaVu Serif:weight=bold',
'sf' : 'DejaVu Sans',
'tt' : 'DejaVu Sans Mono',
'ex' : 'DejaVu Serif Display',
0 : 'DejaVu Serif',
}
class DejaVuSansFonts(DejaVuFonts):
"""
A font handling class for the DejaVu Sans fonts
If a glyph is not found it will fall back to STIX Sans
"""
_fontmap = { 'rm' : 'DejaVu Sans',
'it' : 'DejaVu Sans:italic',
'bf' : 'DejaVu Sans:weight=bold',
'sf' : 'DejaVu Sans',
'tt' : 'DejaVu Sans Mono',
'ex' : 'DejaVu Sans Display',
0 : 'DejaVu Sans',
}
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSizeOneSym',
2 : 'STIXSizeTwoSym',
3 : 'STIXSizeThreeSym',
4 : 'STIXSizeFourSym',
5 : 'STIXSizeFiveSym'
}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, name in six.iteritems(self._fontmap):
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if (self._sans and mapping is None and
fontname not in ('regular', 'default')):
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping.get(font_class, 'rm')
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = rcParams['mathtext.default']
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
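# --- Illustrative sketch (not part of matplotlib) ---
# Based on the indexing used above, each stix_virtual_fonts range behaves like a
# 4-tuple (ucs_start, ucs_end, target_font, target_start): a code point inside
# [ucs_start, ucs_end] is shifted by (target_start - ucs_start) and redirected
# to target_font. The entry below is hypothetical, purely to show the shape.
_example_range = (0x41, 0x5a, 'rm', 0x1d400)   # hypothetical mapping for 'A'-'Z'
_example_uniindex = 0x43                        # 'C'
_example_mapped = _example_uniindex - _example_range[0] + _example_range[3]
assert _example_mapped == 0x1d402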
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
fixes = {'\\{': '{', '\\}': '}', '\\[': '[', '\\]': ']'}
sym = fixes.get(sym, sym)
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
font = self._get_font(i)
glyphindex = font.get_char_index(uniindex)
if glyphindex != 0:
alternatives.append((i, unichr_safe(uniindex)))
# The largest size of the radical symbol in STIX has incorrect
# metrics that cause it to be disconnected from the stem.
if sym == r'\__sqrt__':
alternatives = alternatives[:-1]
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm',
directory=self.basepath)
if filename is None:
filename = findfont('Helvetica', fontext='afm',
directory=self.basepath)
with open(filename, 'rb') as fd:
default_font = AFM(fd)
default_font.fname = filename
self.fonts['default'] = default_font
self.fonts['regular'] = default_font
self.pswriter = six.moves.cStringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
with open(fname, 'rb') as fd:
cached_font = AFM(fd)
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi, math=True):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(six.text_type(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
font = self._get_font(font)
return font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
font = self._get_font(font)
return font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g., node123
#
# Note that (as in TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
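# Illustrative check (not part of matplotlib): shrinking a length by one level
# and then growing it back is an identity, which is why GROW_FACTOR is defined
# as the exact inverse of SHRINK_FACTOR.
assert abs(10.0 * SHRINK_FACTOR * GROW_FACTOR - 10.0) < 1e-12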
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 6
class FontConstantsBase(object):
"""
A set of constants that controls how certain things, such as sub-
and superscripts, are laid out. These are all metrics that can't
be reliably retrieved from the font metrics in the font itself.
"""
# Percentage of x-height of additional horiz. space after sub/superscripts
script_space = 0.05
# Percentage of x-height that sub/superscripts drop below the baseline
subdrop = 0.4
# Percentage of x-height that superscripts are raised from the baseline
sup1 = 0.7
# Percentage of x-height that subscripts drop below the baseline
sub1 = 0.3
# Percentage of x-height that subscripts drop below the baseline when a
# superscript is present
sub2 = 0.5
# Percentage of x-height that sub/superscripts are offset relative to the
# nucleus edge for non-slanted nuclei
delta = 0.025
# Additional percentage of last character height above 2/3 of the
# x-height that superscripts are offset relative to the subscript
# for slanted nuclei
delta_slanted = 0.2
# Percentage of x-height that superscripts and subscripts are offset for
# integrals
delta_integral = 0.1
class ComputerModernFontConstants(FontConstantsBase):
script_space = 0.075
subdrop = 0.2
sup1 = 0.45
sub1 = 0.2
sub2 = 0.3
delta = 0.075
delta_slanted = 0.3
delta_integral = 0.3
class STIXFontConstants(FontConstantsBase):
script_space = 0.1
sup1 = 0.8
sub2 = 0.6
delta = 0.05
delta_slanted = 0.3
delta_integral = 0.3
class STIXSansFontConstants(FontConstantsBase):
script_space = 0.05
sup1 = 0.8
delta_slanted = 0.6
delta_integral = 0.3
class DejaVuSerifFontConstants(FontConstantsBase):
pass
class DejaVuSansFontConstants(FontConstantsBase):
pass
# Maps font family names to the FontConstantsBase subclass to use
_font_constant_mapping = {
'DejaVu Sans': DejaVuSansFontConstants,
'DejaVu Sans Mono': DejaVuSansFontConstants,
'DejaVu Serif': DejaVuSerifFontConstants,
'cmb10': ComputerModernFontConstants,
'cmex10': ComputerModernFontConstants,
'cmmi10': ComputerModernFontConstants,
'cmr10': ComputerModernFontConstants,
'cmss10': ComputerModernFontConstants,
'cmsy10': ComputerModernFontConstants,
'cmtt10': ComputerModernFontConstants,
'STIXGeneral': STIXFontConstants,
'STIXNonUnicode': STIXFontConstants,
'STIXSizeFiveSym': STIXFontConstants,
'STIXSizeFourSym': STIXFontConstants,
'STIXSizeThreeSym': STIXFontConstants,
'STIXSizeTwoSym': STIXFontConstants,
'STIXSizeOneSym': STIXFontConstants,
# Map the fonts we used to ship, just for good measure
'Bitstream Vera Sans': DejaVuSansFontConstants,
'Bitstream Vera': DejaVuSansFontConstants,
}
def _get_font_constant_set(state):
constants = _font_constant_mapping.get(
state.font_output._get_font(state.font).family_name,
FontConstantsBase)
# STIX sans isn't really its own font, just different code points
# in the STIX fonts, so we have to detect this one separately.
if (constants is STIXFontConstants and
isinstance(state.font_output, StixSansFonts)):
return STIXSansFontConstants
return constants
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state, math=True):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
self.math = math
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi, self.math)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
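        # E.g. an Hlist built from Char('A') and Char('V') in a font that kerns
        # that pair ends up with children like [Char, Kern, Char]; a trailing
        # Kern may also appear when a glyph's advance differs from its width,
        # since get_kerning() folds that difference in as well.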
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
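        # Informal example: if the children have a natural width of 10,
        #   hpack()                  -> self.width == 10 (natural width)
        #   hpack(2.0, 'additional') -> self.width == 12, glue stretched by 2
        #   hpack(8.0, 'exactly')    -> self.width == 8,  glue shrunk by 2
        # The stretch/shrink is distributed by _set_glue over the highest glue
        # order present among the children.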
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
values produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
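        # Analogous to hpack() above, but along the vertical axis; additionally,
        # any depth beyond the maximum *l* is folded back into the height, e.g.
        # vpack(0., 'additional', l=0.) clamps the resulting depth to zero.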
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state, thickness=None):
if thickness is None:
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if isinstance(glue_type, six.string_types):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
            raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
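# The stretch/shrink orders above correspond to TeX's orders of infinity: 'fil',
# 'fill' and 'filll' stretch at orders 1, 2 and 3 respectively, the 'neg_*'
# specs shrink instead, and 'ss' both stretches and shrinks at order 1, which is
# what HCentered/VCentered below rely on to center their contents.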
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
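# A minimal usage sketch (some_char stands for any node built above; the same
# pattern appears later in Parser.c_over_c and Parser.subsuper):
#
#     centered = HCentered([some_char])    # ss glue on both sides
#     centered.hpack(width, 'exactly')     # the glue absorbs the leftover space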
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
height = 0
depth = 0
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
    :class:`SubSuperCluster` is a sort of hack to get around the fact
    that this code does not do a two-pass parse like TeX. This lets us
    store enough information in the hlist itself, namely the nucleus,
    sub- and super-script, such that if another script follows that
    needs to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False, factor=None):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
# Ensure that size 0 is chosen when the text is regular sized but
# with descender glyphs by subtracting 0.2 * xHeight
if char.height + char.depth >= target_total - 0.2 * xHeight:
break
shift = 0
if state.font != 0:
if factor is None:
factor = (target_total) / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
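# For example, _auto_sized_delimiter further below wraps tall content in growing
# delimiters with AutoHeightChar(front, height, depth, state); with fonts that
# provide sized alternatives (such as BaKoMa) the smallest variant covering
# height + depth is chosen, otherwise a single glyph is rescaled via
# state.fontsize.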
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
                    self.cur_v = base_line + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
                    self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = np.round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = np.round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = np.round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = np.round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
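# Typical use (informal sketch; `some_box` is whatever hpack/vpack produced):
#
#     ship(ox, oy, some_box)
#
# which walks the box tree and emits render() calls (render_glyph /
# render_rect_filled on the font output) offset by (ox, oy).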
##############################################################################
# PARSER
def Error(msg):
"""
Helper class to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(s, loc, msg)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_math_style_dict = dict(displaystyle=0, textstyle=1,
scriptstyle=2, scriptscriptstyle=3)
_binary_operators = set('''
+ * -
\\pm \\sqcap \\rhd
\\mp \\sqcup \\unlhd
\\times \\vee \\unrhd
\\div \\wedge \\oplus
\\ast \\setminus \\ominus
\\star \\wr \\otimes
\\circ \\diamond \\oslash
\\bullet \\bigtriangleup \\odot
\\cdot \\bigtriangledown \\bigcirc
\\cap \\triangleleft \\dagger
\\cup \\triangleright \\ddagger
\\uplus \\lhd \\amalg'''.split())
_relation_symbols = set('''
= < > :
\\leq \\geq \\equiv \\models
\\prec \\succ \\sim \\perp
\\preceq \\succeq \\simeq \\mid
\\ll \\gg \\asymp \\parallel
\\subset \\supset \\approx \\bowtie
\\subseteq \\supseteq \\cong \\Join
\\sqsubset \\sqsupset \\neq \\smile
\\sqsubseteq \\sqsupseteq \\doteq \\frown
\\in \\ni \\propto \\vdash
\\dashv \\dots \\dotplus \\doteqdot'''.split())
_arrow_symbols = set('''
\\leftarrow \\longleftarrow \\uparrow
\\Leftarrow \\Longleftarrow \\Uparrow
\\rightarrow \\longrightarrow \\downarrow
\\Rightarrow \\Longrightarrow \\Downarrow
\\leftrightarrow \\longleftrightarrow \\updownarrow
\\Leftrightarrow \\Longleftrightarrow \\Updownarrow
\\mapsto \\longmapsto \\nearrow
\\hookleftarrow \\hookrightarrow \\searrow
\\leftharpoonup \\rightharpoonup \\swarrow
\\leftharpoondown \\rightharpoondown \\nwarrow
\\rightleftharpoons \\leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr regular".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambi_delim = set("""
| \\| / \\backslash \\uparrow \\downarrow \\updownarrow \\Uparrow
\\Downarrow \\Updownarrow . \\vert \\Vert \\\\|""".split())
_left_delim = set(r"( [ \{ < \lfloor \langle \lceil".split())
_right_delim = set(r") ] \} > \rfloor \rangle \rceil".split())
def __init__(self):
p = Bunch()
# All forward declarations are here
p.accent = Forward()
p.ambi_delim = Forward()
p.apostrophe = Forward()
p.auto_delim = Forward()
p.binom = Forward()
p.bslash = Forward()
p.c_over_c = Forward()
p.customspace = Forward()
p.end_group = Forward()
p.float_literal = Forward()
p.font = Forward()
p.frac = Forward()
p.dfrac = Forward()
p.function = Forward()
p.genfrac = Forward()
p.group = Forward()
p.int_literal = Forward()
p.latexfont = Forward()
p.lbracket = Forward()
p.left_delim = Forward()
p.lbrace = Forward()
p.main = Forward()
p.math = Forward()
p.math_string = Forward()
p.non_math = Forward()
p.operatorname = Forward()
p.overline = Forward()
p.placeable = Forward()
p.rbrace = Forward()
p.rbracket = Forward()
p.required_group = Forward()
p.right_delim = Forward()
p.right_delim_safe = Forward()
p.simple = Forward()
p.simple_group = Forward()
p.single_symbol = Forward()
p.snowflake = Forward()
p.space = Forward()
p.sqrt = Forward()
p.stackrel = Forward()
p.start_group = Forward()
p.subsuper = Forward()
p.subsuperop = Forward()
p.symbol = Forward()
p.symbol_name = Forward()
p.token = Forward()
p.unknown_symbol = Forward()
# Set names on everything -- very useful for debugging
for key, val in vars(p).items():
if not key.startswith('_'):
val.setName(key)
p.float_literal <<= Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
p.int_literal <<= Regex("[-+]?[0-9]+")
p.lbrace <<= Literal('{').suppress()
p.rbrace <<= Literal('}').suppress()
p.lbracket <<= Literal('[').suppress()
p.rbracket <<= Literal(']').suppress()
p.bslash <<= Literal('\\')
p.space <<= oneOf(list(self._space_widths))
p.customspace <<= (Suppress(Literal(r'\hspace'))
- ((p.lbrace + p.float_literal + p.rbrace)
| Error(r"Expected \hspace{n}")))
unicode_range = "\U00000080-\U0001ffff"
p.single_symbol <<= Regex(r"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" %
unicode_range)
p.snowflake <<= Suppress(p.bslash) + oneOf(self._snowflake)
p.symbol_name <<= (Combine(p.bslash + oneOf(list(tex2uni))) +
FollowedBy(Regex("[^A-Za-z]").leaveWhitespace() | StringEnd()))
p.symbol <<= (p.single_symbol | p.symbol_name).leaveWhitespace()
p.apostrophe <<= Regex("'+")
p.c_over_c <<= Suppress(p.bslash) + oneOf(list(self._char_over_chars))
p.accent <<= Group(
Suppress(p.bslash)
+ oneOf(list(self._accent_map) + list(self._wide_accents))
- p.placeable
)
p.function <<= Suppress(p.bslash) + oneOf(list(self._function_names))
p.start_group <<= Optional(p.latexfont) + p.lbrace
p.end_group <<= p.rbrace.copy()
p.simple_group <<= Group(p.lbrace + ZeroOrMore(p.token) + p.rbrace)
p.required_group<<= Group(p.lbrace + OneOrMore(p.token) + p.rbrace)
p.group <<= Group(p.start_group + ZeroOrMore(p.token) + p.end_group)
p.font <<= Suppress(p.bslash) + oneOf(list(self._fontnames))
p.latexfont <<= Suppress(p.bslash) + oneOf(['math' + x for x in self._fontnames])
p.frac <<= Group(
Suppress(Literal(r"\frac"))
- ((p.required_group + p.required_group) | Error(r"Expected \frac{num}{den}"))
)
p.dfrac <<= Group(
Suppress(Literal(r"\dfrac"))
- ((p.required_group + p.required_group) | Error(r"Expected \dfrac{num}{den}"))
)
p.stackrel <<= Group(
Suppress(Literal(r"\stackrel"))
- ((p.required_group + p.required_group) | Error(r"Expected \stackrel{num}{den}"))
)
p.binom <<= Group(
Suppress(Literal(r"\binom"))
- ((p.required_group + p.required_group) | Error(r"Expected \binom{num}{den}"))
)
p.ambi_delim <<= oneOf(list(self._ambi_delim))
p.left_delim <<= oneOf(list(self._left_delim))
p.right_delim <<= oneOf(list(self._right_delim))
p.right_delim_safe <<= oneOf(list(self._right_delim - {'}'}) + [r'\}'])
p.genfrac <<= Group(
Suppress(Literal(r"\genfrac"))
- (((p.lbrace + Optional(p.ambi_delim | p.left_delim, default='') + p.rbrace)
+ (p.lbrace + Optional(p.ambi_delim | p.right_delim_safe, default='') + p.rbrace)
+ (p.lbrace + p.float_literal + p.rbrace)
+ p.simple_group + p.required_group + p.required_group)
| Error(r"Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}"))
)
p.sqrt <<= Group(
Suppress(Literal(r"\sqrt"))
- ((Optional(p.lbracket + p.int_literal + p.rbracket, default=None)
+ p.required_group)
| Error("Expected \\sqrt{value}"))
)
p.overline <<= Group(
Suppress(Literal(r"\overline"))
- (p.required_group | Error("Expected \\overline{value}"))
)
p.unknown_symbol<<= Combine(p.bslash + Regex("[A-Za-z]*"))
p.operatorname <<= Group(
Suppress(Literal(r"\operatorname"))
- ((p.lbrace + ZeroOrMore(p.simple | p.unknown_symbol) + p.rbrace)
| Error("Expected \\operatorname{value}"))
)
p.placeable <<= ( p.snowflake # this needs to be before accent so named symbols
# that are prefixed with an accent name work
| p.accent # Must be before symbol as all accents are symbols
| p.symbol # Must be third to catch all named symbols and single chars not in a group
| p.c_over_c
| p.function
| p.group
| p.frac
| p.dfrac
| p.stackrel
| p.binom
| p.genfrac
| p.sqrt
| p.overline
| p.operatorname
)
p.simple <<= ( p.space
| p.customspace
| p.font
| p.subsuper
)
p.subsuperop <<= oneOf(["_", "^"])
p.subsuper <<= Group(
(Optional(p.placeable) + OneOrMore(p.subsuperop - p.placeable) + Optional(p.apostrophe))
| (p.placeable + Optional(p.apostrophe))
| p.apostrophe
)
p.token <<= ( p.simple
| p.auto_delim
| p.unknown_symbol # Must be last
)
p.auto_delim <<= (Suppress(Literal(r"\left"))
- ((p.left_delim | p.ambi_delim) | Error("Expected a delimiter"))
+ Group(ZeroOrMore(p.simple | p.auto_delim))
+ Suppress(Literal(r"\right"))
- ((p.right_delim | p.ambi_delim) | Error("Expected a delimiter"))
)
p.math <<= OneOrMore(p.token)
p.math_string <<= QuotedString('$', '\\', unquoteResults=False)
p.non_math <<= Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace()
p.main <<= (p.non_math + ZeroOrMore(p.math_string + p.non_math)) + StringEnd()
# Set actions
for key, val in vars(p).items():
if not key.startswith('_'):
if hasattr(self, key):
val.setParseAction(getattr(self, key))
self._expression = p.main
self._math_expression = p.math
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
self._em_width_cache = {}
try:
result = self._expression.parseString(s)
except ParseBaseException as err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
six.text_type(err)]))
self._state_stack = None
self._em_width_cache = {}
self._expression.resetCache()
return result[0]
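    # An informal usage sketch (the font set, size and dpi are arbitrary
    # examples; the constructor call mirrors MathTextParser.parse below):
    #
    #     parser = Parser()
    #     fonts = BakomaFonts(FontProperties(), MathtextBackendAgg())
    #     box = parser.parse(r'$\alpha_i^2$', fonts, fontsize=12, dpi=72)
    #
    # The returned box is an Hlist that can then be rendered with ship() above.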
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('rm', 'it', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def main(self, s, loc, toks):
#~ print "finish", toks
return [Hlist(toks)]
def math_string(self, s, loc, toks):
# print "math_string", toks[0][1:-1]
return self._math_expression.parseString(toks[0][1:-1])
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state(), math=False) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = rcParams['mathtext.default']
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'], 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\,' : 0.16667, # 3/18 em = 3 mu
r'\thinspace' : 0.16667, # 3/18 em = 3 mu
r'\/' : 0.16667, # 3/18 em = 3 mu
r'\>' : 0.22222, # 4/18 em = 4 mu
r'\:' : 0.22222, # 4/18 em = 4 mu
r'\;' : 0.27778, # 5/18 em = 5 mu
r'\ ' : 0.33333, # 6/18 em = 6 mu
r'\enspace' : 0.5, # 9/18 em = 9 mu
r'\quad' : 1, # 1 em = 18 mu
r'\qquad' : 2, # 2 em = 36 mu
r'\!' : -0.16667, # -3/18 em = -3 mu
}
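    # So, for instance, r'$a\,b$' inserts a Kern of 0.16667 em between 'a' and
    # 'b' (via _make_space above), while r'$a\!b$' tightens by the same amount.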
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[0]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
if c in self._spaced_symbols:
# iterate until we find previous character, needed for cases
            # such as ${ -2}$, $ -2$, or $   -2$ (any number of leading spaces).
for i in six.moves.xrange(1, loc + 1):
prev_char = s[loc-i]
if prev_char != ' ':
break
# Binary operators at start of string should not be spaced
if (c in self._binary_operators and
(len(s[:loc].split()) == 0 or prev_char == '{' or
prev_char in self._left_delim)):
return [char]
else:
return [Hlist([self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = True)]
elif c in self._punctuation_symbols:
# Do not space commas between brackets
if c == ',':
prev_char, next_char = '', ''
for i in six.moves.xrange(1, loc + 1):
prev_char = s[loc - i]
if prev_char != ' ':
break
for i in six.moves.xrange(1, len(s) - loc):
next_char = s[loc + i]
if next_char != ' ':
break
if (prev_char == '{' and next_char == '}'):
return [char]
# Do not space dots as decimal separators
if (c == '.' and s[loc - 1].isdigit() and s[loc + 1].isdigit()):
return [char]
else:
return [Hlist([char,
self._make_space(0.2)],
do_kern = True)]
return [char]
snowflake = symbol
def unknown_symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
_char_over_chars = {
# The first 2 entries in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
r'AA': (('it', 'A', 1.0), (None, '\\circ', 0.5), 0.0),
}
def c_over_c(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'ddot' : r'\combiningdiaeresis',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent',
r'overrightarrow' : r'\rightarrow',
r'overleftarrow' : r'\leftarrow',
r'mathring' : r'\circ'
}
_wide_accents = set(r"widehat widetilde widebar".split())
# make a lambda and call it to get the namespace right
_snowflake = (lambda am: [p for p in tex2uni if
any(p.startswith(a) and a != p for a in am)]
) (set(_accent_map))
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent_box = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent_box = Accent(self._accent_map[accent], state)
if accent == 'mathring':
accent_box.shrink()
accent_box.shrink()
centered = HCentered([Hbox(sym.width / 4.0), accent_box])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def operatorname(self, s, loc, toks):
self.push_state()
state = self.get_state()
state.font = 'rm'
# Change the font of Chars, but leave Kerns alone
for c in toks[0]:
if isinstance(c, Char):
c.font = 'rm'
c._update_metrics()
self.pop_state()
return Hlist(toks[0])
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
required_group = simple_group = group
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def is_between_brackets(self, s, loc):
return False
def subsuper(self, s, loc, toks):
assert(len(toks)==1)
nucleus = None
sub = None
super = None
# Pick all of the apostrophes out, including first apostrophes that have
# been parsed as characters
napostrophes = 0
new_toks = []
for tok in toks[0]:
if isinstance(tok, six.string_types) and tok not in ('^', '_'):
napostrophes += len(tok)
elif isinstance(tok, Char) and tok.c == "'":
napostrophes += 1
else:
new_toks.append(tok)
toks = new_toks
if len(toks) == 0:
assert napostrophes
nucleus = Hbox(0.0)
elif len(toks) == 1:
if not napostrophes:
return toks[0] # .asList()
else:
nucleus = toks[0]
elif len(toks) in (2, 3):
# single subscript or superscript
nucleus = toks[0] if len(toks) == 3 else Hbox(0.0)
op, next = toks[-2:]
if op == '_':
sub = next
else:
super = next
elif len(toks) in (4, 5):
# subscript and superscript
nucleus = toks[0] if len(toks) == 5 else Hbox(0.0)
op1, next1, op2, next2 = toks[-4:]
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
if napostrophes:
if super is None:
super = Hlist([])
for i in range(napostrophes):
super.children.extend(self.symbol(s, loc, ['\\prime']))
# kern() and hpack() needed to get the metrics right after extending
super.kern()
super.hpack()
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth
result = Hlist([vlist])
return [result]
# We remove kerning on the last character for consistency (otherwise it
# will compute kerning based on non-shrinked characters and may put them
# too close together when superscripted)
# We change the width of the last character to match the advance to
# consider some fonts with weird metrics: e.g. stix's f has a width of
# 7.75 and a kerning of -4.0 for an advance of 3.72, and we want to put
# the superscript at the advance
last_char = nucleus
if isinstance(nucleus, Hlist):
new_children = nucleus.children
if len(new_children):
# remove last kern
if (isinstance(new_children[-1],Kern) and
hasattr(new_children[-2], '_metrics')):
new_children = new_children[:-1]
last_char = new_children[-1]
if hasattr(last_char, '_metrics'):
last_char.width = last_char._metrics.advance
# create new Hlist without kerning
nucleus = Hlist(new_children, do_kern=False)
else:
if isinstance(nucleus, Char):
last_char.width = last_char._metrics.advance
nucleus = Hlist([nucleus])
# Handle regular sub/superscripts
constants = _get_font_constant_set(state)
lc_height = last_char.height
lc_baseline = 0
if self.is_dropsub(last_char):
lc_baseline = last_char.depth
# Compute kerning for sub and super
superkern = constants.delta * xHeight
subkern = constants.delta * xHeight
if self.is_slanted(last_char):
superkern += constants.delta * xHeight
superkern += (constants.delta_slanted *
(lc_height - xHeight * 2. / 3.))
if self.is_dropsub(last_char):
subkern = (3 * constants.delta -
constants.delta_integral) * lc_height
superkern = (3 * constants.delta +
constants.delta_integral) * lc_height
else:
subkern = 0
if super is None:
# node757
x = Hlist([Kern(subkern), sub])
x.shrink()
if self.is_dropsub(last_char):
shift_down = lc_baseline + constants.subdrop * xHeight
else:
shift_down = constants.sub1 * xHeight
x.shift_amount = shift_down
else:
x = Hlist([Kern(superkern), super])
x.shrink()
if self.is_dropsub(last_char):
shift_up = lc_height - constants.subdrop * xHeight
else:
shift_up = constants.sup1 * xHeight
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
y = Hlist([Kern(subkern),sub])
y.shrink()
if self.is_dropsub(last_char):
shift_down = lc_baseline + constants.subdrop * xHeight
else:
shift_down = constants.sub2 * xHeight
# If sub and superscript collide, move super up
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
if not self.is_dropsub(last_char):
x.width += constants.script_space * xHeight
result = Hlist([nucleus, x])
return [result]
def _genfrac(self, ldelim, rdelim, rule, style, num, den):
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
rule = float(rule)
# If style != displaystyle == 0, shrink the num and den
if style != self._math_style_dict['displaystyle']:
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width)
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state, rule), # rule
Vbox(0, thickness * 2.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'],
'=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
result = [Hlist([vlist, Hbox(thickness * 2.)])]
if ldelim or rdelim:
if ldelim == '':
ldelim = '.'
if rdelim == '':
rdelim = '.'
return self._auto_sized_delimiter(ldelim, result, rdelim)
return result
def genfrac(self, s, loc, toks):
assert(len(toks) == 1)
assert(len(toks[0]) == 6)
return self._genfrac(*tuple(toks[0]))
def frac(self, s, loc, toks):
assert(len(toks) == 1)
assert(len(toks[0]) == 2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
return self._genfrac('', '', thickness,
self._math_style_dict['textstyle'], num, den)
def dfrac(self, s, loc, toks):
assert(len(toks) == 1)
assert(len(toks[0]) == 2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
return self._genfrac('', '', thickness,
self._math_style_dict['displaystyle'], num, den)
def stackrel(self, s, loc, toks):
assert(len(toks) == 1)
assert(len(toks[0]) == 2)
num, den = toks[0]
return self._genfrac('', '', 0.0,
self._math_style_dict['textstyle'], num, den)
def binom(self, s, loc, toks):
assert(len(toks) == 1)
assert(len(toks[0]) == 2)
num, den = toks[0]
return self._genfrac('(', ')', 0.0,
self._math_style_dict['textstyle'], num, den)
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
'exactly', depth)
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def overline(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==1)
body = toks[0][0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = body.height - body.shift_amount + thickness * 3.0
depth = body.depth + body.shift_amount
# Place overline above body
rightside = Vlist([Hrule(state),
Fill(),
Hlist([body])])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
'exactly', depth)
hlist = Hlist([rightside])
return [hlist]
def _auto_sized_delimiter(self, front, middle, back):
state = self.get_state()
if len(middle):
height = max(x.height for x in middle)
depth = max(x.depth for x in middle)
factor = None
else:
height = 0
depth = 0
factor = 1.0
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state, factor=factor))
parts.extend(middle)
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state, factor=factor))
hlist = Hlist(parts)
return hlist
def auto_delim(self, s, loc, toks):
#~ print "auto_delim", toks
front, middle, back = toks
return self._auto_sized_delimiter(front, middle.asList(), back)
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'path' : MathtextBackendPath,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'dejavuserif' : DejaVuSerifFonts,
'dejavusans' : DejaVuSansFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
# There is a bug in Python 3.x where it leaks frame references,
# and therefore can't handle this caching
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'dejavuserif', "
"'dejavusans', 'stix', 'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
return result
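    # A hypothetical round trip (the expression is an arbitrary example); the
    # 'path' output is unpacked exactly as math_to_image() does further below:
    #
    #     mtp = MathTextParser('path')
    #     width, height, depth, _, _ = mtp.parse(r'$E = mc^2$', dpi=72)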
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, e.g., r'IQ: $\\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, e.g., r'IQ: $\\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b, a = mcolors.to_rgba(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:, :, 0] = 255 * r
RGBA[:, :, 1] = 255 * g
RGBA[:, :, 2] = 255 * b
RGBA[:, :, 3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, e.g., r'IQ: $\\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
_png.write_png(rgba, filename)
return depth
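    # For example (the output filename is arbitrary; requires the 'bitmap'
    # output, since to_mask() asserts it):
    #
    #     MathTextParser('bitmap').to_png('sigma.png', r'IQ: $\sigma_i=15$',
    #                                     color='blue', dpi=120, fontsize=14)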
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, e.g., r'IQ: $\\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth
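# A minimal, hypothetical demo (not part of the original module): render one
# expression to a PNG with math_to_image() above.  The expression, filename and
# dpi are arbitrary; the __main__ guard keeps normal imports unaffected.
if __name__ == '__main__':
    math_to_image(r'$\frac{a}{b} + \sqrt{x^2 + 1}$', 'mathtext_example.png',
                  dpi=120, format='png')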
|
jonyroda97/redbot-amigosprovaveis
|
lib/matplotlib/mathtext.py
|
Python
|
gpl-3.0
| 122,856
|
[
"Bowtie"
] |
108e56b237b55eb6d3c1bf89d9056235ab914dc0e4ccf733b48325f44b852aff
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Visualize a simulation box where the particles can be repositioned via the
mouse and timed callbacks, and the temperature of the thermostat can be
changed via the keyboard.
"""
import espressomd
import espressomd.visualization_opengl
import numpy as np
required_features = []
espressomd.assert_features(required_features)
print("Press u/j to change temperature")
box_l = 10.0
system = espressomd.System(box_l=[box_l] * 3)
visualizer = espressomd.visualization_opengl.openGLLive(
system, drag_enabled=True, drag_force=100)
system.time_step = 0.00001
system.cell_system.skin = 3.0
N = 50
partcls = system.part.add(pos=N * [[0, 0, 0]])
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# Callback for particle positions/velocities
def spin():
partcls.pos = [[box_l * 0.5, box_l * (i + 1) / (N + 2), box_l * 0.5]
for i in range(N)]
partcls.v = [
[np.sin(10.0 * i / N) * 20, 0, np.cos(10.0 * i / N) * 20] for i in range(N)]
# Register timed callback
visualizer.register_callback(spin, interval=5000)
# Callbacks to control temperature
temperature = 1.0
def increaseTemp():
global temperature
temperature += 0.5
system.thermostat.set_langevin(kT=temperature, gamma=1.0)
print("T =", system.thermostat.get_state()[0]['kT'])
def decreaseTemp():
global temperature
temperature -= 0.5
if temperature > 0:
system.thermostat.set_langevin(kT=temperature, gamma=1.0)
print(f"T = {system.thermostat.get_state()[0]['kT']:.1f}")
else:
temperature = 0
system.thermostat.turn_off()
print("T = 0.")
# Register button callbacks
visualizer.keyboard_manager.register_button(
espressomd.visualization_opengl.KeyboardButtonEvent('u', espressomd.visualization_opengl.KeyboardFireEvent.Hold, increaseTemp))
visualizer.keyboard_manager.register_button(
espressomd.visualization_opengl.KeyboardButtonEvent('j', espressomd.visualization_opengl.KeyboardFireEvent.Hold, decreaseTemp))
# Set initial position
spin()
# Start the visualizer
visualizer.run(1)
|
espressomd/espresso
|
samples/visualization_interactive.py
|
Python
|
gpl-3.0
| 2,788
|
[
"ESPResSo"
] |
71afcc87fc1e7212b9fa9260f54001e2315875c0a7203dbcc2779b3b6050e1c3
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of iris-grib.
#
# iris-grib is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iris-grib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with iris-grib. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for `iris_grib.message.Section`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris_grib.tests first so that some things can be initialised before
# importing anything else.
import iris_grib.tests as tests
import gribapi
import numpy as np
from iris_grib.message import Section
@tests.skip_data
class Test___getitem__(tests.IrisGribTest):
def setUp(self):
filename = tests.get_data_path(('GRIB', 'uk_t', 'uk_t.grib2'))
with open(filename, 'rb') as grib_fh:
self.grib_id = gribapi.grib_new_from_file(grib_fh)
def test_scalar(self):
section = Section(self.grib_id, None, ['Ni'])
self.assertEqual(section['Ni'], 47)
def test_array(self):
section = Section(self.grib_id, None, ['codedValues'])
codedValues = section['codedValues']
self.assertEqual(codedValues.shape, (1551,))
self.assertArrayAlmostEqual(codedValues[:3],
[-1.78140259, -1.53140259, -1.28140259])
def test_typeOfFirstFixedSurface(self):
section = Section(self.grib_id, None, ['typeOfFirstFixedSurface'])
self.assertEqual(section['typeOfFirstFixedSurface'], 100)
def test_numberOfSection(self):
n = 4
section = Section(self.grib_id, n, ['numberOfSection'])
self.assertEqual(section['numberOfSection'], n)
def test_invalid(self):
section = Section(self.grib_id, None, ['Ni'])
with self.assertRaisesRegexp(KeyError, 'Nii'):
section['Nii']
@tests.skip_data
class Test__getitem___pdt_31(tests.IrisGribTest):
def setUp(self):
filename = tests.get_data_path(('GRIB', 'umukv', 'ukv_chan9.grib2'))
with open(filename, 'rb') as grib_fh:
self.grib_id = gribapi.grib_new_from_file(grib_fh)
self.keys = ['satelliteSeries', 'satelliteNumber', 'instrumentType',
'scaleFactorOfCentralWaveNumber',
'scaledValueOfCentralWaveNumber']
def test_array(self):
section = Section(self.grib_id, None, self.keys)
for key in self.keys:
value = section[key]
self.assertIsInstance(value, np.ndarray)
self.assertEqual(value.shape, (1,))
@tests.skip_data
class Test_get_computed_key(tests.IrisGribTest):
def test_gdt40_computed(self):
fname = tests.get_data_path(('GRIB', 'gaussian', 'regular_gg.grib2'))
with open(fname, 'rb') as grib_fh:
self.grib_id = gribapi.grib_new_from_file(grib_fh)
section = Section(self.grib_id, None, [])
latitudes = section.get_computed_key('latitudes')
self.assertTrue(88.55 < latitudes[0] < 88.59)
if __name__ == '__main__':
tests.main()
|
pp-mo/iris-grib
|
iris_grib/tests/unit/message/test_Section.py
|
Python
|
lgpl-3.0
| 3,573
|
[
"Gaussian"
] |
dc4a81bf595ecf834f989e7ff989b0cf2c7f7a3002dab3a2ae52e37da05af1e3
|
"""Tests related to prerun part of the linter."""
import os
import subprocess
from typing import List
import pytest
from _pytest.monkeypatch import MonkeyPatch
from flaky import flaky
from ansiblelint import prerun
from ansiblelint.constants import INVALID_PREREQUISITES_RC
from ansiblelint.testing import run_ansible_lint
# https://github.com/box/flaky/issues/170
@flaky(max_runs=3) # type: ignore
def test_prerun_reqs_v1() -> None:
"""Checks that the linter can auto-install requirements v1 when found."""
cwd = os.path.realpath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "..", "examples", "reqs_v1"
)
)
result = run_ansible_lint("-v", ".", cwd=cwd)
assert "Running ansible-galaxy role install" in result.stderr, result.stderr
assert (
"Running ansible-galaxy collection install" not in result.stderr
), result.stderr
assert result.returncode == 0, result
@flaky(max_runs=3) # type: ignore
def test_prerun_reqs_v2() -> None:
"""Checks that the linter can auto-install requirements v2 when found."""
cwd = os.path.realpath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), "..", "examples", "reqs_v2"
)
)
result = run_ansible_lint("-v", ".", cwd=cwd)
assert "Running ansible-galaxy role install" in result.stderr, result.stderr
assert "Running ansible-galaxy collection install" in result.stderr, result.stderr
assert result.returncode == 0, result
def test__update_env_no_old_value_no_default_no_value(monkeypatch: MonkeyPatch) -> None:
"""Make sure empty value does not touch environment."""
monkeypatch.delenv("DUMMY_VAR", raising=False)
prerun._update_env("DUMMY_VAR", [])
assert "DUMMY_VAR" not in os.environ
def test__update_env_no_old_value_no_value(monkeypatch: MonkeyPatch) -> None:
"""Make sure empty value does not touch environment."""
monkeypatch.delenv("DUMMY_VAR", raising=False)
prerun._update_env("DUMMY_VAR", [], "a:b")
assert "DUMMY_VAR" not in os.environ
def test__update_env_no_default_no_value(monkeypatch: MonkeyPatch) -> None:
"""Make sure empty value does not touch environment."""
monkeypatch.setenv("DUMMY_VAR", "a:b")
prerun._update_env("DUMMY_VAR", [])
assert os.environ["DUMMY_VAR"] == "a:b"
@pytest.mark.parametrize(
("value", "result"),
(
(["a"], "a"),
(["a", "b"], "a:b"),
(["a", "b", "c"], "a:b:c"),
),
)
def test__update_env_no_old_value_no_default(
monkeypatch: MonkeyPatch, value: List[str], result: str
) -> None:
"""Values are concatenated using : as the separator."""
monkeypatch.delenv("DUMMY_VAR", raising=False)
prerun._update_env("DUMMY_VAR", value)
assert os.environ["DUMMY_VAR"] == result
@pytest.mark.parametrize(
("default", "value", "result"),
(
("a:b", ["c"], "a:b:c"),
("a:b", ["c:d"], "a:b:c:d"),
),
)
def test__update_env_no_old_value(
monkeypatch: MonkeyPatch, default: str, value: List[str], result: str
) -> None:
"""Values are appended to default value."""
monkeypatch.delenv("DUMMY_VAR", raising=False)
prerun._update_env("DUMMY_VAR", value, default)
assert os.environ["DUMMY_VAR"] == result
@pytest.mark.parametrize(
("old_value", "value", "result"),
(
("a:b", ["c"], "a:b:c"),
("a:b", ["c:d"], "a:b:c:d"),
),
)
def test__update_env_no_default(
monkeypatch: MonkeyPatch, old_value: str, value: List[str], result: str
) -> None:
"""Values are appended to preexisting value."""
monkeypatch.setenv("DUMMY_VAR", old_value)
prerun._update_env("DUMMY_VAR", value)
assert os.environ["DUMMY_VAR"] == result
@pytest.mark.parametrize(
("old_value", "default", "value", "result"),
(
("", "", ["e"], "e"),
("a", "", ["e"], "a:e"),
("", "c", ["e"], "e"),
("a", "c", ["e:f"], "a:e:f"),
),
)
def test__update_env(
monkeypatch: MonkeyPatch,
old_value: str,
default: str,
value: List[str],
result: str,
) -> None:
"""Defaults are ignored when preexisting value is present."""
monkeypatch.setenv("DUMMY_VAR", old_value)
prerun._update_env("DUMMY_VAR", value)
assert os.environ["DUMMY_VAR"] == result
def test_require_collection_wrong_version() -> None:
"""Tests behaviour of require_collection."""
subprocess.check_output(
[
"ansible-galaxy",
"collection",
"install",
"containers.podman",
"-p",
"~/.ansible/collections",
]
)
with pytest.raises(SystemExit) as pytest_wrapped_e:
prerun.require_collection("containers.podman", '9999.9.9')
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == INVALID_PREREQUISITES_RC
@pytest.mark.parametrize(
("name", "version"),
(
("fake_namespace.fake_name", None),
("fake_namespace.fake_name", "9999.9.9"),
),
)
def test_require_collection_missing(name: str, version: str) -> None:
"""Tests behaviour of require_collection, missing case."""
with pytest.raises(SystemExit) as pytest_wrapped_e:
prerun.require_collection(name, version)
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == INVALID_PREREQUISITES_RC
def test_ansible_config_get() -> None:
"""Check test_ansible_config_get."""
paths = prerun.ansible_config_get("COLLECTIONS_PATHS", list)
assert isinstance(paths, list)
assert len(paths) > 0
def test_install_collection() -> None:
"""Check that valid collection installs do not fail."""
prerun.install_collection("containers.podman:>=1.0")
def test_install_collection_fail() -> None:
"""Check that invalid collection install fails."""
with pytest.raises(SystemExit) as pytest_wrapped_e:
prerun.install_collection("containers.podman:>=9999.0")
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == INVALID_PREREQUISITES_RC
|
ansible/ansible-lint
|
test/test_prerun.py
|
Python
|
mit
| 6,088
|
[
"Galaxy"
] |
001d3a83e45d18fa277b4b9c191c9d11295591229b5e9b74d2b1379a426dc6c9
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
import time
from logging import getLogger
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from lettuce import step, world
from lettuce.django import django_url
from courseware.courses import get_course_by_id
from student.models import CourseEnrollment
from xmodule import seq_module, vertical_block
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
logger = getLogger(__name__)
@step('I (.*) capturing of screenshots before and after each step$')
def configure_screenshots_for_all_steps(_step, action):
"""
A step to be used in *.feature files. Enables/disables
automatic saving of screenshots before and after each step in a
scenario.
"""
action = action.strip()
if action == 'enable':
world.auto_capture_screenshots = True
elif action == 'disable':
world.auto_capture_screenshots = False
else:
raise ValueError('Parameter `action` should be one of "enable" or "disable".')
@world.absorb
def capture_screenshot_before_after(func):
"""
A decorator that will take a screenshot before and after the applied
function is run. Use this if you do not want to capture screenshots
for each step in a scenario, but rather want to debug a single function.
"""
def inner(*args, **kwargs):
prefix = round(time.time() * 1000)
world.capture_screenshot("{}_{}_{}".format(
prefix, func.func_name, 'before'
))
ret_val = func(*args, **kwargs)
world.capture_screenshot("{}_{}_{}".format(
prefix, func.func_name, 'after'
))
return ret_val
return inner
@step(u'The course "([^"]*)" exists$')
def create_course(_step, course):
# First clear the modulestore so we don't try to recreate
# the same course twice
# This also ensures that the necessary templates are loaded
world.clear_courses()
# Create the course
# We always use the same org and display name,
# but vary the course identifier (e.g. 600x or 191x)
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org='edx',
number=course,
display_name='Test Course'
)
# Add a chapter to the course to contain problems
world.scenario_dict['CHAPTER'] = world.ItemFactory.create(
parent_location=world.scenario_dict['COURSE'].location,
category='chapter',
display_name='Test Chapter',
publish_item=True, # Not needed for direct-only but I'd rather the test didn't know that
)
world.scenario_dict['SECTION'] = world.ItemFactory.create(
parent_location=world.scenario_dict['CHAPTER'].location,
category='sequential',
display_name='Test Section',
publish_item=True,
)
@step(u'I am registered for the course "([^"]*)"$')
def i_am_registered_for_the_course(step, course):
# Create the course
create_course(step, course)
# Create the user
world.create_user('robot', 'test')
user = User.objects.get(username='robot')
# If the user is not already enrolled, enroll the user.
# TODO: change to factory
CourseEnrollment.enroll(user, course_id(course))
world.log_in(username='robot', password='test')
@step(u'The course "([^"]*)" has extra tab "([^"]*)"$')
def add_tab_to_course(_step, course, extra_tab_name):
world.ItemFactory.create(
parent_location=course_location(course),
category="static_tab",
display_name=str(extra_tab_name))
@step(u'I am in a course$')
def go_into_course(step):
step.given('I am registered for the course "6.002x"')
step.given('And I am logged in')
step.given('And I click on View Courseware')
# Do we really use these 3 w/ a different course than is in the scenario_dict? if so, why? If not,
# then get rid of the override arg
def course_id(course_num):
return world.scenario_dict['COURSE'].id.replace(course=course_num)
def course_location(course_num):
return world.scenario_dict['COURSE'].location.replace(course=course_num)
def section_location(course_num):
return world.scenario_dict['SECTION'].location.replace(course=course_num)
def visit_scenario_item(item_key):
"""
Go to the courseware page containing the item stored in `world.scenario_dict`
under the key `item_key`
"""
url = django_url(reverse(
'jump_to',
kwargs={
'course_id': unicode(world.scenario_dict['COURSE'].id),
'location': unicode(world.scenario_dict[item_key].location),
}
))
world.browser.visit(url)
def get_courses():
'''
    Returns dict of lists of courses available, keyed by course.org (i.e. university).
Courses are sorted by course.number.
'''
courses = [c for c in modulestore().get_courses()
if isinstance(c, CourseDescriptor)] # skip error descriptors
courses = sorted(courses, key=lambda course: course.location.course)
return courses
def get_courseware_with_tabs(course_id):
"""
Given a course_id (string), return a courseware array of dictionaries for the
top three levels of navigation. Same as get_courseware() except include
the tabs on the right hand main navigation page.
This hides the appropriate courseware as defined by the hide_from_toc field:
chapter.hide_from_toc
Example:
[{
'chapter_name': 'Overview',
'sections': [{
'clickable_tab_count': 0,
'section_name': 'Welcome',
'tab_classes': []
}, {
'clickable_tab_count': 1,
'section_name': 'System Usage Sequence',
'tab_classes': ['VerticalBlock']
}, {
'clickable_tab_count': 0,
'section_name': 'Lab0: Using the tools',
'tab_classes': ['HtmlDescriptor', 'HtmlDescriptor', 'CapaDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Circuit Sandbox',
'tab_classes': []
}]
}, {
'chapter_name': 'Week 1',
'sections': [{
'clickable_tab_count': 4,
'section_name': 'Administrivia and Circuit Elements',
'tab_classes': ['VerticalBlock', 'VerticalBlock', 'VerticalBlock', 'VerticalBlock']
}, {
'clickable_tab_count': 0,
'section_name': 'Basic Circuit Analysis',
'tab_classes': ['CapaDescriptor', 'CapaDescriptor', 'CapaDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Resistor Divider',
'tab_classes': []
}, {
'clickable_tab_count': 0,
'section_name': 'Week 1 Tutorials',
'tab_classes': []
}]
}, {
'chapter_name': 'Midterm Exam',
'sections': [{
'clickable_tab_count': 2,
'section_name': 'Midterm Exam',
'tab_classes': ['VerticalBlock', 'VerticalBlock']
}]
}]
"""
course = get_course_by_id(course_id)
chapters = [chapter for chapter in course.get_children() if not chapter.hide_from_toc]
courseware = [{
'chapter_name': c.display_name_with_default_escaped,
'sections': [{
'section_name': s.display_name_with_default_escaped,
'clickable_tab_count': len(s.get_children()) if (type(s) == seq_module.SequenceDescriptor) else 0,
'tabs': [{
'children_count': len(t.get_children()) if (type(t) == vertical_block.VerticalBlock) else 0,
'class': t.__class__.__name__} for t in s.get_children()
]
} for s in c.get_children() if not s.hide_from_toc]
} for c in chapters]
return courseware
|
proversity-org/edx-platform
|
lms/djangoapps/courseware/features/common.py
|
Python
|
agpl-3.0
| 7,864
|
[
"VisIt"
] |
6b6bc70c083f47b5aaa01592b94f6388f47b087d65f4c9e4d487a72198c1de3e
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# L.Pritchard@scri.ac.uk
################################################################################
#
# Thanks to Peter Cock for the impetus to write the get_features() code to
# subselect Features.
#
################################################################################
"""FeatureSet module
Provides:
- FeatureSet - container for Feature objects
For drawing capabilities, this module uses reportlab to draw and write
the diagram: http://www.reportlab.com
"""
# -----------------------------------------------------------------------------
# IMPORTS
# ReportLab
from __future__ import print_function
from reportlab.pdfbase import _fontdata
from reportlab.lib import colors
# GenomeDiagram
from ._Feature import Feature
# Builtins
import re
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# CLASSES
# ------------------------------------------------------------
# FeatureSet
class FeatureSet(object):
"""FeatureSet object."""
def __init__(self, set_id=None, name=None, parent=None):
"""Create the object.
Arguments:
- set_id: Unique id for the set
- name: String identifying the feature set
"""
self.parent = parent
        self.id = set_id        # Unique id for the set
self.next_id = 0 # counter for unique feature ids
self.features = {} # Holds features, keyed by ID
self.name = name # String describing the set
def add_feature(self, feature, **kwargs):
"""Add a new feature.
Arguments:
- feature: Bio.SeqFeature object
- kwargs: Keyword arguments for Feature. Named attributes
of the Feature
Add a Bio.SeqFeature object to the diagram (will be stored
internally in a Feature wrapper).
"""
id = self.next_id # get id number
f = Feature(self, id, feature)
self.features[id] = f # add feature
for key in kwargs:
if key == "colour" or key == "color":
# Deal with "colour" as a special case by also mapping to color.
# If Feature.py used a python property we wouldn't need to call
# set_color explicitly. However, this is important to make sure
# every color gets mapped to a colors object - for example color
# numbers, or strings (may not matter for PDF, but does for PNG).
self.features[id].set_color(kwargs[key])
continue
setattr(self.features[id], key, kwargs[key])
self.next_id += 1 # increment next id
return f
def del_feature(self, feature_id):
"""Delete a feature.
Arguments:
- feature_id: Unique id of the feature to delete
Remove a feature from the set, indicated by its id.
"""
del self.features[feature_id]
def set_all_features(self, attr, value):
"""Set an attribute of all the features.
Arguments:
- attr: An attribute of the Feature class
- value: The value to set that attribute to
Set the passed attribute of all features in the set to the
passed value.
"""
changed = 0
for feature in self.features.values():
# If the feature has the attribute, and the value should change
if hasattr(feature, attr):
if getattr(feature, attr) != value:
setattr(feature, attr, value) # set it to the passed value
# For backwards compatibility, we support both colour and color.
# As a quick hack, make "colour" set both "colour" and "color".
# if attr=="colour":
# self.set_all_feature("color",value)
def get_features(self, attribute=None, value=None, comparator=None):
"""Retreive features.
Arguments:
- attribute: String, attribute of a Feature object
- value: The value desired of the attribute
- comparator: String, how to compare the Feature attribute to the
passed value
If no attribute or value is given, return a list of all features in the
        feature set. If both an attribute and value are given, then, depending
        on the comparator, a list of all features in the FeatureSet
        matching (or not) the passed value will be returned. Allowed comparators
are: 'startswith', 'not', 'like'.
The user is expected to make a responsible decision about which feature
attributes to use with which passed values and comparator settings.
"""
# If no attribute or value specified, return all features
if attribute is None or value is None:
return list(self.features.values())
# If no comparator is specified, return all features where the attribute
# value matches that passed
if comparator is None:
return [feature for feature in self.features.values() if
getattr(feature, attribute) == value]
# If the comparator is 'not', return all features where the attribute
# value does not match that passed
elif comparator == 'not':
return [feature for feature in self.features.values() if
getattr(feature, attribute) != value]
        # If the comparator is 'startswith', return all features where the attribute
        # value starts with the value passed
elif comparator == 'startswith':
return [feature for feature in self.features.values() if
getattr(feature, attribute).startswith(value)]
# If the comparator is 'like', use a regular expression search to identify
# features
elif comparator == 'like':
return [feature for feature in self.features.values() if
re.search(value, getattr(feature, attribute))]
# As a final option, just return an empty list
return []
def get_ids(self):
"""Return a list of all ids for the feature set."""
return list(self.features.keys())
def range(self):
"""Returns the lowest and highest base (or mark) numbers as a tuple."""
lows, highs = [], []
for feature in self.features.values():
for start, end in feature.locations:
lows.append(start)
highs.append(end)
if len(lows) != 0 and len(highs) != 0: # Default in case there is
return (min(lows), max(highs)) # nothing in the set
return 0, 0
def to_string(self, verbose=0):
"""Returns a formatted string with information about the set
Arguments:
- verbose: Boolean indicating whether a short (default) or
complete account of the set is required
"""
if not verbose: # Short account only required
return "%s" % self
else: # Long account desired
outstr = ["\n<%s: %s>" % (self.__class__, self.name)]
outstr.append("%d features" % len(self.features))
for key in self.features:
outstr.append("feature: %s" % self.features[key])
return "\n".join(outstr)
def __len__(self):
"""Return the number of features in the set."""
return len(self.features)
def __getitem__(self, key):
"""Return a feature, keyed by id."""
return self.features[key]
def __str__(self):
"""Returns a formatted string with information about the feature set."""
outstr = ["\n<%s: %s %d features>" % (self.__class__, self.name,
len(self.features))]
return "\n".join(outstr)
################################################################################
# RUN AS SCRIPT
################################################################################
if __name__ == '__main__':
from Bio import SeqIO
genbank_entry = SeqIO.read('/data/Genomes/Bacteria/Nanoarchaeum_equitans/NC_005213.gbk', 'gb')
# Test code
gdfs = FeatureSet(0, 'Nanoarchaeum equitans CDS')
for feature in genbank_entry.features:
if feature.type == 'CDS':
gdfs.add_feature(feature)
# print len(gdfs)
# print gdfs.get_ids()
# gdfs.del_feature(560)
# print gdfs.get_ids()
# print gdfs.get_features()
# for feature in gdfs.get_features():
# print feature.id, feature.start, feature.end
# print gdfs[500]
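    # Illustrative (editorial) queries using the comparators documented in
    # get_features(); the attribute names 'strand' and 'name' below are
    # assumptions about the wrapped Feature objects, not taken from this file.
    # print gdfs.get_features(attribute='strand', value=1)
    # print gdfs.get_features(attribute='name', value='NEQ', comparator='startswith')
    # print gdfs.get_features(attribute='name', value='polymerase', comparator='like')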
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Graphics/GenomeDiagram/_FeatureSet.py
|
Python
|
gpl-2.0
| 8,994
|
[
"Biopython"
] |
5f8666ba884a35b339ccdf38ce49f2d4c7b7f6e7e561b7f949d68b8779eecbd0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Abhay Devasthale and Martin Raspaud
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# Adam Dybbroe <adam.dybbroe@smhi.se>
# Sajid Pareeth <sajid.pareeth@fmach.it>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Loader for ascat, netcdf format.
The driver works for netcdf format of ASCAT soil moisture swath data downloaded from
here: http://navigator.eumetsat.int/discovery/Start/DirectSearch/DetailResult.do?f%28r0%29=EO:EUM:DAT:METOP:SOMO12
Rename the config file mpop/mpop/etc/metop.ascat.cfg.template to metop.cfg in order to read the ASCAT data.
"""
import numpy as np
from ConfigParser import ConfigParser
from mpop import CONFIG_PATH
import os
from netCDF4 import Dataset
def load(satscene):
"""Load ascat data.
"""
# Read config file content
conf = ConfigParser()
conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
values = {"orbit": satscene.orbit,
"satname": satscene.satname,
"number": satscene.number,
"instrument": satscene.instrument_name,
"satellite": satscene.fullname,
"time_slot": satscene.time_slot,
"time": satscene.time_slot.strftime('%Y%m%d%H%M%S')
}
filename = os.path.join(
conf.get("ascat-level2", "dir"),
satscene.time_slot.strftime(conf.get("ascat-level2",
"filename",
raw=True)) % values)
# Load data from netCDF file
ds = Dataset(filename, 'r')
for chn_name in satscene.channels_to_load:
# Read variable corresponding to channel name
data = np.ma.masked_array(
ds.variables[chn_name][:], np.isnan(ds.variables[chn_name][:]))
satscene[chn_name] = data
lons = ds.variables['longitude'][:]
lats = ds.variables['latitude'][:]
# Set scene area as pyresample geometry object
try:
from pyresample import geometry
satscene.area = geometry.SwathDefinition(lons=lons, lats=lats)
except ImportError:
# pyresample not available. Set lon and lats directly
satscene.area = None
satscene.lat = lats
satscene.lon = lons
|
mraspaud/mpop
|
mpop/satin/ascat_nc.py
|
Python
|
gpl-3.0
| 2,879
|
[
"NetCDF"
] |
3336db7f26cae3cc8f956409649f88c08da0a8a04a2d2dd1dd34f9a5da1270ba
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run_flow
from apiclient.discovery import build
from gdcmdtools.base import BASE_INFO
import httplib2
import pprint
import shutil
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
DICT_OF_REDIRECT_URI = {
"oob": "(default) means \"urn:ietf:wg:oauth:2.0:oob\"",
"local": "means \"http://localhost\""
}
SCOPE = [
# if using /drive.file instead of /drive,
# then the fusion table is not seen by drive.files.list()
# also, drive.parents.insert() fails.
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/fusiontables',
'https://www.googleapis.com/auth/drive.scripts',
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.googleapis.com/auth/userinfo.email'
]
class GDAuth(object):
def __init__(self, secret_file=None, if_oob=True):
default_secret_file = os.path.expanduser(
'~/.%s.secrets' % BASE_INFO["app"])
if secret_file is None:
self.secret_file = default_secret_file
else:
            # should reissue the credentials
storage_file = os.path.expanduser('~/.%s.creds' % BASE_INFO["app"])
if os.path.isfile(storage_file):
os.remove(storage_file)
try:
shutil.copyfile(secret_file, default_secret_file)
except:
logger.error('failed to copy secret file')
self.secret_file = default_secret_file
os.chmod(self.secret_file, 0o600)
self.if_oob = if_oob
def run(self):
credentials = self.get_credentials()
return credentials
def get_credentials(self):
#home_path = os.getenv("HOME")
# storage_file = os.path.abspath(
# '%s/.%s.creds' % (home_path,BASE_INFO["app"]))
storage_file = os.path.expanduser('~/.%s.creds' % BASE_INFO["app"])
logger.debug('storage_file=%s' % storage_file)
try:
with open(storage_file):
pass
except IOError:
logger.error('storage_file: %s not exists' % storage_file)
# return None
storage = Storage(storage_file)
credentials = storage.get()
# FIXME: if secret_file is given, should clean creds
if credentials is None or credentials.invalid:
# credentials_file = os.path.abspath(
# '%s/.%s.secrets' % (home_path,BASE_INFO["app"]))
credentials_file = self.secret_file
#logger.debug('credentials_file=%s' % credentials_file)
if self.if_oob:
redirect_uri = 'urn:ietf:wg:oauth:2.0:oob'
else:
redirect_uri = None
try:
flow = flow_from_clientsecrets(
credentials_file,
scope=SCOPE,
redirect_uri=redirect_uri)
except:
logger.error("failed on flow_from_clientsecrets()")
return None
if self.if_oob:
auth_uri = flow.step1_get_authorize_url()
logger.info(
'Please visit the URL in your browser: %s' %
auth_uri)
code = raw_input('Insert the given code: ')
try:
credentials = flow.step2_exchange(code)
except:
logger.error("failed on flow.step2_exchange()")
return None
storage.put(credentials)
credentials.set_store(storage)
else:
try:
                    credentials = run_flow(flow, storage)
except:
logger.error("failed on oauth2client.tools.run_flow()")
return None
self.credentials = credentials
return self.credentials
def get_authorized_http(self):
self.http = httplib2.Http()
self.credentials.authorize(self.http)
#wrapped_request = self.http.request
# FIXME
def _Wrapper(uri, method="GET", body=None, headers=None, **kw):
logger.debug('Req: %s %s' % (uri, method))
logger.debug('Req headers:\n%s' % pprint.pformat(headers))
logger.debug('Req body:\n%s' % pprint.pformat(body))
resp, content = wrapped_request(uri, method, body, headers, **kw)
logger.debug('Rsp headers:\n%s' % pprint.pformat(resp))
logger.debug('Rsp body:\n%s' % pprint.pformat(content))
return resp, content
#self.http.request = _Wrapper
return self.http
|
tienfuc/gdcmdtools
|
gdcmdtools/auth.py
|
Python
|
bsd-2-clause
| 4,840
|
[
"VisIt"
] |
08bf06e46972be0a8f667aabad411ce8f966b2d161accd6d6ff0964613a98f50
|
import itertools
import argparse
import sys
import os
import errno
import subprocess
import winbrew
import winbrew.util
class InstallException(Exception):
pass
class InstallPlan:
"""
Executes a topological sort to determine the order in which to install
packages.
"""
def __init__(self, formulas, args):
self.args = args
self.order = []
self.marked = set()
self.temp = set()
self.initial_formulas = set(formulas)
self.force_rebuild = args.force_rebuild
self.force_reinstall = args.force_reinstall
self.force_redownload = args.force_redownload
self.env = os.environ.copy()
for formula in formulas:
self.visit(formula)
def visit(self, formula):
"""
Visit the node with the given name and all children in a depth-first
search.
"""
if formula.name in self.temp:
raise InstallException('circular dependency')
if formula.name not in self.marked:
self.temp.add(formula.name)
for dep_name in itertools.chain(formula.deps, formula.build_deps):
dep = winbrew.Formula.find_by_name(dep_name)
self.visit(dep)
self.temp.remove(formula.name)
self.marked.add(formula.name)
self.order.append(formula)
def __iter__(self):
"""
Returns a list of packages to be installed in dependency-order, or
throws an exception if there are circular dependencies.
"""
return iter(self.order)
def execute(self):
"""
Install all packages in the install plan
"""
self.download()
self.unpack()
self.build()
self.install()
def download(self):
for formula in self:
formula.download(force=(formula in self.initial_formulas) and self.force_redownload)
def unpack(self):
for formula in self:
formula.unpack()
def build(self):
for formula in self:
formula.build(force=(formula in self.initial_formulas) and self.force_rebuild)
def install(self):
for formula in self:
formula.install(force=(formula in self.initial_formulas) and self.force_reinstall)
def uninstall(args):
"""
Uninstall a package. FIXME: Eventually, this should uninstall packages
that depend on this package. For now, just nuke the installed files.
"""
for name in args.package:
formula = winbrew.Formula.find_by_name(name)
formula.uninstall(force=args.force)
def test(args):
"""
Test a package.
"""
for name in args.package:
formula = winbrew.Formula.find_by_name(name)
formula.test()
print('PASS')
def listp(args):
"""
List package contents
"""
for name in args.package:
formula = winbrew.Formula.find_by_name(name)
print(('\n'.join(formula.manifest.files)))
def update(args):
"""
Update by cloning formulas from the git repository.
"""
os.chdir(winbrew.home)
cmd = ('git', 'pull')
subprocess.check_call(cmd, shell=True)
def download(args):
"""
Download a formula, but don't unpack or install it
"""
for name in args.package:
formula = winbrew.Formula.find_by_name(name)
formula.download()
def clean(args):
"""
Clean a formula from the cache
"""
for name in args.package:
formula = winbrew.Formula.find_by_name(name)
formula.clean()
def install(args):
"""
Install a package and dependencies.
"""
try:
formulas = []
for i, name in enumerate(args.package):
formula = winbrew.Formula.find_by_name(name)
formula.parse_options(args.package[i+1:])
formulas.append(formula)
plan = InstallPlan(formulas, args)
plan.execute()
except InstallException as e:
sys.stderr.write('error: %s\n' % str(e))
sys.exit(1)
def reinstall(args):
"""
Reinstall packages
"""
if args.all:
args.package += [manifest.name for manifest in winbrew.Manifest.all()]
args.force_reinstall = True
args.force_rebuild = True
args.force_redownload = True
install(args)
def edit(args):
"""
Edit a package.
"""
path = os.path.join(winbrew.formula_path, '%s.py' % args.name)
if not os.path.exists(path):
sys.stderr.write('error: file formula not found: %s\n' % args.name)
sys.exit(1)
editor = os.environ.get('EDITOR', 'notepad')
try:
subprocess.check_call((editor, path), shell=True)
except subprocess.CalledProcessError as e:
pass
except SystemError as e:
sys.stderr.write('error: %s\n' % str(e))
sys.stderr.flush()
sys.exit(1)
def create(args):
"""
Create a new package.
"""
base = os.path.split(args.url)[1]
base = base.split('.')[0]
base = base.split('-')[0]
base = base.split('_')[0]
name = args.name or base
template = """
import winbrew
class %(name)s(winbrew.Formula):
url = '%(url)s'
homepage = ''
sha1 = ''
build_deps = ()
deps = ()
def build(self):
pass
def install(self):
pass
def test(self):
pass
"""
path = os.path.join(winbrew.formula_path, '%s.py' % name)
if not os.path.exists(path):
winbrew.util.mkdir_p(os.path.split(path)[0])
fd = open(path, 'w')
fd.write(template % {'name': name.title(), 'url': args.url})
fd.close()
args.name = name
edit(args)
def freeze(args):
"""
Output installed packages.
"""
print(('\n'.join([manifest.name for manifest in winbrew.Manifest.all()])))
def init():
if not os.path.exists(os.path.join(winbrew.home, '.git')):
cmd = ('git', 'clone', winbrew.formula_url, winbrew.home)
print((' '.join(cmd)))
subprocess.check_call(cmd, shell=True)
def main():
parser = argparse.ArgumentParser(prog='winbrew', description='Package installer for Windows')
subparsers = parser.add_subparsers(dest='command')
sub = subparsers.add_parser('create', help='create a new package')
sub.add_argument('url', type=str, help='package source URL')
sub.add_argument('-n', '--name', type=str, help='package name', default=None)
sub = subparsers.add_parser('edit', help='edit a package')
sub.add_argument('name', type=str, help='package source name')
sub = subparsers.add_parser('install', help='install packages')
sub.add_argument('--force-reinstall', action='store_true', help='force package reinstall (completely reinstall it)')
sub.add_argument('--force-rebuild', action='store_true', help='force package rebuild (completely rebuild it)')
sub.add_argument('--force-redownload', action='store_true', help='force package rebuild (completely rebuild it)')
sub.add_argument('package', type=str, nargs=argparse.REMAINDER, help='packages to install')
sub = subparsers.add_parser('reinstall', help='reinstall packages')
sub.add_argument('--all', '-a', action='store_true', help='reinstall all packages')
sub.add_argument('package', type=str, nargs=argparse.REMAINDER, help='packages to reinstall')
sub = subparsers.add_parser('uninstall', help='uninstall packages')
sub.add_argument('--force', '-f', action='store_true', help='force package uninstall')
sub.add_argument('package', type=str, nargs='+', help='packages to uninstall')
sub = subparsers.add_parser('list', help='list package contents')
sub.add_argument('package', type=str, nargs='+', help='packages to list')
sub = subparsers.add_parser('test', help='test packages')
sub.add_argument('package', type=str, nargs=argparse.REMAINDER, help='packages to test')
sub = subparsers.add_parser('freeze', help='output installed packages')
sub = subparsers.add_parser('update', help='update formulas from server')
sub = subparsers.add_parser('download', help='download formulas without installing them')
sub.add_argument('package', type=str, nargs=argparse.REMAINDER, help='packages to download')
sub = subparsers.add_parser('clean', help='clean formulas from the cache')
sub.add_argument('package', type=str, nargs=argparse.REMAINDER, help='packages to clean')
args = parser.parse_args()
try:
init()
if args.command == 'create':
create(args)
elif args.command == 'edit':
edit(args)
elif args.command == 'install':
install(args)
elif args.command == 'reinstall':
reinstall(args)
elif args.command == 'uninstall':
uninstall(args)
elif args.command == 'update':
update(args)
elif args.command == 'test':
test(args)
elif args.command == 'list':
listp(args)
elif args.command == 'freeze':
freeze(args)
elif args.command == 'download':
download(args)
elif args.command == 'clean':
clean(args)
else:
sys.stderr.write('error: unknown command')
sys.exit(1)
except winbrew.FormulaException as e:
sys.stderr.write('error: %s\n' % str(e))
sys.stderr.flush()
sys.exit(1)
if __name__ == '__main__':
main()
|
mfichman/winbrew
|
winbrew/execute.py
|
Python
|
mit
| 9,401
|
[
"VisIt"
] |
809db27e4aedc786efee77170afa746ff93cf7db192f0dc18cae00db4815e02b
|
import sys, itertools, optparse
optParser = optparse.OptionParser(
usage = "python %prog [options] <flattened_gff_file> <sam_file> <output_file>",
description=
"This script counts how many reads in <sam_file> fall onto each exonic " +
"part given in <flattened_gff_file> and outputs a list of counts in " +
"<output_file>, for further analysis with the DEXSeq Bioconductor package. " +
"(Notes: The <flattened_gff_file> should be produced with the script " +
"dexseq_prepare_annotation.py). <sam_file> may be '-' to indicate standard input.",
epilog =
"Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
"Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
"Public License v3. Part of the 'DEXSeq' package." )
optParser.add_option( "-p", "--paired", type="choice", dest="paired",
choices = ( "no", "yes" ), default = "no",
help = "'yes' or 'no'. Indicates whether the data is paired-end (default: no)" )
optParser.add_option( "-s", "--stranded", type="choice", dest="stranded",
choices = ( "yes", "no", "reverse" ), default = "yes",
help = "'yes', 'no', or 'reverse'. Indicates whether the data is " +
"from a strand-specific assay (default: yes ). " +
"Be sure to switch to 'no' if you use a non strand-specific RNA-Seq library " +
"preparation protocol. 'reverse' inverts strands and is neede for certain " +
"protocols, e.g. paired-end with circularization." )
optParser.add_option( "-a", "--minaqual", type="int", dest="minaqual",
default = 10,
help = "skip all reads with alignment quality lower than the given " +
"minimum value (default: 10)" )
if len( sys.argv ) == 1:
optParser.print_help()
sys.exit(1)
(opts, args) = optParser.parse_args()
if len( args ) != 3:
sys.stderr.write( sys.argv[0] + ": Error: Please provide three arguments.\n" )
sys.stderr.write( " Call with '-h' to get usage information.\n" )
sys.exit( 1 )
try:
import HTSeq
except ImportError:
sys.stderr.write( "Could not import HTSeq. Please install the HTSeq Python framework\n" )
sys.stderr.write( "available from http://www-huber.embl.de/users/anders/HTSeq\n" )
sys.exit(1)
gff_file = args[0]
sam_file = args[1]
out_file = args[2]
stranded = opts.stranded == "yes" or opts.stranded == "reverse"
reverse = opts.stranded == "reverse"
is_PE = opts.paired == "yes"
minaqual = opts.minaqual
if sam_file == "-":
sam_file = sys.stdin
# Step 1: Read in the GFF file as generated by aggregate_genes.py
# and put everything into a GenomicArrayOfSets
features = HTSeq.GenomicArrayOfSets( "auto", stranded=stranded )
for f in HTSeq.GFF_Reader( gff_file ):
if f.type == "exonic_part":
f.name = f.attr['gene_id'] + ":" + f.attr['exonic_part_number']
features[f.iv] += f
# initialise counters
num_reads = 0
counts = {}
counts[ '_empty' ] = 0
counts[ '_ambiguous' ] = 0
counts[ '_lowaqual' ] = 0
counts[ '_notaligned' ] = 0
# put a zero for each feature ID
for iv, s in features.steps():
for f in s:
counts[ f.name ] = 0
#We need this little helper below:
def reverse_strand( s ):
if s == "+":
return "-"
elif s == "-":
return "+"
else:
raise SystemError, "illegal strand"
# Now go through the aligned reads
if not is_PE:
num_reads = 0
for a in HTSeq.SAM_Reader( sam_file ):
if not a.aligned:
counts[ '_notaligned' ] += 1
continue
if a.aQual < minaqual:
counts[ '_lowaqual' ] += 1
continue
rs = set()
for cigop in a.cigar:
if cigop.type != "M":
continue
if reverse:
cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
for iv, s in features[cigop.ref_iv].steps( ):
rs = rs.union( s )
set_of_gene_names = set( [ f.name.split(":")[0] for f in rs ] )
if len( set_of_gene_names ) == 0:
counts[ '_empty' ] += 1
elif len( set_of_gene_names ) > 1:
counts[ '_ambiguous' ] +=1
else:
for f in rs:
counts[ f.name ] += 1
num_reads += 1
if num_reads % 100000 == 0:
sys.stderr.write( "%d reads processed.\n" % num_reads )
else: # paired-end
num_reads = 0
for af, ar in HTSeq.pair_SAM_alignments( HTSeq.SAM_Reader( sam_file ) ):
rs = set()
if af and ar and not af.aligned and not ar.aligned:
counts[ '_notaligned' ] += 1
continue
      if af and ar and af.aQual < minaqual and ar.aQual < minaqual:
counts[ '_lowaqual' ] += 1
continue
if af and af.aligned and af.aQual >= minaqual and af.iv.chrom in features.chrom_vectors.keys():
for cigop in af.cigar:
if cigop.type != "M":
continue
if reverse:
cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
for iv, s in features[cigop.ref_iv].steps():
rs = rs.union( s )
if ar and ar.aligned and ar.aQual >= minaqual and ar.iv.chrom in features.chrom_vectors.keys():
for cigop in ar.cigar:
if cigop.type != "M":
continue
if not reverse:
cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
for iv, s in features[cigop.ref_iv].steps():
rs = rs.union( s )
set_of_gene_names = set( [ f.name.split(":")[0] for f in rs ] )
if len( set_of_gene_names ) == 0:
counts[ '_empty' ] += 1
elif len( set_of_gene_names ) > 1:
         counts[ '_ambiguous' ] += 1
else:
for f in rs:
counts[ f.name ] += 1
num_reads += 1
if num_reads % 100000 == 0:
sys.stderr.write( "%d reads processed.\n" % num_reads )
# Step 3: Write out the results
fout = open( out_file, "w" )
for fn in sorted( counts.keys() ):
fout.write( "%s\t%d\n" % ( fn, counts[fn] ) )
fout.close()
|
jhl667/quick_scripts
|
dexseq_count.py
|
Python
|
gpl-2.0
| 6,001
|
[
"Bioconductor",
"HTSeq"
] |
ea4ea8b40e1b2bba8057bbc9a517dfd6e5015a71c8b91d34f6f4f4fce4b0f346
|
# Copyright 2008 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parser for XML results returned by NCBI's Entrez Utilities. This
parser is used by the read() function in Bio.Entrez, and is not intended
to be used directly.
"""
# The question is how to represent an XML file as Python objects. Some
# XML files returned by NCBI look like lists, others look like dictionaries,
# and others look like a mix of lists and dictionaries.
#
# My approach is to classify each possible element in the XML as a plain
# string, an integer, a list, a dictionary, or a structure. The latter is a
# dictionary where the same key can occur multiple times; in Python, it is
# represented as a dictionary where that key occurs once, pointing to a list
# of values found in the XML file.
#
# The parser then goes through the XML and creates the appropriate Python
# object for each element. The different levels encountered in the XML are
# preserved on the Python side. So a subelement of a subelement of an element
# is a value in a dictionary that is stored in a list which is a value in
# some other dictionary (or a value in a list which itself belongs to a list
# which is a value in a dictionary, and so on). Attributes encountered in
# the XML are stored as a dictionary in a member .attributes of each element,
# and the tag name is saved in a member .tag.
#
# To decide which kind of Python object corresponds to each element in the
# XML, the parser analyzes the DTD referred at the top of (almost) every
# XML file returned by the Entrez Utilities. This is preferred over a hand-
# written solution, since the number of DTDs is rather large and their
# contents may change over time. About half the code in this parser deals
# with parsing the DTD, and the other half with the XML itself.
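# As a purely illustrative (editorial) example of that mapping, a hypothetical
# ESearch-style result could come back as nested Python objects along these
# lines:
#
#     record["Count"]              -> IntegerElement(2)
#     record["IdList"]             -> ListElement(['12345', '67890'])
#     record["IdList"].tag         -> 'IdList'
#     record["IdList"].attributes  -> dictionary of the element's XML attributes
#
# i.e. list-like XML elements become ListElement, plain-text elements become
# StringElement or IntegerElement, and elements whose keys can repeat are
# collected in a StructureElement.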
import os.path
import urlparse
import urllib
import warnings
from xml.parsers import expat
# The following four classes are used to add a member .attributes to integers,
# strings, lists, and dictionaries, respectively.
class IntegerElement(int):
def __repr__(self):
text = int.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "IntegerElement(%s, attributes=%s)" % (text, repr(attributes))
class StringElement(str):
def __repr__(self):
text = str.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "StringElement(%s, attributes=%s)" % (text, repr(attributes))
class UnicodeElement(unicode):
def __repr__(self):
text = unicode.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "UnicodeElement(%s, attributes=%s)" % (text, repr(attributes))
class ListElement(list):
def __repr__(self):
text = list.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "ListElement(%s, attributes=%s)" % (text, repr(attributes))
class DictionaryElement(dict):
def __repr__(self):
text = dict.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "DictElement(%s, attributes=%s)" % (text, repr(attributes))
# A StructureElement is like a dictionary, but some of its keys can have
# multiple values associated with it. These values are stored in a list
# under each key.
class StructureElement(dict):
def __init__(self, keys):
dict.__init__(self)
for key in keys:
dict.__setitem__(self, key, [])
self.listkeys = keys
def __setitem__(self, key, value):
if key in self.listkeys:
self[key].append(value)
else:
dict.__setitem__(self, key, value)
def __repr__(self):
text = dict.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "DictElement(%s, attributes=%s)" % (text, repr(attributes))
class NotXMLError(ValueError):
def __init__(self, message):
self.msg = message
def __str__(self):
return "Failed to parse the XML data (%s). Please make sure that the input data are in XML format." % self.msg
class CorruptedXMLError(ValueError):
def __init__(self, message):
self.msg = message
def __str__(self):
return "Failed to parse the XML data (%s). Please make sure that the input data are not corrupted." % self.msg
class ValidationError(ValueError):
"""Validating parsers raise this error if the parser finds a tag in the XML that is not defined in the DTD. Non-validating parsers do not raise this error. The Bio.Entrez.read and Bio.Entrez.parse functions use validating parsers by default (see those functions for more information)"""
def __init__(self, name):
self.name = name
def __str__(self):
return "Failed to find tag '%s' in the DTD. To skip all tags that are not represented in the DTD, please call Bio.Entrez.read or Bio.Entrez.parse with validate=False." % self.name
class DataHandler:
home = os.path.expanduser('~')
local_dtd_dir = os.path.join(home, '.biopython', 'Bio', 'Entrez', 'DTDs')
del home
from Bio import Entrez
global_dtd_dir = os.path.join(str(Entrez.__path__[0]), "DTDs")
del Entrez
def __init__(self, validate):
self.stack = []
self.errors = []
self.integers = []
self.strings = []
self.lists = []
self.dictionaries = []
self.structures = {}
self.items = []
self.dtd_urls = []
self.validating = validate
self.parser = expat.ParserCreate(namespace_separator=" ")
self.parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
self.parser.XmlDeclHandler = self.xmlDeclHandler
def read(self, handle):
"""Set up the parser and let it parse the XML results"""
try:
self.parser.ParseFile(handle)
except expat.ExpatError, e:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, so we can be sure that
# we are parsing XML data. Most likely, the XML file is
# corrupted.
raise CorruptedXMLError(e)
else:
# We have not seen the initial <!xml declaration, so probably
# the input data is not in XML format.
raise NotXMLError(e)
try:
return self.object
except AttributeError:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, and expat didn't notice
# any errors, so self.object should be defined. If not, this is
# a bug.
raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
else:
# We did not see the initial <!xml declaration, so probably
# the input data is not in XML format.
raise NotXMLError("XML declaration not found")
def parse(self, handle):
BLOCK = 1024
while True:
#Read in another block of the file...
text = handle.read(BLOCK)
if not text:
# We have reached the end of the XML file
if self.stack:
# No more XML data, but there is still some unfinished
# business
raise CorruptedXMLError
try:
for record in self.object:
yield record
except AttributeError:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, and expat
# didn't notice any errors, so self.object should be
# defined. If not, this is a bug.
raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
else:
# We did not see the initial <!xml declaration, so
# probably the input data is not in XML format.
raise NotXMLError("XML declaration not found")
self.parser.Parse("", True)
self.parser = None
return
try:
self.parser.Parse(text, False)
except expat.ExpatError, e:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, so we can be sure
# that we are parsing XML data. Most likely, the XML file
# is corrupted.
raise CorruptedXMLError(e)
else:
# We have not seen the initial <!xml declaration, so
# probably the input data is not in XML format.
raise NotXMLError(e)
if not self.stack:
# Haven't read enough from the XML file yet
continue
records = self.stack[0]
if not isinstance(records, list):
raise ValueError("The XML file does not represent a list. Please use Entrez.read instead of Entrez.parse")
while len(records) > 1: # Then the top record is finished
record = records.pop(0)
yield record
def xmlDeclHandler(self, version, encoding, standalone):
# XML declaration found; set the handlers
self.parser.StartElementHandler = self.startElementHandler
self.parser.EndElementHandler = self.endElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
self.parser.ExternalEntityRefHandler = self.externalEntityRefHandler
self.parser.StartNamespaceDeclHandler = self.startNamespaceDeclHandler
def startNamespaceDeclHandler(self, prefix, un):
raise NotImplementedError("The Bio.Entrez parser cannot handle XML data that make use of XML namespaces")
def startElementHandler(self, name, attrs):
self.content = ""
if name in self.lists:
object = ListElement()
elif name in self.dictionaries:
object = DictionaryElement()
elif name in self.structures:
object = StructureElement(self.structures[name])
elif name in self.items: # Only appears in ESummary
name = str(attrs["Name"]) # convert from Unicode
del attrs["Name"]
itemtype = str(attrs["Type"]) # convert from Unicode
del attrs["Type"]
if itemtype=="Structure":
object = DictionaryElement()
elif name in ("ArticleIds", "History"):
object = StructureElement(["pubmed", "medline"])
elif itemtype=="List":
object = ListElement()
else:
object = StringElement()
object.itemname = name
object.itemtype = itemtype
elif name in self.strings + self.errors + self.integers:
self.attributes = attrs
return
else:
# Element not found in DTD
if self.validating:
raise ValidationError(name)
else:
# this will not be stored in the record
object = ""
if object!="":
object.tag = name
if attrs:
object.attributes = dict(attrs)
if len(self.stack)!=0:
current = self.stack[-1]
try:
current.append(object)
except AttributeError:
current[name] = object
self.stack.append(object)
def endElementHandler(self, name):
value = self.content
if name in self.errors:
if value=="":
return
else:
raise RuntimeError(value)
elif name in self.integers:
value = IntegerElement(value)
elif name in self.strings:
# Convert Unicode strings to plain strings if possible
try:
value = StringElement(value)
except UnicodeEncodeError:
value = UnicodeElement(value)
elif name in self.items:
self.object = self.stack.pop()
if self.object.itemtype in ("List", "Structure"):
return
elif self.object.itemtype=="Integer" and value:
value = IntegerElement(value)
else:
# Convert Unicode strings to plain strings if possible
try:
value = StringElement(value)
except UnicodeEncodeError:
value = UnicodeElement(value)
name = self.object.itemname
else:
self.object = self.stack.pop()
return
value.tag = name
if self.attributes:
value.attributes = dict(self.attributes)
del self.attributes
current = self.stack[-1]
if current!="":
try:
current.append(value)
except AttributeError:
current[name] = value
def characterDataHandler(self, content):
self.content += content
def elementDecl(self, name, model):
"""This callback function is called for each element declaration:
<!ELEMENT name (...)>
encountered in a DTD. The purpose of this function is to determine
whether this element should be regarded as a string, integer, list
dictionary, structure, or error."""
if name.upper()=="ERROR":
self.errors.append(name)
return
if name=='Item' and model==(expat.model.XML_CTYPE_MIXED,
expat.model.XML_CQUANT_REP,
None, ((expat.model.XML_CTYPE_NAME,
expat.model.XML_CQUANT_NONE,
'Item',
()
),
)
):
# Special case. As far as I can tell, this only occurs in the
# eSummary DTD.
self.items.append(name)
return
# First, remove ignorable parentheses around declarations
while (model[0] in (expat.model.XML_CTYPE_SEQ,
expat.model.XML_CTYPE_CHOICE)
and model[1] in (expat.model.XML_CQUANT_NONE,
expat.model.XML_CQUANT_OPT)
and len(model[3])==1):
model = model[3][0]
# PCDATA declarations correspond to strings
if model[0] in (expat.model.XML_CTYPE_MIXED,
expat.model.XML_CTYPE_EMPTY):
self.strings.append(name)
return
# List-type elements
if (model[0] in (expat.model.XML_CTYPE_CHOICE,
expat.model.XML_CTYPE_SEQ) and
model[1] in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP)):
self.lists.append(name)
return
# This is the tricky case. Check which keys can occur multiple
# times. If only one key is possible, and it can occur multiple
# times, then this is a list. If more than one key is possible,
# but none of them can occur multiple times, then this is a
# dictionary. Otherwise, this is a structure.
# In 'single' and 'multiple', we keep track which keys can occur
# only once, and which can occur multiple times.
single = []
multiple = []
# The 'count' function is called recursively to make sure all the
# children in this model are counted. Error keys are ignored;
# they raise an exception in Python.
def count(model):
quantifier, name, children = model[1:]
if name==None:
if quantifier in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP):
for child in children:
multiple.append(child[2])
else:
for child in children:
count(child)
elif name.upper()!="ERROR":
if quantifier in (expat.model.XML_CQUANT_NONE,
expat.model.XML_CQUANT_OPT):
single.append(name)
elif quantifier in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP):
multiple.append(name)
count(model)
if len(single)==0 and len(multiple)==1:
self.lists.append(name)
elif len(multiple)==0:
self.dictionaries.append(name)
else:
self.structures.update({name: multiple})
def open_dtd_file(self, filename):
path = os.path.join(DataHandler.global_dtd_dir, filename)
try:
handle = open(path, "rb")
except IOError:
pass
else:
return handle
path = os.path.join(DataHandler.local_dtd_dir, filename)
try:
handle = open(path, "rb")
except IOError:
pass
else:
return handle
return None
def externalEntityRefHandler(self, context, base, systemId, publicId):
"""The purpose of this function is to load the DTD locally, instead
of downloading it from the URL specified in the XML. Using the local
DTD results in much faster parsing. If the DTD is not found locally,
we try to download it. If new DTDs become available from NCBI,
putting them in Bio/Entrez/DTDs will allow the parser to see them."""
urlinfo = urlparse.urlparse(systemId)
#Following attribute requires Python 2.5+
#if urlinfo.scheme=='http':
if urlinfo[0]=='http':
# Then this is an absolute path to the DTD.
url = systemId
elif urlinfo[0]=='':
# Then this is a relative path to the DTD.
# Look at the parent URL to find the full path.
url = self.dtd_urls[-1]
source = os.path.dirname(url)
url = os.path.join(source, systemId)
self.dtd_urls.append(url)
# First, try to load the local version of the DTD file
location, filename = os.path.split(systemId)
handle = self.open_dtd_file(filename)
if not handle:
# DTD is not available as a local file. Try accessing it through
# the internet instead.
message = """\
Unable to load DTD file %s.
Bio.Entrez uses NCBI's DTD files to parse XML files returned by NCBI Entrez.
Though most of NCBI's DTD files are included in the Biopython distribution,
sometimes you may find that a particular DTD file is missing. While we can
access the DTD file through the internet, the parser is much faster if the
required DTD files are available locally.
For this purpose, please download %s from
%s
and save it either in directory
%s
or in directory
%s
in order for Bio.Entrez to find it.
Alternatively, you can save %s in the directory
Bio/Entrez/DTDs in the Biopython distribution, and reinstall Biopython.
Please also inform the Biopython developers about this missing DTD, by
reporting a bug on http://bugzilla.open-bio.org/ or sign up to our mailing
list and emailing us, so that we can include it with the next release of
Biopython.
Proceeding to access the DTD file through the internet...
""" % (filename, filename, url, self.global_dtd_dir, self.local_dtd_dir, filename)
warnings.warn(message)
try:
handle = urllib.urlopen(url)
except IOError:
                raise RuntimeError("Failed to access %s at %s" % (filename, url))
parser = self.parser.ExternalEntityParserCreate(context)
parser.ElementDeclHandler = self.elementDecl
parser.ParseFile(handle)
handle.close()
self.dtd_urls.pop()
return 1
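# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; identifiers below are purely
# illustrative). The handlers defined above are normally driven indirectly
# through Bio.Entrez.read()/Bio.Entrez.parse(), e.g.:
#
#   from Bio import Entrez
#   Entrez.email = "you@example.org"                  # hypothetical address
#   handle = Entrez.efetch(db="pubmed", id="123456",  # hypothetical record id
#                          retmode="xml")
#   record = Entrez.read(handle)   # the DataHandler above parses the XML
#   handle.close()
#
# elementDecl() decides whether each DTD element appears in 'record' as a
# string, integer, list, dictionary, or structure.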
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Entrez/Parser.py
|
Python
|
gpl-2.0
| 20,827
|
[
"Biopython"
] |
8288b865d01f2e70f0ad6463bc133aa28cb80311a9fd758686f51b214f68b9fe
|
#coding= utf-8
class PhoneticAlgorithms():
'''
This script implements the Double Metaphone algorithm (c) 1998, 1999 by Lawrence Philips
it was translated to Python from the C source written by Kevin Atkinson (http://aspell.net/metaphone/)
By Andrew Collins - January 12, 2007 who claims no rights to this work
http://www.atomodo.com/code/double-metaphone
    Tested with Python 2.4.3
Updated Feb 14, 2007 - Found a typo in the 'gh' section
Updated Dec 17, 2007 - Bugs fixed in 'S', 'Z', and 'J' sections. Thanks Chris Leong!
Updated June 25, 2010 - several bugs fixed thanks to Nils Johnsson for a spectacular
bug squashing effort. There were many cases where this function wouldn't give the same output
as the original C source that were fixed by his careful attention and excellent communication.
The script was also updated to use utf-8 rather than latin-1.
'''
def __init__(self):
pass
def double_metaphone(self, st) :
"""
dm(string) -> (string, string or None)
returns the double metaphone codes for given string - always a tuple
there are no checks done on the input string, but it should be a single word or name.
"""
vowels = ['A', 'E', 'I', 'O', 'U', 'Y']
st = st.decode('utf-8', 'ignore')
st = st.upper() # st is short for string. I usually prefer descriptive over short, but this var is used a lot!
is_slavo_germanic = (st.find('W') > -1 or st.find('K') > -1 or st.find('CZ') > -1 or st.find('WITZ') > -1)
length = len(st)
first = 2
        st = ('-') * first + st + (' ' * 5) # so we can index beyond the beginning and end of the input string
last = first + length -1
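        # e.g. 'SMITH' is padded to '--SMITH     ', so st[first] is 'S' and st[last] is 'H'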
pos = first # pos is short for position
pri = sec = '' # primary and secondary metaphone codes
#skip these silent letters when at start of word
if st[first:first+2] in ["GN", "KN", "PN", "WR", "PS"] :
pos += 1
# Initial 'X' is pronounced 'Z' e.g. 'Xavier'
if st[first] == 'X' :
pri = sec = 'S' #'Z' maps to 'S'
pos += 1
# main loop through chars in st
while pos <= last :
#print str(pos) + '\t' + st[pos]
ch = st[pos] # ch is short for character
# nxt (short for next characters in metaphone code) is set to a tuple of the next characters in
# the primary and secondary codes and how many characters to move forward in the string.
# the secondary code letter is given only when it is different than the primary.
# This is just a trick to make the code easier to write and read.
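            # For example (illustrative values): nxt = ('J', 'K', 2) appends 'J' to the
            # primary code and 'K' to the secondary code, then advances pos by two;
            # nxt = ('F', 1) appends 'F' to both codes and advances pos by one.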
nxt = (None, 1) # default action is to add nothing and move to next char
if ch in vowels :
nxt = (None, 1)
if pos == first : # all init vowels now map to 'A'
nxt = ('A', 1)
elif ch == 'B' :
#"-mb", e.g", "dumb", already skipped over... see 'M' below
if st[pos+1] == 'B' :
nxt = ('P', 2)
else :
nxt = ('P', 1)
elif ch == 'C' :
# various germanic
if (pos > (first + 1) and st[pos-2] not in vowels and st[pos-1:pos+2] == 'ACH' and \
(st[pos+2] not in ['I', 'E'] or st[pos-2:pos+4] in ['BACHER', 'MACHER'])) :
nxt = ('K', 2)
# special case 'CAESAR'
elif pos == first and st[first:first+6] == 'CAESAR' :
nxt = ('S', 2)
elif st[pos:pos+4] == 'CHIA' : #italian 'chianti'
nxt = ('K', 2)
elif st[pos:pos+2] == 'CH' :
# find 'michael'
if pos > first and st[pos:pos+4] == 'CHAE' :
nxt = ('K', 'X', 2)
elif pos == first and (st[pos+1:pos+6] in ['HARAC', 'HARIS'] or \
st[pos+1:pos+4] in ["HOR", "HYM", "HIA", "HEM"]) and st[first:first+5] != 'CHORE' :
nxt = ('K', 2)
#germanic, greek, or otherwise 'ch' for 'kh' sound
elif st[first:first+4] in ['VAN ', 'VON '] or st[first:first+3] == 'SCH' \
or st[pos-2:pos+4] in ["ORCHES", "ARCHIT", "ORCHID"] \
or st[pos+2] in ['T', 'S'] \
or ((st[pos-1] in ["A", "O", "U", "E"] or pos == first) \
and st[pos+2] in ["L", "R", "N", "M", "B", "H", "F", "V", "W", " "]) :
nxt = ('K', 1)
else :
if pos > first :
if st[first:first+2] == 'MC' :
nxt = ('K', 2)
else :
nxt = ('X', 'K', 2)
else :
nxt = ('X', 2)
#e.g, 'czerny'
elif st[pos:pos+2] == 'CZ' and st[pos-2:pos+2] != 'WICZ' :
nxt = ('S', 'X', 2)
#e.g., 'focaccia'
elif st[pos+1:pos+4] == 'CIA' :
nxt = ('X', 3)
#double 'C', but not if e.g. 'McClellan'
elif st[pos:pos+2] == 'CC' and not (pos == (first +1) and st[first] == 'M') :
#'bellocchio' but not 'bacchus'
if st[pos+2] in ["I", "E", "H"] and st[pos+2:pos+4] != 'HU' :
#'accident', 'accede' 'succeed'
if (pos == (first +1) and st[first] == 'A') or \
st[pos-1:pos+4] in ['UCCEE', 'UCCES'] :
nxt = ('KS', 3)
#'bacci', 'bertucci', other italian
else:
nxt = ('X', 3)
else :
nxt = ('K', 2)
elif st[pos:pos+2] in ["CK", "CG", "CQ"] :
nxt = ('K', 'K', 2)
elif st[pos:pos+2] in ["CI", "CE", "CY"] :
#italian vs. english
if st[pos:pos+3] in ["CIO", "CIE", "CIA"] :
nxt = ('S', 'X', 2)
else :
nxt = ('S', 2)
else :
                    #name sent in 'mac caffrey', 'mac gregor'
if st[pos+1:pos+3] in [" C", " Q", " G"] :
nxt = ('K', 3)
else :
if st[pos+1] in ["C", "K", "Q"] and st[pos+1:pos+3] not in ["CE", "CI"] :
nxt = ('K', 2)
else : # default for 'C'
nxt = ('K', 1)
elif ch == u'Ç' :
nxt = ('S', 1)
elif ch == 'D' :
if st[pos:pos+2] == 'DG' :
if st[pos+2] in ['I', 'E', 'Y'] : #e.g. 'edge'
nxt = ('J', 3)
else :
nxt = ('TK', 2)
elif st[pos:pos+2] in ['DT', 'DD'] :
nxt = ('T', 2)
else :
nxt = ('T', 1)
elif ch == 'F' :
if st[pos+1] == 'F' :
nxt = ('F', 2)
else :
nxt = ('F', 1)
elif ch == 'G' :
if st[pos+1] == 'H' :
if pos > first and st[pos-1] not in vowels :
nxt = ('K', 2)
elif pos < (first + 3) :
if pos == first : #'ghislane', ghiradelli
if st[pos+2] == 'I' :
nxt = ('J', 2)
else :
nxt = ('K', 2)
#Parker's rule (with some further refinements) - e.g., 'hugh'
elif (pos > (first + 1) and st[pos-2] in ['B', 'H', 'D'] ) \
or (pos > (first + 2) and st[pos-3] in ['B', 'H', 'D'] ) \
or (pos > (first + 3) and st[pos-4] in ['B', 'H'] ) :
nxt = (None, 2)
else :
# e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
if pos > (first + 2) and st[pos-1] == 'U' \
and st[pos-3] in ["C", "G", "L", "R", "T"] :
nxt = ('F', 2)
else :
if pos > first and st[pos-1] != 'I' :
nxt = ('K', 2)
elif st[pos+1] == 'N' :
if pos == (first +1) and st[first] in vowels and not is_slavo_germanic :
nxt = ('KN', 'N', 2)
else :
# not e.g. 'cagney'
if st[pos+2:pos+4] != 'EY' and st[pos+1] != 'Y' and not is_slavo_germanic :
nxt = ('N', 'KN', 2)
else :
nxt = ('KN', 2)
# 'tagliaro'
elif st[pos+1:pos+3] == 'LI' and not is_slavo_germanic :
nxt = ('KL', 'L', 2)
# -ges-,-gep-,-gel-, -gie- at beginning
elif pos == first and (st[pos+1] == 'Y' \
or st[pos+1:pos+3] in ["ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER"]) :
nxt = ('K', 'J', 2)
# -ger-, -gy-
                elif (st[pos+1:pos+3] == 'ER' or st[pos+1] == 'Y') \
and st[first:first+6] not in ["DANGER", "RANGER", "MANGER"] \
and st[pos-1] not in ['E', 'I'] and st[pos-1:pos+2] not in ['RGY', 'OGY'] :
nxt = ('K', 'J', 2)
# italian e.g, 'biaggi'
elif st[pos+1] in ['E', 'I', 'Y'] or st[pos-1:pos+3] in ["AGGI", "OGGI"] :
# obvious germanic
if st[first:first+4] in ['VON ', 'VAN '] or st[first:first+3] == 'SCH' \
or st[pos+1:pos+3] == 'ET' :
nxt = ('K', 2)
else :
# always soft if french ending
if st[pos+1:pos+5] == 'IER ' :
nxt = ('J', 2)
else :
nxt = ('J', 'K', 2)
elif st[pos+1] == 'G' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'H' :
# only keep if first & before vowel or btw. 2 vowels
if (pos == first or st[pos-1] in vowels) and st[pos+1] in vowels :
nxt = ('H', 2)
else : # (also takes care of 'HH')
nxt = (None, 1)
elif ch == 'J' :
# obvious spanish, 'jose', 'san jacinto'
if st[pos:pos+4] == 'JOSE' or st[first:first+4] == 'SAN ' :
if (pos == first and st[pos+4] == ' ') or st[first:first+4] == 'SAN ' :
nxt = ('H',)
else :
nxt = ('J', 'H')
elif pos == first and st[pos:pos+4] != 'JOSE' :
nxt = ('J', 'A') # Yankelovich/Jankelowicz
else :
# spanish pron. of e.g. 'bajador'
if st[pos-1] in vowels and not is_slavo_germanic \
and st[pos+1] in ['A', 'O'] :
nxt = ('J', 'H')
else :
if pos == last :
nxt = ('J', ' ')
else :
if st[pos+1] not in ["L", "T", "K", "S", "N", "M", "B", "Z"] \
and st[pos-1] not in ["S", "K", "L"] :
nxt = ('J',)
else :
nxt = (None, )
if st[pos+1] == 'J' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'K' :
if st[pos+1] == 'K' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'L' :
if st[pos+1] == 'L' :
# spanish e.g. 'cabrillo', 'gallegos'
if (pos == (last - 2) and st[pos-1:pos+3] in ["ILLO", "ILLA", "ALLE"]) \
or ((st[last-1:last+1] in ["AS", "OS"] or st[last] in ["A", "O"]) \
and st[pos-1:pos+3] == 'ALLE') :
nxt = ('L', '', 2)
else :
nxt = ('L', 2)
else :
nxt = ('L', 1)
elif ch == 'M' :
if st[pos+1:pos+4] == 'UMB' \
and (pos + 1 == last or st[pos+2:pos+4] == 'ER') \
or st[pos+1] == 'M' :
nxt = ('M', 2)
else :
nxt = ('M', 1)
elif ch == 'N' :
if st[pos+1] == 'N' :
nxt = ('N', 2)
else :
nxt = ('N', 1)
elif ch == u'Ñ' :
nxt = ('N', 1)
elif ch == 'P' :
if st[pos+1] == 'H' :
nxt = ('F', 2)
elif st[pos+1] in ['P', 'B'] : # also account for "campbell", "raspberry"
nxt = ('P', 2)
else :
nxt = ('P', 1)
elif ch == 'Q' :
if st[pos+1] == 'Q' :
nxt = ('K', 2)
else :
nxt = ('K', 1)
elif ch == 'R' :
# french e.g. 'rogier', but exclude 'hochmeier'
if pos == last and not is_slavo_germanic \
and st[pos-2:pos] == 'IE' and st[pos-4:pos-2] not in ['ME', 'MA'] :
nxt = ('', 'R')
else :
nxt = ('R',)
if st[pos+1] == 'R' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'S' :
# special cases 'island', 'isle', 'carlisle', 'carlysle'
if st[pos-1:pos+2] in ['ISL', 'YSL'] :
nxt = (None, 1)
# special case 'sugar-'
elif pos == first and st[first:first+5] == 'SUGAR' :
nxt =('X', 'S', 1)
elif st[pos:pos+2] == 'SH' :
# germanic
if st[pos+1:pos+5] in ["HEIM", "HOEK", "HOLM", "HOLZ"] :
nxt = ('S', 2)
else :
nxt = ('X', 2)
# italian & armenian
elif st[pos:pos+3] in ["SIO", "SIA"] or st[pos:pos+4] == 'SIAN' :
if not is_slavo_germanic :
nxt = ('S', 'X', 3)
else :
nxt = ('S', 3)
                # german & anglicisations, e.g. 'smith' matches 'schmidt', 'snider' matches 'schneider'
                # also, -sz- in slavic languages, although in Hungarian it is pronounced 's'
elif (pos == first and st[pos+1] in ["M", "N", "L", "W"]) or st[pos+1] == 'Z' :
nxt = ('S', 'X')
if st[pos+1] == 'Z' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif st[pos:pos+2] == 'SC' :
# Schlesinger's rule
if st[pos+2] == 'H' :
# dutch origin, e.g. 'school', 'schooner'
if st[pos+3:pos+5] in ["OO", "ER", "EN", "UY", "ED", "EM"] :
# 'schermerhorn', 'schenker'
if st[pos+3:pos+5] in ['ER', 'EN'] :
nxt = ('X', 'SK', 3)
else :
nxt = ('SK', 3)
else :
if pos == first and st[first+3] not in vowels and st[first+3] != 'W' :
nxt = ('X', 'S', 3)
else :
nxt = ('X', 3)
elif st[pos+2] in ['I', 'E', 'Y'] :
nxt = ('S', 3)
else :
nxt = ('SK', 3)
# french e.g. 'resnais', 'artois'
elif pos == last and st[pos-2:pos] in ['AI', 'OI'] :
nxt = ('', 'S', 1)
else :
nxt = ('S',)
if st[pos+1] in ['S', 'Z'] :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'T' :
if st[pos:pos+4] == 'TION' :
nxt = ('X', 3)
elif st[pos:pos+3] in ['TIA', 'TCH'] :
nxt = ('X', 3)
elif st[pos:pos+2] == 'TH' or st[pos:pos+3] == 'TTH' :
# special case 'thomas', 'thames' or germanic
if st[pos+2:pos+4] in ['OM', 'AM'] or st[first:first+4] in ['VON ', 'VAN '] \
or st[first:first+3] == 'SCH' :
nxt = ('T', 2)
else :
nxt = ('0', 'T', 2)
elif st[pos+1] in ['T', 'D'] :
nxt = ('T', 2)
else :
nxt = ('T', 1)
elif ch == 'V' :
if st[pos+1] == 'V' :
nxt = ('F', 2)
else :
nxt = ('F', 1)
elif ch == 'W' :
# can also be in middle of word
if st[pos:pos+2] == 'WR' :
nxt = ('R', 2)
elif pos == first and (st[pos+1] in vowels or st[pos:pos+2] == 'WH') :
# Wasserman should match Vasserman
if st[pos+1] in vowels :
nxt = ('A', 'F', 1)
else :
nxt = ('A', 1)
# Arnow should match Arnoff
elif (pos == last and st[pos-1] in vowels) \
or st[pos-1:pos+5] in ["EWSKI", "EWSKY", "OWSKI", "OWSKY"] \
or st[first:first+3] == 'SCH' :
nxt = ('', 'F', 1)
# polish e.g. 'filipowicz'
elif st[pos:pos+4] in ["WICZ", "WITZ"] :
nxt = ('TS', 'FX', 4)
else : # default is to skip it
nxt = (None, 1)
elif ch == 'X' :
# french e.g. breaux
nxt = (None,)
if not(pos == last and (st[pos-3:pos] in ["IAU", "EAU"] \
or st[pos-2:pos] in ['AU', 'OU'])):
nxt = ('KS',)
if st[pos+1] in ['C', 'X'] :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
elif ch == 'Z' :
# chinese pinyin e.g. 'zhao'
if st[pos+1] == 'H' :
nxt = ('J',)
elif st[pos+1:pos+3] in ["ZO", "ZI", "ZA"] \
or (is_slavo_germanic and pos > first and st[pos-1] != 'T') :
nxt = ('S', 'TS')
else :
nxt = ('S',)
if st[pos+1] == 'Z' :
nxt = nxt + (2,)
else :
nxt = nxt + (1,)
# ----------------------------------
# --- end checking letters------
# ----------------------------------
#print str(nxt)
if len(nxt) == 2 :
if nxt[0] :
pri += nxt[0]
sec += nxt[0]
pos += nxt[1]
elif len(nxt) == 3 :
if nxt[0] :
pri += nxt[0]
if nxt[1] :
sec += nxt[1]
pos += nxt[2]
if pri == sec :
return (pri, None)
else :
return (pri, sec)
if __name__ == '__main__' :
pa = PhoneticAlgorithms()
words = ['tomorrow', 'tomoro', 'twomorrow', 'today', 'twoday']
for s in words:
print s, ' -> ', pa.double_metaphone(s)
names = {'maurice':('MRS', None),'aubrey':('APR', None),'cambrillo':('KMPRL','KMPR')\
,'heidi':('HT', None),'katherine':('K0RN','KTRN'),'Thumbail':('0MPL','TMPL')\
,'catherine':('K0RN','KTRN'),'richard':('RXRT','RKRT'),'bob':('PP', None)\
,'eric':('ARK', None),'geoff':('JF','KF'),'Through':('0R','TR'), 'Schwein':('XN', 'XFN')\
,'dave':('TF', None),'ray':('R', None),'steven':('STFN', None),'bryce':('PRS', None)\
,'randy':('RNT', None),'bryan':('PRN', None),'Rapelje':('RPL', None)\
,'brian':('PRN', None),'otto':('AT', None),'auto':('AT', None), 'Dallas':('TLS', None)\
, 'maisey':('MS', None), 'zhang':('JNK', None), 'Chile':('XL', None)\
,'Jose':('HS', None), 'Arnow':('ARN','ARNF'), 'solilijs':('SLLS', None)\
, 'Parachute':('PRKT', None), 'Nowhere':('NR', None), 'Tux':('TKS', None)}
for name in names.keys() :
        assert (pa.double_metaphone(name) == names[name]), 'For "%s" function returned %s. Should be %s.' % (name, pa.double_metaphone(name), names[name])
|
amsqr/k-Met
|
phonetic_algorithms.py
|
Python
|
gpl-3.0
| 21,699
|
[
"Brian"
] |
ca04bf02a6f97c6a85a174765f81af6b3adfb09d7125f8eae66bf11298fe2de7
|
import unittest
from hamcrest import *
from cis.data_io.products.NCAR_NetCDF_RAF import NCAR_NetCDF_RAF
from cis.test.integration.test_io.test_products.test_data_products import ProductTests
from cis.test.integration_test_data import cis_test_files
class TestNCAR_NetCDF_RAF(ProductTests, unittest.TestCase):
def setUp(self):
self.setup(cis_test_files["NCAR_NetCDF_RAF"], NCAR_NetCDF_RAF)
def test_can_concatenate_files_with_different_time_stamps(self):
from cis import read_data
import numpy as np
from cis.test.integration_test_data import valid_GASSP_station_files_with_different_timestamps,\
valid_GASSP_station_var_with_different_timestamps
var = valid_GASSP_station_var_with_different_timestamps
filename = valid_GASSP_station_files_with_different_timestamps
data = read_data(filename, var)
time_coord = data.coord(axis='T')
assert_that(np.min(time_coord.data), close_to(149107 + 54690.0/86400, 1e-5))
assert_that(np.max(time_coord.data), close_to(149110 + 81330.0/86400, 1e-5))
def test_can_concatenate_aircraft_files(self):
from cis import read_data
from cis.test.integration_test_data import valid_GASSP_aircraft_files_with_different_timestamps,\
valid_GASSP_aircraft_var_with_different_timestamps
data = read_data(valid_GASSP_aircraft_files_with_different_timestamps,
valid_GASSP_aircraft_var_with_different_timestamps)
time_coord = data.coord(axis='T')
assert_that(len(time_coord.data), equal_to(63609))
class TestNCAR_NetCDF_RAF_with_GASSP_aux_coord(ProductTests, unittest.TestCase):
def setUp(self):
self.setup(cis_test_files["GASSP_aux_coord"], NCAR_NetCDF_RAF)
def test_variable_wildcarding(self):
# We get all of the variables from the file like this - but this isn't the same as the set defined in the
# test data because they are all the same shape. These aren't.
self.vars = [u'AREADIST_DMA_OPC', u'VOLDIST_DMA_OPC', u'DYNAMIC_PRESSURE', u'NUMDIST_DMA_OPC', u'PRESSURE_ALTITUDE',
u'LONGITUDE', u'RELATIVE_HUMIDITY', u'AIR_TEMPERATURE', u'AIR_PRESSURE', u'TIME', u'LATITUDE']
super(TestNCAR_NetCDF_RAF_with_GASSP_aux_coord, self).test_variable_wildcarding()
class TestNCAR_NetCDF_RAF_with_GASSP_aeroplane(ProductTests, unittest.TestCase):
def setUp(self):
self.setup(cis_test_files["GASSP_aeroplane"], NCAR_NetCDF_RAF)
class TestNCAR_NetCDF_RAF_with_GASSP_ship(ProductTests, unittest.TestCase):
def setUp(self):
self.setup(cis_test_files["GASSP_ship"], NCAR_NetCDF_RAF)
class TestNCAR_NetCDF_RAF_with_GASSP_station(ProductTests, unittest.TestCase):
def setUp(self):
self.setup(cis_test_files["GASSP_station"], NCAR_NetCDF_RAF)
class TestNCAR_NetCDF_RAF_get_file_type_error(unittest.TestCase):
def test_WHEN_file_is_GASSP_THEN_no_errors(self):
from cis.test.integration_test_data import valid_GASSP_station_filename
product = NCAR_NetCDF_RAF()
errors = product.get_file_type_error(valid_GASSP_station_filename)
assert_that(errors, is_(None), "file should be GASSP")
def test_WHEN_file_is_NCAR_RAF_THEN_no_errors(self):
from cis.test.integration_test_data import valid_NCAR_NetCDF_RAF_filename
product = NCAR_NetCDF_RAF()
errors = product.get_file_type_error(valid_NCAR_NetCDF_RAF_filename)
        assert_that(errors, is_(None), "file should be NCAR RAF")
    def test_WHEN_file_does_not_exist_THEN_errors(self):
from cis.test.integration_test_data import invalid_filename
product = NCAR_NetCDF_RAF()
errors = product.get_file_type_error(invalid_filename)
assert_that(errors, is_(["File does not exist"]), "file should not exist")
def test_WHEN_file_is_not_NCAR_RAF_OR_GASSP_THEN_errors(self):
from cis.test.integration_test_data import valid_hadgem_filename
product = NCAR_NetCDF_RAF()
errors = product.get_file_type_error(valid_hadgem_filename)
assert_that(len(errors), is_(1), "file should not be GASSP")
def test_WHEN_file_is_not_netcdf_THEN_errors(self):
from cis.test.integration_test_data import valid_aeronet_filename
product = NCAR_NetCDF_RAF()
errors = product.get_file_type_error(valid_aeronet_filename)
assert_that(len(errors), is_(2), "file should not be netcdf")
|
cedadev/cis
|
cis/test/integration/test_io/test_products/test_ncar_raf.py
|
Python
|
lgpl-3.0
| 4,482
|
[
"NetCDF"
] |
82f281dbdc615a9c32c9d2c913e868357f69d1b9c5fed2c1802b2dc2cfcd1ebc
|
'''
PipelineLncRNA.py - functions and classes for use with the lincRNA pipeline
===========================================================================
'''
import re
import sys
import os
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import gzip
import collections
import CGAT.IndexedGenome as IndexedGenome
import CGAT.IndexedFasta as IndexedFasta
import CGATPipelines.Pipeline as P
import CGAT.Experiment as E
import sqlite3
import tempfile
import string
from copy import deepcopy
try:
import bx.intervals.io
import bx.align.maf
import bx.intervals
import bx.interval_index_file
except ImportError:
# bx library not python3 compatible
pass
########################################################
# gene set building
########################################################
def buildCodingGeneSet(abinitio_coding, reference, outfile):
'''
takes the output from cuffcompare of a transcript
assembly and filters for annotated protein coding
genes.
NB "pruned" refers to nomenclature in the transcript
building pipeline - transcripts that appear in at least
two samples.
Because an abinitio assembly will often contain
fragments of known transcripts and describe them as
novel, the default behaviour is to produce a set that
is composed of 'complete' transcripts
'''
inf = IOTools.openFile(abinitio_coding)
outf = gzip.open(outfile, "w")
coding = {}
coding["protein_coding"] = GTF.readAndIndex(GTF.iterator_filtered(
GTF.iterator(
IOTools.openFile(
reference)), source="protein_coding"), with_value=False)
for gtf in GTF.iterator(inf):
if coding["protein_coding"].contains(gtf.contig, gtf.start, gtf.end):
if gtf.class_code == "=":
outf.write("%s\n" % str(gtf))
outf.close()
def buildRefcodingGeneSet(coding_set, refcoding_set, outfile):
'''takes genes from an ab initio assembly and filters a reference
coding set for these genes. Allows for comparisons of known
transcripts for those genes that are assembled ab initio. Does
this by gene name
'''
keep_genes = set()
for gtf in GTF.iterator(IOTools.openFile(coding_set)):
keep_genes.add(gtf.gene_name)
outf = gzip.open(outfile, "w")
for gtf in GTF.iterator(IOTools.openFile(refcoding_set)):
if gtf.gene_name in keep_genes:
outf.write("%s\n" % gtf)
outf.close()
def buildRefnoncodingGeneSet(reference, outfile):
'''
filter the refnoncoding geneset for things that are described in ensembl
as being:
Ambiguous_orf
Retained_intron
Sense_intronic
antisense
Sense_overlapping
Processed transcript
'''
statement = ("zcat %(reference)s |"
" awk '$2 == \"lincRNA\" "
" || $2 == \"non_coding\" "
" || $2 == \"3prime_overlapping_ncrna\" "
" || $2 == \"ncRNA_host\"'"
" | gzip > %(outfile)s")
P.run()
def buildLncRNAGeneSet(abinitio_lincrna,
reference,
refnoncoding,
pseudogenes_gtf,
numts_gtf, outfile,
min_length):
'''
build lncRNA gene set.
In contrast to the pipeline, this lincRNA set does not contain the
reference noncoding gene set. It is transcripts in the abinitio set
that do not overlap at any protein coding, processed or pseudogene
transcripts (exons+introns) in a reference gene set.
lincRNA genes are often expressed at low level and thus the resultant
    transcript models are fragmentary. To avoid some double counting in
    downstream analyses, transcripts overlapping on the same strand are merged
    - this does not necessarily seem to work well if not using the
reference set.
Transcripts need to have a length of at least 200 bp.
'''
infile_abinitio = abinitio_lincrna
reference_gtf = reference
refnoncoding_gtf = refnoncoding
pseudogenes_gtf = pseudogenes_gtf
numts_gtf = numts_gtf
E.info("indexing geneset for filtering")
# 17/11/2012 Jethro added Ig genes to list.
# (NB 31 entries in $2 of reference.gtf.gz)
# 10/12/2012 Nick added CDS.
# This will then filter anything that overlaps with a CDS
# NB this is the annotation for RefSeq and so requires the user to
# have created a reference annotation that includes this annotation.
input_sections = ("protein_coding",
"processed_pseudogene",
"unprocessed_pseudogene",
"nonsense_mediated_decay",
"retained_intron",
"IG_V_gene",
"IG_J_gene",
"IG_C_gene",
"IG_D_gene",
"IG_LV_gene",
"TR_V_gene",
"CDS")
# create a dictionary containing a separate index for each input section
indices = {}
for section in input_sections:
indices[section] = GTF.readAndIndex(
GTF.iterator_filtered(GTF.iterator(
IOTools.openFile(reference_gtf)), source=section),
with_value=True)
E.info("built indices for %i features" % len(indices))
    # add pseudogenes and numts to dictionary of indices
indices["numts"] = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(numts_gtf)), with_value=True)
E.info("added index for numts")
indices["pseudogenes"] = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(pseudogenes_gtf)), with_value=True)
E.info("added index for pseudogenes")
# iterate through assembled transcripts, identify those that intersect
# with indexed sections
total_transcripts = set()
remove_transcripts = collections.defaultdict(set)
E.info("collecting genes to remove")
for gtf in GTF.transcript_iterator(
GTF.iterator(
IOTools.openFile(infile_abinitio))):
# remove those transcripts too short to be classified as lncRNAs
l = sum([x.end - x.start for x in gtf])
if l < min_length:
remove_transcripts[gtf[0].transcript_id].add("length")
# remove transcripts where one or more exon intersects one or more
# sections
for section in indices.keys():
for exon in gtf:
transcript_id = exon.transcript_id
total_transcripts.add(transcript_id)
if indices[section].contains(
exon.contig, exon.start, exon.end):
# check if any of the intersected intervals are on same
                    # strand as query exon
for interval in indices[section].get(
exon.contig, exon.start, exon.end):
# only remove transcripts where an exon is on
# the same strand as the interval it
# intersects
if exon.strand == interval[2].strand:
remove_transcripts[transcript_id].add(section)
E.info("removing %i out of %i transcripts" %
(len(remove_transcripts), len(total_transcripts)))
# Jethro - removed the automatic retention of transcripts that
# intersect known non-coding intervals regardless of
# strand. Instead, any removed transcript that also intersects a
# lncRNA on the same strand is now output to a separate gtf.
noncoding = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(refnoncoding_gtf)), with_value=True)
rej_gtf = os.path.join(os.path.dirname(outfile),
"lncrna_removed_nc_intersect.gtf.gz")
rej_gtf = IOTools.openFile(rej_gtf, "w")
for gtf in GTF.transcript_iterator(GTF.iterator(
IOTools.openFile(infile_abinitio))):
if gtf[0].transcript_id in list(remove_transcripts.keys()):
for exon in gtf:
if noncoding.contains(exon.contig, exon.start, exon.end):
if exon.strand in [x[2].strand for x in
list(noncoding.get(exon.contig,
exon.start,
exon.end))]:
for exon2 in IOTools.flatten(gtf):
rej_gtf.write(str(exon2) + "\n")
break
rej_gtf.close()
outf = open("lncrna_removed.tsv", "w")
outf.write("transcript_id" + "\t" + "removed" + "\n")
for x, y in remove_transcripts.items():
outf.write("%s\t%s\n" % (x, ",".join(y)))
outf.close()
# write out transcripts that are not in removed set
temp = P.getTempFile(".")
for entry in GTF.iterator(IOTools.openFile(infile_abinitio)):
if entry.transcript_id in remove_transcripts:
continue
temp.write("%s\n" % str(entry))
temp.close()
filename = temp.name
statement = '''cat %(filename)s | cgat gtf2gtf
--method=sort --sort-order=gene
--log=%(outfile)s.log
| gzip > %(outfile)s'''
P.run()
os.unlink(temp.name)
def buildFilteredLncRNAGeneSet(flagged_gtf,
outfile,
genesets_previous,
filter_se="transcripts"):
'''
creates a filtered lincRNA geneset. This geneset will not include any
single exon lincRNA unless they have been seen previously i.e. it overlaps
a previously identified lincRNA
At this point we add a flag for whether the gene is a novel lncRNA or not
NB this is a large function and should be modified in the future
note genesets_previous provided as a list - priority is
placed on the first in the list
'''
# keep single and multi exonic lncRNA separate
previous_single = IndexedGenome.IndexedGenome()
previous_multi = IndexedGenome.IndexedGenome()
E.info("indexing previously identified lncRNA")
for prev in genesets_previous:
inf = IOTools.openFile(prev)
for transcript in GTF.transcript_iterator(GTF.iterator(inf)):
# use an indexed genome to assess novelty of lncRNA
if len(transcript) > 1:
for gtf in transcript:
previous_multi.add(gtf.contig,
gtf.start,
gtf.end,
[transcript[0].strand,
transcript[0].gene_id])
# add single exons
elif len(transcript) == 1:
previous_single.add(transcript[0].contig,
transcript[0].start,
transcript[0].end,
[transcript[0].strand,
transcript[0].gene_id])
# create sets for keeping and discarding genes
temp = P.getTempFile(dir=".")
keep = set()
known = set()
novel = set()
# iterate over the flagged GTF - flagged for exon status
E.info("checking for overlap with previously identified sets")
for transcript in GTF.transcript_iterator(
GTF.iterator(IOTools.openFile(flagged_gtf))):
gene_id = transcript[0].gene_id
# check if there is overlap in the known sets in order to add
# gene_status attribute
if transcript[0].exon_status == "m":
if previous_multi.contains(transcript[0].contig,
min([gtf.start for gtf in transcript]),
max([gtf.end for gtf in transcript])):
known.add(transcript[0].gene_id)
# add previous multi exonic lincRNA gene id
                # to known set - to avoid adding to gtf later
for gtf2 in previous_multi.get(transcript[0].contig,
min([gtf.start
for gtf in transcript]),
max([gtf.end
for gtf in transcript])):
known.add(gtf2[2][1])
else:
novel.add(transcript[0].gene_id)
elif filter_se == "locus" and transcript[0].exon_status_locus == "m":
if previous_multi.contains(transcript[0].contig,
min([gtf.start for gtf in transcript]),
max([gtf.end for gtf in transcript])):
known.add(transcript[0].gene_id)
# add previous multi exonic lincRNA gene id
                # to known set - to avoid adding to gtf later
for gtf2 in previous_multi.get(
transcript[0].contig,
min([gtf.start for gtf in transcript]),
max([gtf.end for gtf in transcript])):
known.add(gtf2[2][1])
else:
novel.add(transcript[0].gene_id)
else:
continue
E.info("writing filtered lncRNA geneset")
E.info("writing %i assembled but known" % len(known))
E.info("writing %i assembled novel" % len(novel))
# write out ones to keep from assembled data
for gtf in GTF.iterator(IOTools.openFile(flagged_gtf)):
if gtf.gene_id in known and gtf.exon_status == "m":
gtf.setAttribute("gene_status", "known")
temp.write("%s\n" % gtf)
elif gtf.gene_id in novel and gtf.exon_status == "m":
gtf.setAttribute("gene_status", "novel")
temp.write("%s\n" % gtf)
# write out ones to keep - from previous evidence
# i.e. anything single exon or anything
# multi-exonic non-overlapping the assembled set
# hierarchically done so add to 'done' if found as we iterate
# over the previous sets
done = IndexedGenome.IndexedGenome()
known_count = 0
for prev in genesets_previous:
inf = IOTools.openFile(prev)
for gene in GTF.flat_gene_iterator(GTF.iterator(inf)):
gene_id = gene[0].gene_id
            # don't write out ones that we build
if gene_id in known:
continue
elif done.contains(gene[0].contig,
min([gtf.start for gtf in gene]),
max([gtf.end for gtf in gene])):
continue
else:
for gtf in gene:
gtf.setAttribute("gene_status", "known")
temp.write("%s\n" % gtf)
known_count += 1
done.add(gene[0].contig, min([gtf.start for gtf in gene]), max(
[gtf.end for gtf in gene]), gene_id)
E.info("written %i from %s " % (known_count, ",".join(genesets_previous)))
temp.close()
filename = temp.name
statement = '''cat %(filename)s | cgat gtf2gtf
--method=sort --sort-order=transcript
--log=%(outfile)s.log
| gzip > %(outfile)s'''
P.run()
def buildFinalLncRNAGeneSet(filteredLncRNAGeneSet,
cpc_table,
outfile,
filter_cpc=False,
cpc_threshold=1,
rename_lncRNA=False):
'''filters lncRNA set based on the coding potential as output from
the CPC
'''
if filter_cpc:
# set threshold for filtering transcripts on coding potential
cpc_thresh = float(cpc_threshold)
# get the transcripts that are designated as coding
coding_set = set()
dbh = sqlite3.connect("csvdb")
cc = dbh.cursor()
for transcript_id in cc.execute("SELECT transcript_id"
" FROM %s"
" WHERE CP_score > %f"
% (cpc_table, cpc_thresh)):
coding_set.add(transcript_id[0])
remove = set()
outf_coding = gzip.open("gtfs/cpc_removed.gtf.gz", "w")
for gtf in GTF.iterator(IOTools.openFile(filteredLncRNAGeneSet)):
if gtf.transcript_id in coding_set:
remove.add(gtf.gene_id)
outf_coding.write("%s\n" % gtf)
outf_coding.close()
else:
# create empty set
remove = set()
# get temporary file for built lncrna
temp = P.getTempFile(".")
# get temporary file for known lncrna
temp2 = P.getTempFile(".")
for gtf in GTF.iterator(IOTools.openFile(filteredLncRNAGeneSet)):
if gtf.gene_id in remove:
continue
if gtf.transcript_id.find("TCONS") != -1:
# output known and built transcripts separately
temp.write("%s\n" % gtf)
else:
temp2.write("%s\n" % gtf)
temp.close()
temp2.close()
filename = temp.name
filename2 = temp2.name
if rename_lncRNA:
filename3 = P.getTempFilename(".")
statement = ("cat %(filename)s |"
" cgat gtf2gtf"
" --method=sort --sort-order=gene"
" --log=%(outfile)s.log |"
" cgat gtf2gtf"
" --method=renumber-genes --pattern-identifier=NONCO%%i"
" --log=%(outfile)s.log |"
" cgat gtf2gtf"
" --method=sort --sort-order=gene"
" --log=%(outfile)s.log"
" > %(filename3)s;"
" cat %(filename2)s %(filename3)s |"
" cgat gtf2gtf"
" --method=sort --sort-order=contig+gene"
" --log=%(outfile)s.log |"
" gzip > %(outfile)s")
P.run()
os.unlink(filename3)
else:
statement = ("cat %(filename)s %(filename2)s |"
" cgat gtf2gtf"
" --method=sort --sort-order=contig+gene"
" --log=%(outfile)s.log |"
" gzip > %(outfile)s")
P.run()
########################################################
# counter classes
########################################################
class CounterExons:
'''
various classes for counting features in a gtf file
'''
def __init__(self, gtffile):
self.gtffile = GTF.iterator(IOTools.openFile(gtffile))
def count(self):
c = 0
for gtf in self.gtffile:
c += 1
return c
class CounterTranscripts(CounterExons):
def count(self):
c = 0
for transcript in GTF.transcript_iterator(self.gtffile):
c += 1
return c
class CounterGenes(CounterExons):
def count(self):
c = 0
for gtf in GTF.flat_gene_iterator(self.gtffile):
c += 1
return c
class CounterExonsPerTranscript(CounterExons):
'''
returns the average number of exons per transcript
'''
def count(self):
no_exons = []
for transcript in GTF.transcript_iterator(self.gtffile):
no_exons.append(len(transcript))
return float(sum(no_exons)) / len(no_exons)
class CounterExonsPerGene(CounterExons):
'''
returns the average number of exons per transcript
'''
def count(self):
no_exons = []
for gene in GTF.flat_gene_iterator(self.gtffile):
no_exons.append(len(gene))
return float(sum(no_exons)) / len(no_exons)
class CounterSingleExonTranscripts(CounterExons):
def count(self):
c = 0
for transcript in GTF.transcript_iterator(self.gtffile):
if len(transcript) == 1:
c += 1
return c
class CounterMultiExonTranscripts(CounterExons):
def count(self):
c = 0
for transcript in GTF.transcript_iterator(self.gtffile):
if len(transcript) > 1:
c += 1
return c
class CounterSingleExonGenes(CounterExons):
def count(self):
gene_ids = set()
for gene in GTF.flat_gene_iterator(self.gtffile):
if gene[0].exon_status_locus == "s":
gene_ids.add(gene[0].gene_id)
return len(gene_ids)
class CounterMultiExonGenes(CounterExons):
def count(self):
# note that this approach works when there are also single
# exon gtf entries from the same gene (doesn't work to use
# flat_gene_iterator)
gene_ids = set()
for gene in GTF.flat_gene_iterator(self.gtffile):
if gene[0].exon_status_locus == "m":
gene_ids.add(gene[0].gene_id)
return len(gene_ids)
def flagExonStatus(gtf_file, outfile):
'''
    Adds two attributes to a gtf: the first specifies transcript exon status,
the second specifies gene exon status.
It is possible for genes to contain single-exon transcripts but not the
reciprocal.
'''
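    # e.g. a single-exon transcript inside a gene that also has multi-exon
    # transcripts receives exon_status="s" but exon_status_locus="m"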
tmpf1 = P.getTempFilename(".")
tmpf2 = P.getTempFilename(".")
outf = IOTools.openFile(tmpf2, "w")
# sort infile
statement = ("zcat %(gtf_file)s |"
" cgat gtf2gtf"
" --method=sort --sort-order=gene"
" --log=%(outfile)s.log"
" > %(tmpf1)s")
P.run()
# create dictionary where key is transcript_id,
# value is a list of gtf_proxy objects
for gene in GTF.gene_iterator(GTF.iterator(IOTools.openFile(tmpf1))):
trans_dict = collections.defaultdict(list)
# load current gene into dictionary
for transcript in gene:
for exon in transcript:
trans_dict[exon.transcript_id].append(exon)
# set exon status for transcripts
for transcript in trans_dict.keys():
if len(trans_dict[transcript]) == 1:
exon_status = "s"
else:
exon_status = "m"
for exon in trans_dict[transcript]:
exon.setAttribute("exon_status", exon_status)
# collate transcript exon status for a gene
transcript_status = set()
for exons in trans_dict.values():
transcript_status.update([exon.exon_status for exon in exons])
# set gene_exon_status
if "m" in transcript_status:
gene_exon_status = "m"
else:
gene_exon_status = "s"
# write gene model to outfile
for transcript in trans_dict.values():
for exon in transcript:
exon.setAttribute("exon_status_locus", gene_exon_status)
outf.write(str(exon) + "\n")
outf.close()
statement = ("cat %(tmpf2)s |"
" cgat gtf2gtf"
" --method=sort --sort-order=transcript"
" --log=%(outfile)s.log |"
" gzip > %(outfile)s")
P.run()
os.unlink(tmpf1)
os.unlink(tmpf2)
#############################################################
# classifying lincRNA TRANSCRIPT level relative to protein
# coding transcripts
#############################################################
def classifyLncRNA(lincRNA_gtf, reference, outfile, dist=2):
'''Classify lincRNA in terms of their proximity to protein coding
genes - creates indices for intervals on the fly - maybe should be
creating additional annotations:
antisense - GENE overlapping protein coding exons or introns on opposite
strand
antisense_upstream - GENE < Xkb from tss on opposite strand
antisense_downstream - GENE < Xkb from gene end on opposite strand
sense_upstream - GENE < Xkb from tss on same strand
sense_downstream - GENE < Xkb from gene end on same strand
intergenic - >Xkb from any protein coding gene
intronic - overlaps protein coding gene intron on same strand
antisense_intronic - overlaps protein coding intron on opposite strand
'''
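    # e.g. with the default dist=2 the up/downstream windows extend
    # dist * 1000 = 2000 bp from the reference TSS or gene end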
# index the reference geneset
ref = {}
ref["ref"] = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(reference)), with_value=True)
# create index for intronic intervals
intron = IndexedGenome.IndexedGenome()
# create index for up and downstream intervals
plus_up = IndexedGenome.IndexedGenome()
plus_down = IndexedGenome.IndexedGenome()
minus_up = IndexedGenome.IndexedGenome()
minus_down = IndexedGenome.IndexedGenome()
# iterate over reference transcripts and create intervals in memory
outf = open("introns.bed", "w")
for transcript in GTF.transcript_iterator(GTF.iterator(
IOTools.openFile(reference))):
start = transcript[0].end
for i in range(1, len(transcript)):
intron.add(
transcript[i].contig, start,
transcript[i].start, transcript[i].strand)
start = transcript[i].end
# create up and downstream intervals on plus strand
if transcript[0].strand == "+":
plus_up.add(transcript[0].contig,
transcript[0].start - (dist * 1000),
transcript[0].start,
transcript[0].strand)
plus_down.add(transcript[0].contig,
transcript[len(transcript) - 1].end,
transcript[len(transcript) - 1].end + (dist * 1000),
transcript[0].strand)
# create up and downstream intervals on minus strand
elif transcript[0].strand == "-":
minus_up.add(transcript[0].contig,
transcript[len(transcript) - 1].end,
transcript[len(transcript) - 1].end + (dist * 1000),
transcript[0].strand)
minus_down.add(transcript[0].contig,
transcript[0].start - (dist * 1000),
transcript[0].start,
transcript[0].strand)
else:
print("WARNING: no strand")
" specified for %s" % transcript[0].transcript_id
# iterate over lincRNA transcripts
transcript_class = {}
for transcript in GTF.transcript_iterator(
GTF.iterator(IOTools.openFile(lincRNA_gtf))):
transcript_id = transcript[0].transcript_id
for gtf in transcript:
# antisense to protein coding gene
if ref["ref"].contains(gtf.contig, gtf.start, gtf.end):
for gtf2 in ref["ref"].get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2].strand:
transcript_class[transcript_id] = "antisense"
# upstream sense and antisense - if up or downstream of a protein
# coding gene then that classification is prioritised over intronic
# classification. upstream is prioritised over downstream
# sense upstream is prioritised over antisense upstream
elif plus_up.contains(transcript[0].contig,
transcript[0].start,
transcript[len(transcript) - 1].end) and not \
intron.contains(transcript[0].contig,
transcript[0].start,
transcript[len(transcript) - 1].end):
for gtf2 in plus_up.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand == gtf2[2]:
transcript_class[transcript_id] = "sense_upstream"
elif gtf.strand != gtf2[2]:
transcript_class[transcript_id] = "antisense_upstream"
# and not intron.contains(transcript[0].contig,
# transcript[0].start, transcript[len(transcript) -1].end):
elif minus_up.contains(transcript[0].contig,
transcript[0].start,
transcript[len(transcript) - 1].end):
for gtf2 in minus_up.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand == gtf2[2]:
transcript_class[transcript_id] = "sense_upstream"
elif gtf.strand != gtf2[2]:
transcript_class[transcript_id] = "antisense_upstream"
# downstream sense and antisense - downstream antisense is
# prioritised as less likely to be part of the coding transcript
elif plus_down.contains(transcript[0].contig,
transcript[0].start,
transcript[len(transcript) - 1].end) and not \
intron.contains(transcript[0].contig,
transcript[0].start,
transcript[len(transcript) - 1].end):
for gtf2 in plus_down.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2]:
transcript_class[
transcript_id] = "antisense_downstream"
elif gtf.strand == gtf2[2]:
transcript_class[transcript_id] = "sense_downstream"
elif minus_down.contains(transcript[0].contig,
transcript[0].start,
transcript[len(transcript) - 1].end) and not \
intron.contains(transcript[0].contig,
transcript[0].start,
transcript[len(transcript) - 1].end):
for gtf2 in minus_down.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2]:
transcript_class[
transcript_id] = "antisense_downstream"
elif gtf.strand == gtf2[2]:
transcript_class[transcript_id] = "sense_downstream"
# intronic sense and antisense - intronic antisense is prioritised
elif intron.contains(transcript[0].contig,
transcript[0].start,
transcript[len(transcript) - 1].end):
for gtf2 in intron.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2]:
transcript_class[transcript_id] = "antisense_intronic"
elif gtf.strand == gtf2[2]:
transcript_class[transcript_id] = "sense_intronic"
# we have known lncRNA that overlap protein coding exons so
# classify these as well
elif ref["ref"].contains(gtf.contig, gtf.start, gtf.end):
if gtf.gene_status == "known":
transcript_class[transcript_id] = "sense_protein_coding"
# catch intergenic transcripts - based on the
# assumption that anything not falling into the above classes
# is intergenic
if transcript_id not in transcript_class:
transcript_class[transcript_id] = "intergenic"
outf = gzip.open(outfile, "w")
outf_unclassified = gzip.open("lncrna_unclassified.gtf.gz", "w")
for gtf in GTF.iterator(IOTools.openFile(lincRNA_gtf)):
if gtf.transcript_id in transcript_class:
gtf.source = transcript_class[gtf.transcript_id]
outf.write("%s\n" % gtf)
else:
outf_unclassified.write("%s\n" % gtf)
outf.close()
#############################################################
# classifying lincRNA GENE level relative to protein coding
# transcripts
#############################################################
def classifyLncRNAGenes(lincRNA_gtf, reference, outfile, dist=2):
# index the reference geneset
ref = {}
ref["ref"] = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(reference)), with_value=True)
# create index for intronic intervals
intron = IndexedGenome.IndexedGenome()
# create index for up and downstream intervals
plus_up = IndexedGenome.IndexedGenome()
plus_down = IndexedGenome.IndexedGenome()
minus_up = IndexedGenome.IndexedGenome()
minus_down = IndexedGenome.IndexedGenome()
# iterate over reference transcripts and create intervals in memory
for transcript in GTF.transcript_iterator(
GTF.iterator(IOTools.openFile(reference))):
start = transcript[0].end
for i in range(1, len(transcript)):
intron.add(
transcript[i].contig, start,
transcript[i].start, transcript[i].strand)
start = transcript[i].end
# create up and downstream intervals on plus strand
if transcript[0].strand == "+":
plus_up.add(transcript[0].contig,
transcript[0].start - (dist * 1000),
transcript[0].start,
transcript[0].strand)
plus_down.add(transcript[0].contig,
transcript[len(transcript) - 1].end,
transcript[len(transcript) - 1].end + (dist * 1000),
transcript[0].strand)
# create up and downstream intervals on minus strand
elif transcript[0].strand == "-":
minus_up.add(transcript[0].contig,
transcript[len(transcript) - 1].end,
transcript[len(transcript) - 1].end + (dist * 1000),
transcript[0].strand)
minus_down.add(transcript[0].contig,
transcript[0].start - (dist * 1000),
transcript[0].start,
transcript[0].strand)
else:
print(("WARNING: no strand"
" specified for %s" % transcript[0].transcript_id))
# iterate over lincRNA genes
outf_introns = os.path.join(os.path.dirname(outfile),
"sense_intronic_removed.gtf.gz")
outf_introns = gzip.open(outf_introns, "w")
gene_class = {}
for gtf in GTF.merged_gene_iterator(GTF.iterator(
IOTools.openFile(lincRNA_gtf))):
gene_id = gtf.gene_id
# the first classification resolves any gene
# that overlaps a gene. We don't mind whether it
# overlaps protein coding gene exons or introns
if ref["ref"].contains(gtf.contig, gtf.start, gtf.end):
for gtf2 in ref["ref"].get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2]:
gene_class[gene_id] = "antisense"
else:
gene_class[gene_id] = "sense"
# remove intronic sense transcripts at this point
elif intron.contains(gtf.contig, gtf.start, gtf.end):
for gtf2 in intron.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand == gtf2[2]:
outf_introns.write("%s\n" % gtf)
else:
gene_class[gene_id] = "antisense"
# the second classification resolves sense and antisense genes up and
# downstream of protein coding genes - nb having some problems with the
# merged gene iterator
elif plus_up.contains(gtf.contig, gtf.start, gtf.end):
for gtf2 in plus_up.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2]:
if gene_id in gene_class:
continue
gene_class[gene_id] = "antisense_upstream"
else:
if gene_id in gene_class:
continue
gene_class[gene_id] = "sense_upstream"
elif minus_up.contains(gtf.contig, gtf.start, gtf.end):
for gtf2 in minus_up.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2]:
if gene_id in gene_class:
continue
gene_class[gene_id] = "antisense_upstream"
else:
if gene_id in gene_class:
continue
gene_class[gene_id] = "sense_upstream"
elif plus_down.contains(gtf.contig, gtf.start, gtf.end):
for gtf2 in plus_down.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2]:
if gene_id in gene_class:
continue
gene_class[gene_id] = "antisense_downstream"
else:
if gene_id in gene_class:
continue
gene_class[gene_id] = "sense_downstream"
elif minus_down.contains(gtf.contig, gtf.start, gtf.end):
for gtf2 in minus_down.get(gtf.contig, gtf.start, gtf.end):
if gtf.strand != gtf2[2]:
if gene_id in gene_class:
continue
gene_class[gene_id] = "antisense_downstream"
else:
if gene_id in gene_class:
continue
gene_class[gene_id] = "sense_downstream"
# the third classification assumes all genes have been
# classified leaving intergenic genes
else:
gene_class[gene_id] = "intergenic"
outf = gzip.open(outfile, "w")
for gtf in GTF.iterator(IOTools.openFile(lincRNA_gtf)):
if gtf.gene_id in gene_class:
gtf.source = gene_class[gtf.gene_id]
outf.write("%s\n" % gtf)
outf.close()
outf_introns.close()
##########################################################################
##########################################################################
# An alternative method for classifying LncRNAs relative to supplied geneset
##########################################################################
def write_to_temp(tempfile, interval_list, transcript, check_strand=True):
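    # Writes one tab-separated line per intersecting interval: the lncRNA
    # gene_id, the interval start and end, the first exon of the lncRNA
    # transcript, and the stored gtf fields of the interval itself.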
if check_strand:
for interval in interval_list:
if interval[2][6] == transcript[0].strand:
tempfile.write(transcript[0].gene_id + "\t" +
str(interval[0]) + "\t" +
str(interval[1]) + "\t" +
str(transcript[0]) + "\t" +
"\t".join(interval[2]) + "\n")
else:
for interval in interval_list:
tempfile.write(transcript[0].gene_id + "\t" +
str(interval[0]) + "\t" +
str(interval[1]) + "\t" +
str(transcript[0]) + "\t" +
"\t".join(interval[2]) + "\n")
def reClassifyLncRNAGenes(lncRNA_gtf,
reference_gtf,
outfile,
upstr_dist=5,
dstr_dist=5,
wdir="."):
"""
This re-write of classifyLncRNAGenes() does not throw out intronic loci,
but labels them as either sense-intronic or sense-overlap.
    It also fixes the bug that caused sense gene-models encompassing
    reference exons to be output as antisense.
Because lncRNA boundaries intersect multiple intervals in indexes, rather
than classifying each lncRNA multiple times, lncRNA strand is instead
compared to a list of interval strand values, if any of these are sense,
then the lncRNA is classified as sense etc.
Sense-intronic: when lncRNA loci start and end are contained within a
single intron. Sense-overlap: when lncRNA loci start and end are in
different introns. Note that different introns do not necessarily
come from different gene-models.
"""
# index exons in the reference gene-set
ref_index = IndexedGenome.IndexedGenome()
for exon in GTF.iterator(IOTools.openFile(reference_gtf)):
ref_index.add(exon.contig, exon.start, exon.end, str(exon).split())
# create index for all other intervals to be classified
intron = IndexedGenome.IndexedGenome()
plus_up = IndexedGenome.IndexedGenome()
plus_down = IndexedGenome.IndexedGenome()
minus_up = IndexedGenome.IndexedGenome()
minus_down = IndexedGenome.IndexedGenome()
# iterate over reference transcripts and create intervals in memory
ref_file = IOTools.openFile(reference_gtf)
for transcript in GTF.transcript_iterator(GTF.iterator(ref_file)):
start = transcript[0].end
for i in range(1, len(transcript)):
intron.add(transcript[i].contig,
start,
transcript[i].start,
str(transcript[i]).split())
start = transcript[i].end
# create up and downstream intervals on plus strand
if transcript[0].strand == "+":
plus_up.add(transcript[0].contig,
transcript[0].start - (upstr_dist * 1000),
transcript[0].start,
str(transcript[0]).split())
plus_down.add(transcript[0].contig,
transcript[len(transcript) - 1].end,
transcript[
len(transcript) - 1].end + (dstr_dist * 1000),
str(transcript[len(transcript) - 1]).split())
# create up and downstream intervals on minus strand
elif transcript[0].strand == "-":
minus_up.add(transcript[0].contig,
transcript[len(transcript) - 1].end,
transcript[len(transcript) - 1].end +
(upstr_dist * 1000),
str(transcript[len(transcript) - 1]).split())
minus_down.add(transcript[0].contig,
transcript[0].start - (dstr_dist * 1000),
transcript[0].start,
str(transcript[0]).split())
else:
E.warn("WARNING: no strand specified for %s" %
transcript[0].transcript_id)
# create single representative transcript for each lncRNA gene_id
merged_lncRNA_gtf = P.getTempFilename(wdir)
to_cluster = False
statement = ("zcat %(lncRNA_gtf)s |"
" cgat gtf2gtf"
" --method=sort --sort-order=gene"
" --log=%(outfile)s.log |"
" cgat gtf2gtf"
" --method=merge-exons"
" --log=%(outfile)s.log"
" > %(merged_lncRNA_gtf)s")
P.run()
# create a temp directory containing the indexed intervals used to classify
# the lncRNA transcripts created (for debugging purposes)
# create a temporary count of # of gene_models in each category
tempdir = P.getTempDir(wdir)
E.info("intersecting intervals are being written to %s"
% os.path.abspath(tempdir))
temp_file_names = ["sense",
"sense_intronic",
"sense_overlap",
"antisense",
"sense_downstream",
"sense_upstream",
"antisense_downstream",
"antisense_upstream",
"intergenic"]
temp_files = {}
temp_count = {}
for handle in temp_file_names:
temp_count[handle] = 0
temp_files[handle] = IOTools.openFile(os.path.join(tempdir,
handle), "w")
# iterate through the representative (i.e. merged) lncRNA transcripts
# each lncRNA transcript is classified only once.
# In situations where a lncRNA fits > 1 classification, priority is:
# (sense > antisense)
# & (overlap_exons > overlap_introns > downstream > upstream > intergenic)
lnc_file = IOTools.openFile(merged_lncRNA_gtf)
gene_class = {} # dictionary of gene_id : classification
input_transcripts = 0 # keep track of # transcripts in lncRNA_gtf
for transcript in GTF.transcript_iterator(GTF.iterator(lnc_file)):
input_transcripts += 1
gene_id = transcript[0].gene_id
strand = transcript[0].strand
# create lists of indexed intervals that intersect transcript exons
overlap_list = []
intron_list = []
plus_down_list = []
minus_down_list = []
plus_up_list = []
minus_up_list = []
for exon in transcript:
if exon.contig in list(ref_index.mIndex.keys()):
overlap_list.extend([x for x in list(ref_index.get(exon.contig,
exon.start,
exon.end))])
else:
E.warn("Contig %s not in reference exon index "
"failed to retrieve intervals for %s" % (exon.contig,
exon.gene_id))
if exon.contig in list(intron.mIndex.keys()):
intron_list.extend([x for x in list(intron.get(exon.contig,
exon.start,
exon.end))])
else:
E.warn("Contig %s not in reference intron index, "
"failed to retrieve intervals for %s" % (exon.contig,
exon.gene_id))
if exon.contig in list(plus_down.mIndex.keys()):
plus_down_list.extend([x for x in list(plus_down.get(
exon.contig,
exon.start,
exon.end))])
else:
E.warn("Contig %s not in plus downstream index, "
"failed to retrieve intervals for %s" % (exon.contig,
exon.gene_id))
if exon.contig in list(minus_down.mIndex.keys()):
minus_down_list.extend([x for x in list(minus_down.get(
exon.contig,
exon.start,
exon.end))])
else:
E.warn("Contig %s not in minus downstream index, "
"failed to retrieve intervals for %s" % (exon.contig,
exon.gene_id))
if exon.contig in list(plus_up.mIndex.keys()):
plus_up_list.extend([x for x in list(plus_up.get(exon.contig,
exon.start,
exon.end))])
else:
E.warn("Contig %s not in plus upstream index, "
"failed to retrieve intervals for %s" % (exon.contig,
exon.gene_id))
if exon.contig in list(minus_up.mIndex.keys()):
minus_up_list.extend([x for x in list(minus_up.get(exon.contig,
exon.start,
exon.end))])
else:
E.warn("Contig %s not in minus upstream index, "
"failed to retrieve intervals for %s" % (exon.contig,
exon.gene_id))
# check if any exon in lncRNA intersects an reference exon
if overlap_list:
# if the intersecting exons are on the same strand,
# classify lncRNA as sense.
if strand in [x[2][6] for x in overlap_list]:
gene_class[gene_id] = "sense"
write_to_temp(temp_files[gene_class[gene_id]],
overlap_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# otherwise check if lncRNA has sense overlap with a reference
# intron
elif intron_list and strand in [x[2][6] for x in intron_list]:
last = len(transcript) - 1
start_list = [(x[0], x[1]) for x in list(intron.get(
transcript[0].contig,
transcript[0].start,
transcript[0].start + 1)) if x[2][6] == strand]
end_list = [(x[0], x[1]) for x in list(intron.get(
transcript[last].contig,
transcript[last].end,
transcript[last].end + 1)) if x[2][6] == strand]
# if start and end of transcript are within the same sense
# introns, then lncRNA is classified as 'sense_intronic'
if set(start_list) == set(end_list):
gene_class[gene_id] = "sense_intronic"
write_to_temp(temp_files[gene_class[gene_id]],
intron_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# if start/end are within different sense introns,
                # then lncRNA is classified as 'sense_overlap'
else:
gene_class[gene_id] = "sense_overlap"
write_to_temp(temp_files[gene_class[gene_id]],
intron_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense downstream on the plus strand...
elif plus_down_list and strand in [x[2][6] for x in
plus_down_list]:
gene_class[gene_id] = "sense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_down_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense downstream on the minus strand...
elif minus_down_list and strand in [x[2][6] for x in
minus_down_list]:
gene_class[gene_id] = "sense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_down_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense upstream on the plus strand...
elif plus_up_list and strand in [x[2][6] for x in plus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense upstream on the minus strand...
elif minus_up_list and strand in [x[2][6] for x in minus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...if none of the above... classify as antisense
else:
gene_class[gene_id] = "antisense"
write_to_temp(temp_files[gene_class[gene_id]],
overlap_list,
transcript,
check_strand=False)
temp_count[gene_class[gene_id]] += 1
# if lncRNA doesn't intersect a reference exon,
# check if it overlaps a reference intron
elif intron_list:
if strand in [x[2][6] for x in intron_list]:
last = len(transcript) - 1
start_list = [(x[0], x[1]) for x in list(intron.get(
transcript[0].contig,
transcript[0].start,
transcript[0].start + 1)) if x[2][6] == strand]
end_list = [(x[0], x[1]) for x in list(intron.get(
transcript[last].contig,
transcript[last].end,
transcript[last].end + 1)) if x[2][6] == strand]
# if start and end of transcript are within the same sense
# introns, then lncRNA is classified as 'sense_intronic'
if set(start_list) == set(end_list):
gene_class[gene_id] = "sense_intronic"
write_to_temp(temp_files[gene_class[gene_id]],
intron_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# if start/end are within different sense introns,
                # then lncRNA is classified as 'sense_overlap'
else:
gene_class[gene_id] = "sense_overlap"
write_to_temp(temp_files[gene_class[gene_id]],
intron_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense downstream on the plus strand...
elif plus_down_list and strand in [x[2][6] for x in
plus_down_list]:
gene_class[gene_id] = "sense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_down_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense downstream on the minus strand...
elif minus_down_list and strand in [x[2][6] for x in
minus_down_list]:
gene_class[gene_id] = "sense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_down_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense upstream on the plus strand...
elif plus_up_list and strand in [x[2][6] for x in plus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
            # ...check if lncRNA is sense upstream on the minus strand...
elif minus_up_list and strand in [x[2][6] for x in minus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...if none of the above, lncRNAs intersecting introns on
# the opposite strand are classified as antisense
else:
gene_class[gene_id] = "antisense"
write_to_temp(temp_files[gene_class[gene_id]],
intron_list,
transcript,
check_strand=False)
temp_count[gene_class[gene_id]] += 1
# if lncRNA doesn't intersect reference introns or exons...
# check if it's downstream on the plus strand...
elif plus_down_list:
# ... check if lncRNA is sense downstream...
if strand in [x[2][6] for x in plus_down_list]:
gene_class[gene_id] = "sense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_down_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense downstream on the minus strand...
elif minus_down_list and strand in [x[2][6] for x in
minus_down_list]:
gene_class[gene_id] = "sense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_down_list,
transcript)
temp_count[gene_class[gene_id]] += 1
            # ...check if lncRNA is sense upstream on the plus strand...
elif plus_up_list and strand in [x[2][6] for x in plus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
            # ...check if lncRNA is sense upstream on the minus strand...
elif minus_up_list and strand in [x[2][6] for x in minus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# if none of the above, lncRNA is classified as
# antisense_downstream
else:
gene_class[gene_id] = "antisense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_down_list,
transcript,
check_strand=False)
temp_count[gene_class[gene_id]] += 1
# check if lncRNA is downstream on the minus strand...
elif minus_down_list:
# check if lncRNA is sense downstream
if strand in [x[2][6] for x in minus_down_list]:
gene_class[gene_id] = "sense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_down_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense upstream on the plus strand...
elif plus_up_list and strand in [x[2][6] for x in plus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# ...check if lncRNA is sense upstream on the minus strand...
elif minus_up_list and strand in [x[2][6] for x in minus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# if none of the above, lncRNA is classified as
# antisense_downstream
else:
gene_class[gene_id] = "antisense_downstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_down_list,
transcript,
check_strand=False)
temp_count[gene_class[gene_id]] += 1
# check if lncRNA is upstream on the plus strand...
elif plus_up_list:
# check if lncRNA is sense upstream...
if strand in [x[2][6] for x in plus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
            # ...check if lncRNA is sense upstream on the minus strand...
elif minus_up_list and strand in [x[2][6] for x in minus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# if none of the above, lncRNA is classified as
# antisense upstream
else:
gene_class[gene_id] = "antisense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
plus_up_list,
transcript,
check_strand=False)
temp_count[gene_class[gene_id]] += 1
# check if lncRNA is upstream on the minus strand...
elif minus_up_list:
# check if lncRNA is sense upstream...
if strand in [x[2][6] for x in minus_up_list]:
gene_class[gene_id] = "sense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
# otherwise classify as antisense upstream
else:
gene_class[gene_id] = "antisense_upstream"
write_to_temp(temp_files[gene_class[gene_id]],
minus_up_list,
transcript)
temp_count[gene_class[gene_id]] += 1
        # lncRNAs that do not fall into any of the above categories
# are classified as intergenic
else:
gene_class[gene_id] = "intergenic"
temp_files[gene_class[gene_id]].write(str(transcript[0]) + "\n")
temp_count[gene_class[gene_id]] += 1
# check that all the numbers add up
E.info("Number of lncRNA loci falling into each category are as follows:")
for key, value in temp_count.items():
print((key + "\t" + str(value)))
total_classified = sum(temp_count.values())
E.info("Total number of lncRNA loci classified: %i" % total_classified)
E.info("Total number of lncRNA loci in input gtf: %i" % input_transcripts)
# sanity check:
assert total_classified == input_transcripts, (
"Not all lncRNAs in input gtf were successfully classified")
# close the tempfiles
for handle in temp_file_names:
temp_files[handle].close()
# write the genes plus their classification to the outfile
outf = IOTools.openFile(outfile, "w")
for gtf in GTF.iterator(IOTools.openFile(lncRNA_gtf)):
if gtf.gene_id in gene_class:
gtf.source = gene_class[gtf.gene_id]
outf.write(str(gtf) + "\n")
else:
E.info("Warning the gene_id %s is not classified" % gtf.gene_id)
outf.close()
os.unlink(merged_lncRNA_gtf)
return tempdir
##########################################################################
##########################################################################
##########################################################################
# Extract pairwise MAF alignments
##########################################################################
# This section of the pipeline makes use of galaxy's maf_utilties (written by
# Dan Blankenberg - see galaxy-dist/lib/galaxy/tools/util)
# for indexing maf files and for retrieving maf blocks that
# intersect gene models.
# Because maf_utilities.py is not available outside of galaxy,
# required functions have been copied directly (below).
# These functions have not been altered in the hope that maf_utilities
# will one day be available as a stand-alone module.
# The following classes/functions have been lifted directly from maf_utilities:
# RegionAlignment()
# GenomicRegionAlignment()
# SplicedAlignment()
# build_maf_index_species_chromosomes()
# build_maf_index()
# component_overlaps_region()
# chop_block_by_region()
# orient_block_by_region()
# iter_blocks_split_by_species()
# reduce_block_by_primary_genome()
# fill_region_alignment()
# get_spliced_region_alignment()
# get_starts_ends_fields_from_gene_bed()
# iter_components_by_src()
GAP_CHARS = ['-']
SRC_SPLIT_CHAR = '.'
def src_split(src):
fields = src.split(SRC_SPLIT_CHAR, 1)
spec = fields.pop(0)
if fields:
chrom = fields.pop(0)
else:
chrom = spec
return spec, chrom
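# Illustrative sketch, not part of the lifted maf_utilities code: how
# src_split() separates a MAF source field into (species, chromosome),
# falling back to the whole string when no separator is present.
def _example_src_split():
    assert src_split("hg19.chr1") == ("hg19", "chr1")
    assert src_split("hg19") == ("hg19", "hg19")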
# an object corresponding to a reference layered alignment
class RegionAlignment(object):
DNA_COMPLEMENT = str.maketrans("ACGTacgt", "TGCAtgca")
MAX_SEQUENCE_SIZE = sys.maxsize # Maximum length of sequence allowed
def __init__(self, size, species=[]):
assert size <= self.MAX_SEQUENCE_SIZE, ("Maximum length allowed for an"
" individual sequence has been"
" exceeded (%i > %i)." % (
size,
self.MAX_SEQUENCE_SIZE))
self.size = size
self.sequences = {}
if not isinstance(species, list):
species = [species]
for spec in species:
self.add_species(spec)
# add a species to the alignment
def add_species(self, species):
# make temporary sequence files
        self.sequences[species] = tempfile.TemporaryFile(mode="w+")
self.sequences[species].write("-" * self.size)
# returns the names for species found in alignment, skipping names as
# requested
def get_species_names(self, skip=[]):
if not isinstance(skip, list):
skip = [skip]
names = list(self.sequences.keys())
for name in skip:
try:
names.remove(name)
except:
pass
return names
# returns the sequence for a species
def get_sequence(self, species):
self.sequences[species].seek(0)
return self.sequences[species].read()
# returns the reverse complement of the sequence for a species
def get_sequence_reverse_complement(self, species):
complement = [base for base in self.get_sequence(
species).translate(self.DNA_COMPLEMENT)]
complement.reverse()
return "".join(complement)
# sets a position for a species
def set_position(self, index, species, base):
if len(base) != 1:
raise Exception("A genomic position can only have a length of 1.")
return self.set_range(index, species, base)
# sets a range for a species
def set_range(self, index, species, bases):
if index >= self.size or index < 0:
raise Exception(
"Your index (%i) is out of range (0 - %i)." % (index,
self.size - 1))
if len(bases) == 0:
raise Exception(
"A set of genomic positions can only have a positive length.")
if species not in list(self.sequences.keys()):
self.add_species(species)
self.sequences[species].seek(index)
self.sequences[species].write(bases)
# Flush temp file of specified species, or all species
def flush(self, species=None):
if species is None:
species = list(self.sequences.keys())
elif not isinstance(species, list):
species = [species]
for spec in species:
self.sequences[spec].flush()
class GenomicRegionAlignment(RegionAlignment):
def __init__(self, start, end, species=[]):
RegionAlignment.__init__(self, end - start, species)
self.start = start
self.end = end
class SplicedAlignment(object):
DNA_COMPLEMENT = str.maketrans("ACGTacgt", "TGCAtgca")
def __init__(self, exon_starts, exon_ends, species=[]):
if not isinstance(exon_starts, list):
exon_starts = [exon_starts]
if not isinstance(exon_ends, list):
exon_ends = [exon_ends]
assert len(exon_starts) == len(
exon_ends), "Number of starts does not match the number of sizes."
self.exons = []
for i in range(len(exon_starts)):
self.exons.append(
GenomicRegionAlignment(exon_starts[i], exon_ends[i], species))
# returns the names for species found in alignment, skipping names as
# requested
def get_species_names(self, skip=[]):
if not isinstance(skip, list):
skip = [skip]
names = []
for exon in self.exons:
for name in exon.get_species_names(skip=skip):
if name not in names:
names.append(name)
return names
# returns the sequence for a species
def get_sequence(self, species):
sequence = tempfile.TemporaryFile()
for exon in self.exons:
if species in exon.get_species_names():
sequence.write(exon.get_sequence(species))
else:
sequence.write("-" * exon.size)
sequence.seek(0)
return sequence.read()
# returns the reverse complement of the sequence for a species
def get_sequence_reverse_complement(self, species):
complement = [base for base in self.get_sequence(
species).translate(self.DNA_COMPLEMENT)]
complement.reverse()
return "".join(complement)
# Start and end of coding region
@property
def start(self):
return self.exons[0].start
@property
def end(self):
return self.exons[-1].end
def build_maf_index_species_chromosomes(filename, index_species=None):
species = []
species_chromosomes = {}
indexes = bx.interval_index_file.Indexes()
blocks = 0
try:
maf_reader = bx.align.maf.Reader(open(filename))
while True:
pos = maf_reader.file.tell()
block = next(maf_reader)
if block is None:
break
blocks += 1
for c in block.components:
spec = c.src
chrom = None
if "." in spec:
spec, chrom = spec.split(".", 1)
if spec not in species:
species.append(spec)
species_chromosomes[spec] = []
if chrom and chrom not in species_chromosomes[spec]:
species_chromosomes[spec].append(chrom)
if index_species is None or spec in index_species:
forward_strand_start = c.forward_strand_start
forward_strand_end = c.forward_strand_end
try:
forward_strand_start = int(forward_strand_start)
forward_strand_end = int(forward_strand_end)
except ValueError:
# start and end are not integers, can't add component
# to index, goto next component
continue
# this likely only occurs when parse_e_rows is True?
# could a species exist as only e rows? should the
if forward_strand_end > forward_strand_start:
# require positive length; i.e. certain lines have
# start = end = 0 and cannot be indexed
indexes.add(c.src,
forward_strand_start,
forward_strand_end,
pos,
max=c.src_size)
except Exception as e:
# most likely a bad MAF
log.debug('Building MAF index on %s failed: %s' % (filename, e))
return (None, [], {}, 0)
return (indexes, species, species_chromosomes, blocks)
# builds and returns ( index, index_filename ) for specified maf_file
def build_maf_index(maf_file, species=None):
indexes, found_species, species_chromosomes, blocks = \
build_maf_index_species_chromosomes(maf_file, species)
if indexes is not None:
fd, index_filename = tempfile.mkstemp()
out = os.fdopen(fd, 'w')
indexes.write(out)
out.close()
return (bx.align.maf.Indexed(maf_file,
index_filename=index_filename,
keep_open=True,
parse_e_rows=False),
index_filename)
return (None, None)
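# Illustrative sketch, not part of the lifted maf_utilities code: typical
# use of build_maf_index(). The MAF path and species are hypothetical;
# build_maf_index() returns (None, None) on failure, and the caller is
# responsible for removing the temporary index file when done.
def _example_build_maf_index(maf_path="alignments/hg19_mm10.maf"):
    index, index_filename = build_maf_index(maf_path, species=["hg19"])
    if index is None:
        E.warn("could not build a MAF index for %s" % maf_path)
    return index, index_filename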
def component_overlaps_region(c, region):
if c is None:
return False
start, end = c.get_forward_strand_start(), c.get_forward_strand_end()
if region.start >= end or region.end <= start:
return False
return True
def chop_block_by_region(block, src, region, species=None, mincols=0):
# This chopping method was designed to maintain consistency with
# how start/end padding gaps have been working in Galaxy thus far:
# behavior as seen when forcing blocks to be '+' relative to src
# sequence (ref) and
# using block.slice_by_component( ref, slice_start, slice_end )
# whether-or-not this is the 'correct' behavior is questionable,
# but this will at least maintain consistency
# comments welcome
slice_start = block.text_size # max for the min()
slice_end = 0 # min for the max()
old_score = block.score # save old score for later use
    # We no longer assume only one occurrence of src per block, so we need to
# check them all
for c in iter_components_by_src(block, src):
if component_overlaps_region(c, region):
if c.text is not None:
rev_strand = False
if c.strand == "-":
# We want our coord_to_col coordinates to be returned from
# positive stranded component
rev_strand = True
c = c.reverse_complement()
start = max(region.start, c.start)
end = min(region.end, c.end)
start = c.coord_to_col(start)
end = c.coord_to_col(end)
if rev_strand:
# need to orient slice coordinates to the original block
# direction
slice_len = end - start
end = len(c.text) - start
start = end - slice_len
slice_start = min(start, slice_start)
slice_end = max(end, slice_end)
if slice_start < slice_end:
block = block.slice(slice_start, slice_end)
if block.text_size > mincols:
# restore old score, may not be accurate, but it is better than 0
# for everything?
block.score = old_score
if species is not None:
block = block.limit_to_species(species)
block.remove_all_gap_columns()
return block
return None
def orient_block_by_region(block, src, region, force_strand=None):
# loop through components matching src,
# make sure each of these components overlap region
    # cache strand for each of the overlapping regions
# if force_strand / region.strand not in strand cache, reverse complement
# we could have 2 sequences with same src, overlapping region, on
# different strands, this would cause no reverse_complementing
strands = [c.strand for c in iter_components_by_src(
block, src) if component_overlaps_region(c, region)]
if strands and (force_strand is None and region.strand not in strands) or \
(force_strand is not None and force_strand not in strands):
block = block.reverse_complement()
return block
# split a block into multiple blocks with all combinations of a species
# appearing only once per block
def iter_blocks_split_by_species(block, species=None):
def __split_components_by_species(components_by_species, new_block):
if components_by_species:
# more species with components to add to this block
components_by_species = deepcopy(components_by_species)
spec_comps = components_by_species.pop(0)
for c in spec_comps:
newer_block = deepcopy(new_block)
newer_block.add_component(deepcopy(c))
for value in \
__split_components_by_species(components_by_species,
newer_block):
yield value
else:
# no more components to add, yield this block
yield new_block
# divide components by species
spec_dict = {}
if not species:
species = []
for c in block.components:
spec, chrom = src_split(c.src)
if spec not in spec_dict:
spec_dict[spec] = []
species.append(spec)
spec_dict[spec].append(c)
else:
for spec in species:
spec_dict[spec] = []
for c in iter_components_by_src_start(block, spec):
spec_dict[spec].append(c)
empty_block = bx.align.Alignment(score=block.score, attributes=deepcopy(
block.attributes)) # should we copy attributes?
empty_block.text_size = block.text_size
# call recursive function to split into each combo of spec/blocks
for value in __split_components_by_species(list(spec_dict.values()),
empty_block):
# restore original component order
sort_block_components_by_block(value, block)
yield value
# reduces a block to only positions existing in the src provided
def reduce_block_by_primary_genome(block, species, chromosome, region_start):
    # returns ( startIndex, {species:texts} )
# where texts' contents are reduced to only positions existing in the
# primary genome
src = "%s.%s" % (species, chromosome)
ref = block.get_component_by_src(src)
start_offset = ref.start - region_start
species_texts = {}
for c in block.components:
species_texts[c.src.split('.')[0]] = list(c.text)
# remove locations which are gaps in the primary species, starting from
# the downstream end
for i in range(len(species_texts[species]) - 1, -1, -1):
if species_texts[species][i] == '-':
for text in list(species_texts.values()):
text.pop(i)
for spec, text in list(species_texts.items()):
species_texts[spec] = ''.join(text)
return (start_offset, species_texts)
def fill_region_alignment(alignment,
index,
primary_species,
chrom,
start,
end,
strand='+',
species=None,
mincols=0,
overwrite_with_gaps=True):
region = bx.intervals.Interval(start, end)
region.chrom = chrom
region.strand = strand
primary_src = "%s.%s" % (primary_species, chrom)
    # Order blocks overlapping this position by score, lowest first
blocks = []
for block, idx, offset in \
index.get_as_iterator_with_index_and_offset(primary_src,
start,
end):
score = float(block.score)
for i in range(0, len(blocks)):
if score < blocks[i][0]:
blocks.insert(i, (score, idx, offset))
break
else:
blocks.append((score, idx, offset))
# gap_chars_tuple = tuple( GAP_CHARS )
gap_chars_str = ''.join(GAP_CHARS)
# Loop through ordered blocks and layer by increasing score
for block_dict in blocks:
        # need to handle each occurrence of sequence in block separately
for block in iter_blocks_split_by_species(
block_dict[1].get_at_offset(block_dict[2])):
if component_overlaps_region(
block.get_component_by_src(primary_src), region):
block = chop_block_by_region(
block, primary_src, region, species, mincols) # chop block
block = orient_block_by_region(
block, primary_src, region) # orient block
start_offset, species_texts = reduce_block_by_primary_genome(
block, primary_species, chrom, start)
for spec, text in list(species_texts.items()):
# we should trim gaps from both sides, since these are not
# positions in this species genome (sequence)
text = text.rstrip(gap_chars_str)
gap_offset = 0
# python2.4 doesn't accept a tuple for .startswith()
while True in [text.startswith(
gap_char) for gap_char in GAP_CHARS]:
# while text.startswith( gap_chars_tuple ):
gap_offset += 1
text = text[1:]
if not text:
break
if text:
if overwrite_with_gaps:
alignment.set_range(
start_offset + gap_offset, spec, text)
else:
for i, char in enumerate(text):
if char not in GAP_CHARS:
alignment.set_position(
start_offset + gap_offset + i,
spec,
char)
return alignment
def get_spliced_region_alignment(index,
primary_species,
chrom,
starts,
ends,
strand='+',
species=None,
mincols=0,
overwrite_with_gaps=True):
"""
Returns a filled spliced region alignment for specified region with start
and end lists.
"""
# create spliced alignment object
if species is not None:
alignment = SplicedAlignment(starts, ends, species)
else:
alignment = SplicedAlignment(starts, ends, [primary_species])
for exon in alignment.exons:
fill_region_alignment(exon,
index,
primary_species,
chrom,
exon.start,
exon.end,
strand,
species,
mincols,
overwrite_with_gaps)
return alignment
# read a gene BED line, return lists of starts, ends and the raw fields
def get_starts_ends_fields_from_gene_bed(line):
# Starts and ends for exons
starts = []
ends = []
fields = line.split()
    # Requires at least 12 BED columns
if len(fields) < 12:
raise Exception("Not a proper 12 column BED line (%s)." % line)
chrom = fields[0]
tx_start = int(fields[1])
tx_end = int(fields[2])
name = fields[3]
strand = fields[5]
if strand != '-':
strand = '+' # Default strand is +
cds_start = int(fields[6])
cds_end = int(fields[7])
# Calculate and store starts and ends of coding exons
region_start, region_end = cds_start, cds_end
exon_starts = list(map(int, fields[11].rstrip(',\n').split(',')))
exon_starts = list(map((lambda x: x + tx_start), exon_starts))
exon_ends = list(map(int, fields[10].rstrip(',').split(',')))
exon_ends = list(map((lambda x, y: x + y), exon_starts, exon_ends))
for start, end in zip(exon_starts, exon_ends):
start = max(start, region_start)
end = min(end, region_end)
if start < end:
starts.append(start)
ends.append(end)
return (starts, ends, fields)
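# Illustrative sketch, not part of the lifted maf_utilities code: parsing
# a hypothetical 12-column BED line. The two blocks are clipped to the
# thickStart/thickEnd (CDS) region, giving coding exons 100-300 and
# 700-1000.
def _example_gene_bed_parsing():
    bed_line = ("chr1\t100\t1000\tgeneA__txA\t0\t+\t100\t1000\t0\t2\t"
                "200,300,\t0,600,")
    starts, ends, fields = get_starts_ends_fields_from_gene_bed(bed_line)
    assert starts == [100, 700]
    assert ends == [300, 1000]
    return fields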
def iter_components_by_src(block, src):
for c in block.components:
if c.src == src:
yield c
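# iter_components_by_src_start() is called by iter_blocks_split_by_species()
# above but was not copied across with the other maf_utilities functions.
# A minimal version matching its documented behaviour (yield components
# whose src starts with the given prefix) is sketched here.
def iter_components_by_src_start(block, src):
    for c in block.components:
        if c.src.startswith(src):
            yield c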
def sort_block_components_by_block(block1, block2):
# orders the components in block1 by the index of the component in block2
# block1 must be a subset of block2
# occurs in-place
    return block1.components.sort(
        key=lambda x: block2.components.index(x))
##########################################################################
# CGAT functions for extracting MAF alignments
##########################################################################
def gtfToBed12(infile, outfile, model):
"""
Convert a gtf file to bed12 format.
"""
model = "gene"
outfile = IOTools.openFile(outfile, "w")
for all_exons in GTF.transcript_iterator(
GTF.iterator(IOTools.openFile(infile, "r"))):
chrom = all_exons[0].contig
# GTF.iterator returns start co-ordinates as zero-based
start = str(all_exons[0].start)
end = str(all_exons[len(all_exons) - 1].end)
# if model == "gene":
# name = all_exons[0].gene_id
# elif model == "transcript":
# name = all_exons[0].transcript_id
name = all_exons[0].gene_id + "__" + all_exons[0].transcript_id
# else:
# raise ValueError( "model must either be gene or transcript" )
score = "0"
strand = all_exons[0].strand
thickStart = start
thickEnd = end
colourRGB = "0"
blockCount = str(len(all_exons))
sizes = []
starts = []
for exon in all_exons:
blockSize = str(exon.end - (exon.start))
sizes.append(blockSize)
# start + blockStart should return a zero-based co-ordinate for the
# exon start
blockStart = str((exon.start) - int(start))
starts.append(str(blockStart))
blockSizes = ','.join(sizes)
blockStarts = ','.join(starts)
outfile.write(chrom + "\t" +
start + "\t" +
end + "\t" +
name + "\t" +
score + "\t" +
strand + "\t" +
thickStart + "\t" +
thickEnd + "\t" +
colourRGB + "\t" +
blockCount + "\t" +
blockSizes + "\t" +
blockStarts + "\n")
outfile.close()
def filterMAF(infile, outfile, removed, filter_alignments=False):
"""
    Iterates through the MAF file.
    If filter_alignments is set to an integer, MAF blocks with fewer than
    that many ungapped columns are written to the 'removed' file instead.
"""
inf = bx.align.maf.Reader(open(infile))
outf = bx.align.maf.Writer(open(outfile, "w"))
outf_rj = bx.align.maf.Writer(open(removed, "w"))
removed = 0
included = 0
if filter_alignments:
for block in inf:
if len([x for x in block.column_iter() if x[0] != "-"]) < \
int(filter_alignments):
removed += 1
outf_rj.write(block)
else:
included += 1
outf.write(block)
else:
for block in inf:
outf.write(block)
outf.close()
outf_rj.close()
return (removed, included)
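# Illustrative sketch, not part of the original module: removing MAF blocks
# with fewer than 50 ungapped columns; the file names are hypothetical.
def _example_filterMAF():
    n_removed, n_included = filterMAF("pairwise.maf",
                                      "pairwise_filtered.maf",
                                      "pairwise_removed.maf",
                                      filter_alignments=50)
    E.info("kept %i MAF blocks, removed %i" % (n_included, n_removed))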
def extractGeneBlocks(bedfile,
maf_file,
outfile,
primary_species,
secondary_species):
"""
    Based on Dan Blankenberg's interval_to_maf_merged_fasta.py
    Receives a bed12 containing intervals of interest, and a maf containing
    a pairwise genomic alignment. Iterates through bed intervals and outputs
    intervals in fasta format.
    See comments in maf_utilities.py: maf blocks are always extracted in a
    positive strand orientation relative to the src alignment; reverse
    complementing is then done using the SplicedAlignment method
    get_sequence_reverse_complement.
    The MAF file is indexed on the fly using bx.align.maf.Indexed.
"""
index, index_filename = build_maf_index(maf_file)
output = IOTools.openFile(outfile, "w")
regions_extracted = 0
# iterate through intervals
for line_count, line in enumerate(IOTools.openFile(bedfile).readlines()):
try:
# retrieve exon starts & ends, plus all gtf fields
starts, ends, fields = get_starts_ends_fields_from_gene_bed(line)
# create spliced alignment (N.B. strand always +ve)
# for pep8
spc = [primary_species, secondary_species]
alignment = get_spliced_region_alignment(index,
primary_species,
fields[0],
starts,
ends,
strand='+',
species=spc,
mincols=0,
overwrite_with_gaps=False)
primary_name = secondary_name = fields[3]
alignment_strand = fields[5]
except Exception as e:
print("Error loading exon positions from input line %i: %s" %
(line_count, e))
break
        # write the stitched sequence to outfile in the correct orientation
output.write(">%s.%s\n" % (primary_species, primary_name))
if alignment_strand == "-":
output.write(
alignment.get_sequence_reverse_complement(primary_species))
else:
output.write(alignment.get_sequence(primary_species))
output.write("\n")
output.write(">%s.%s\n" % (secondary_species, secondary_name))
if alignment_strand == "-":
output.write(
alignment.get_sequence_reverse_complement(secondary_species))
else:
output.write(alignment.get_sequence(secondary_species))
output.write("\n")
regions_extracted += 1
output.close()
return regions_extracted
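# Illustrative sketch, not part of the original module: extracting the
# pairwise alignment for each bed12 interval; the file names and species
# are hypothetical.
def _example_extractGeneBlocks():
    n = extractGeneBlocks("lncRNA.bed12.gz",
                          "hg19_mm10.maf",
                          "lncRNA_alignments.fasta.gz",
                          primary_species="hg19",
                          secondary_species="mm10")
    E.info("extracted %i regions" % n)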
##########################################################################
##########################################################################
def complement(template):
""" Generates the reverse complement of a template sequence """
# Turn the string around using slicing
backward_template = template[::-1]
# Create the complementary sequence from the backward template
    reverse_template = backward_template.translate(
        str.maketrans("ACTGactg", "TGACtgac"))
return reverse_template
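# Illustrative sketch, not part of the original module: reverse
# complementing a short template with complement(); lower case is
# preserved base by base.
def _example_complement():
    assert complement("ACGTac") == "gtACGT"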
def extractMAFGeneBlocks(bedfile,
maf_file,
genome_file,
outfile,
primary_species,
secondary_species,
keep_gaps=True):
"""
    Based on Dan Blankenberg's interval_to_maf_merged_fasta.py
    Receives a bed12 containing intervals of interest, and a maf containing
    a pairwise genomic alignment. Iterates through bed intervals and outputs
    intervals in fasta format.
    See comments in maf_utilities.py: maf blocks are always extracted in a
    positive strand orientation relative to the src alignment; reverse
    complementing is then done using the SplicedAlignment method
    get_sequence_reverse_complement.
    The MAF file is indexed on the fly using bx.align.maf.Indexed.
"""
index, index_filename = build_maf_index(maf_file)
output = IOTools.openFile(outfile, "w")
regions_extracted = 0
# iterate through intervals
for line_count, line in enumerate(IOTools.openFile(bedfile).readlines()):
try:
# retrieve exon starts & ends, plus all gtf fields
starts, ends, fields = get_starts_ends_fields_from_gene_bed(line)
# create spliced alignment (N.B. strand always +ve)
# for pep8 purposes
spec = [primary_species, secondary_species]
alignment = get_spliced_region_alignment(index,
primary_species,
fields[0],
starts,
ends,
strand='+',
species=spec,
mincols=0,
overwrite_with_gaps=False)
primary_name = secondary_name = fields[3]
alignment_strand = fields[5]
except Exception as e:
print("Error loading exon positions from input line %i: %s" %
(line_count, e))
break
if keep_gaps:
            # write the stitched sequence to outfile in the correct orientation
output.write(">%s.%s\n" % (primary_species, primary_name))
if alignment_strand == "-":
output.write(
alignment.get_sequence_reverse_complement(primary_species))
else:
output.write(alignment.get_sequence(primary_species))
output.write("\n")
output.write(">%s.%s\n" % (secondary_species, secondary_name))
if alignment_strand == "-":
output.write(
alignment.get_sequence_reverse_complement(
secondary_species))
else:
output.write(alignment.get_sequence(secondary_species))
output.write("\n")
else:
# create indexed fasta
fasta = IndexedFasta.IndexedFasta(genome_file)
# retrieve exon sequence
exon_list = []
for exon in alignment.exons:
exon_list.append(
fasta.getSequence(fields[0], "+", exon.start, exon.end))
# write the stitched sequence to outfile in the correct orientation
output.write(">%s.%s\n" % (primary_species, primary_name))
if alignment_strand == "-":
output.write("".join([complement(x) for x in exon_list]))
else:
output.write("".join([x for x in exon_list]))
output.write("\n")
output.write(">%s.%s\n" % (secondary_species, secondary_name))
if alignment_strand == "-":
output.write(
alignment.get_sequence_reverse_complement(
secondary_species))
else:
output.write(alignment.get_sequence(secondary_species))
output.write("\n")
regions_extracted += 1
output.close()
return regions_extracted
##########################################################################
##########################################################################
def splitAlignedFasta(infile, out_stub, name_dict):
"""
Receives a fasta file containing multiple sequence alignments. Splits into
    multiple outfiles, each containing sequences with the same interval_id.
Identifiers must be in format >species.interval_id
name_dict specifies the format for the outfile identifiers
"""
infile = IOTools.openFile(infile).readlines()
current_id = ""
line_number = 0
for line in infile:
line = line.rstrip()
line_number += 1
if line_number == 1:
current_id = line.split(".")[1]
out = open(os.path.join(out_stub, current_id + ".fasta"), "w")
if line_number % 2 == 1:
gene_id = line.split(".")[1]
if gene_id == current_id:
species = line.split(".")[0]
out.write(name_dict[species] + "\n")
else:
out.close()
current_id = gene_id
out = os.path.join(out_stub, current_id + ".fasta")
if os.path.exists(out):
raise IOError("There are two transcript with gene_id %s &"
" transcript_id %s" % current_id.split("__"))
else:
out = open(out, "w")
out.write(name_dict[line.split(".")[0]] + "\n")
else:
out.write(line + "\n")
out.close()
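# Illustrative sketch, not part of the original module: splitting a
# hypothetical pairwise-alignment fasta into one file per interval_id.
# Keys in name_dict include the leading '>' because the function looks up
# the raw header prefix; the output directory is assumed to exist.
def _example_splitAlignedFasta():
    name_dict = {">hg19": ">hg19", ">mm10": ">mm10"}
    splitAlignedFasta("lncRNA_alignments.fasta", "split_fastas", name_dict)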
def removeGapsFromAlignedFasta(in_dir, out_dir, min_length=0):
"""
    Receives a directory of fasta files, each containing two or more aligned
    sequences. For each file, removes gaps from the reference (first)
    sequence and removes the corresponding intervals from all subsequent
    sequences. Any region of sequence flanked by gaps that is shorter than a
    specified minimum length may also be removed. WARNING: using this
    function will likely cause frame shifts in the resulting sequences.
"""
if not out_dir.endswith("/"):
out_dir = out_dir + "/"
fasta_files = os.listdir(in_dir)
X = 0
for fasta in fasta_files:
X += 1
if X > 5:
break
print(fasta)
print(os.path.abspath(fasta))
print(os.path.join(in_dir, fasta))
file_name = P.snip(os.path.basename(fasta), ".fasta")
        lines = [line.strip() for line in
                 open(os.path.join(in_dir, fasta)).readlines()]
# reference sequence name
name = lines[0]
# the reference sequence
seq = lines[1]
# return a list of start, end for ungapped alignment regions in
# reference
        regex = re.compile(r"\w+")
intervals = [(x.start(), x.end()) for x in regex.finditer(seq)]
x = 1
for interval in intervals:
outfile = "".join(file_name, "__", str(x), ".fasta")
# remove intervals below specified length
if interval[1] - interval[0] <= int(min_length):
continue
else:
# remove gapped regions from reference and subsequent
# alignments
out = open(os.path.join(out_dir, outfile), "w")
out.write(name + "\n")
                out.write(seq[interval[0]:interval[1]] + "\n")
                for i, line in enumerate(lines[2:], 1):
                    if i % 2 == 1:
                        out.write(line + "\n")
                    else:
                        out.write(line[interval[0]:interval[1]] + "\n")
out.close()
x += 1
def runPhyloCSF(in_fasta, tmp_dir, outfile):
"""
not actually used... plus removing gaps is not a good idea
"""
statement = ("zcat %(in_fasta)s |"
" cgat farm"
" --split-at-regex='\n(\s*)\n'"
" --chunk-size=1"
" --output-header"
" --temp-dir=%(tmp_dir)s"
" --use-cluster"
" --log=%(outfile)s.log"
" PhyloCSF 29mammals"
" --frames=3"
" --removeRefGaps"
" --species=%(species)s"
" > %(outfile)s")
def parsePhyloCSF(infile, outfile):
"""
Write phyloCSF result file out in a more readable format.
"""
outf = IOTools.openFile(outfile, "w")
outf.write("gene_id\ttranscript_id\tscore\tstart\tend\n")
for line in IOTools.openFile(infile).readlines():
line = line.split()
gene_id_transcript_id = P.snip(os.path.basename(line[0]), ".phyloCSF")
gene_id, transcript_id = gene_id_transcript_id.split("__")
line_out = [gene_id, transcript_id, line[3], line[4], line[5]]
outf.write("\t".join(line_out) + "\n")
outf.close()
|
CGATOxford/CGATPipelines
|
obsolete/PipelineLncRNA.py
|
Python
|
mit
| 100,312
|
[
"Galaxy"
] |
0d11493c36124ac0988b4365f1725d51f8fec611e850d58f1f76211c3868cb36
|
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from itertools import izip
from calibre.customize import Plugin as _Plugin
FONT_SIZES = [('xx-small', 1),
('x-small', None),
('small', 2),
('medium', 3),
('large', 4),
('x-large', 5),
('xx-large', 6),
(None, 7)]
class Plugin(_Plugin):
fbase = 12
fsizes = [5, 7, 9, 12, 13.5, 17, 20, 22, 24]
screen_size = (1600, 1200)
dpi = 100
def __init__(self, *args, **kwargs):
_Plugin.__init__(self, *args, **kwargs)
self.width, self.height = self.screen_size
fsizes = list(self.fsizes)
self.fkey = list(self.fsizes)
self.fsizes = []
for (name, num), size in izip(FONT_SIZES, fsizes):
self.fsizes.append((name, num, float(size)))
self.fnames = dict((name, sz) for name, _, sz in self.fsizes if name)
self.fnums = dict((num, sz) for _, num, sz in self.fsizes if num)
self.width_pts = self.width * 72./self.dpi
self.height_pts = self.height * 72./self.dpi
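# Illustrative sketch, not part of the original module: the pixel to point
# conversion applied in Plugin.__init__ when deriving width_pts and
# height_pts from the declared screen size and dpi.
def _example_screen_points(screen_size=(1600, 1200), dpi=100):
    width, height = screen_size
    # 72 points per inch, so each pixel spans 72/dpi points
    return width * 72. / dpi, height * 72. / dpi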
# Input profiles {{{
class InputProfile(Plugin):
author = 'Kovid Goyal'
supported_platforms = set(['windows', 'osx', 'linux'])
can_be_disabled = False
type = _('Input profile')
name = 'Default Input Profile'
    short_name = 'default' # Used in the CLI so don't use spaces etc. in it
description = _('This profile tries to provide sane defaults and is useful '
'if you know nothing about the input document.')
class SonyReaderInput(InputProfile):
name = 'Sony Reader'
short_name = 'sony'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/600/700 etc.')
screen_size = (584, 754)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class SonyReader300Input(SonyReaderInput):
name = 'Sony Reader 300'
short_name = 'sony300'
description = _('This profile is intended for the SONY PRS 300.')
dpi = 200
class SonyReader900Input(SonyReaderInput):
author = 'John Schember'
name = 'Sony Reader 900'
short_name = 'sony900'
description = _('This profile is intended for the SONY PRS-900.')
screen_size = (584, 978)
class MSReaderInput(InputProfile):
name = 'Microsoft Reader'
short_name = 'msreader'
description = _('This profile is intended for the Microsoft Reader.')
screen_size = (480, 652)
dpi = 96
fbase = 13
fsizes = [10, 11, 13, 16, 18, 20, 22, 26]
class MobipocketInput(InputProfile):
name = 'Mobipocket Books'
short_name = 'mobipocket'
description = _('This profile is intended for the Mobipocket books.')
# Unfortunately MOBI books are not narrowly targeted, so this information is
# quite likely to be spurious
screen_size = (600, 800)
dpi = 96
fbase = 18
fsizes = [14, 14, 16, 18, 20, 22, 24, 26]
class HanlinV3Input(InputProfile):
name = 'Hanlin V3'
short_name = 'hanlinv3'
description = _('This profile is intended for the Hanlin V3 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class HanlinV5Input(HanlinV3Input):
name = 'Hanlin V5'
short_name = 'hanlinv5'
description = _('This profile is intended for the Hanlin V5 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 200
class CybookG3Input(InputProfile):
name = 'Cybook G3'
short_name = 'cybookg3'
description = _('This profile is intended for the Cybook G3.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class CybookOpusInput(InputProfile):
author = 'John Schember'
name = 'Cybook Opus'
short_name = 'cybook_opus'
description = _('This profile is intended for the Cybook Opus.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 200
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class KindleInput(InputProfile):
name = 'Kindle'
short_name = 'kindle'
description = _('This profile is intended for the Amazon Kindle.')
# Screen size is a best guess
screen_size = (525, 640)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class IlliadInput(InputProfile):
name = 'Illiad'
short_name = 'illiad'
description = _('This profile is intended for the Irex Illiad.')
screen_size = (760, 925)
dpi = 160.0
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class IRexDR1000Input(InputProfile):
author = 'John Schember'
name = 'IRex Digital Reader 1000'
short_name = 'irexdr1000'
description = _('This profile is intended for the IRex Digital Reader 1000.')
# Screen size is a best guess
screen_size = (1024, 1280)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class IRexDR800Input(InputProfile):
author = 'Eric Cronin'
name = 'IRex Digital Reader 800'
short_name = 'irexdr800'
description = _('This profile is intended for the IRex Digital Reader 800.')
screen_size = (768, 1024)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class NookInput(InputProfile):
author = 'John Schember'
name = 'Nook'
short_name = 'nook'
description = _('This profile is intended for the B&N Nook.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 167
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
input_profiles = [InputProfile, SonyReaderInput, SonyReader300Input,
SonyReader900Input, MSReaderInput, MobipocketInput, HanlinV3Input,
HanlinV5Input, CybookG3Input, CybookOpusInput, KindleInput, IlliadInput,
IRexDR1000Input, IRexDR800Input, NookInput]
input_profiles.sort(cmp=lambda x,y:cmp(x.name.lower(), y.name.lower()))
# }}}
class OutputProfile(Plugin):
author = 'Kovid Goyal'
supported_platforms = set(['windows', 'osx', 'linux'])
can_be_disabled = False
type = _('Output profile')
name = 'Default Output Profile'
    short_name = 'default' # Used in the CLI so don't use spaces etc. in it
description = _('This profile tries to provide sane defaults and is useful '
'if you want to produce a document intended to be read at a '
'computer or on a range of devices.')
#: The image size for comics
comic_screen_size = (584, 754)
#: If True the MOBI renderer on the device supports MOBI indexing
supports_mobi_indexing = False
#: If True output should be optimized for a touchscreen interface
touchscreen = False
touchscreen_news_css = ''
#: A list of extra (beyond CSS 2.1) modules supported by the device
#: Format is a cssutils profile dictionary (see iPad for example)
extra_css_modules = []
#: If True, the date is appended to the title of downloaded news
periodical_date_in_title = True
#: Characters used in jackets and catalogs
ratings_char = u'*'
empty_ratings_char = u' '
#: Unsupported unicode characters to be replaced during preprocessing
unsupported_unicode_chars = []
#: Number of ems that the left margin of a blockquote is rendered as
mobi_ems_per_blockquote = 1.0
#: Special periodical formatting needed in EPUB
epub_periodical_format = None
@classmethod
def tags_to_string(cls, tags):
from xml.sax.saxutils import escape
return escape(', '.join(tags))
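# Illustrative sketch, not part of the original module: the default output
# profile XML-escapes the joined tag list before it is embedded in metadata.
def _example_tags_to_string():
    tags = ['Fiction', 'Sci-Fi & Fantasy']
    assert OutputProfile.tags_to_string(tags) == 'Fiction, Sci-Fi &amp; Fantasy'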
class iPadOutput(OutputProfile):
name = 'iPad'
short_name = 'ipad'
description = _('Intended for the iPad and similar devices with a '
'resolution of 768x1024')
screen_size = (768, 1024)
comic_screen_size = (768, 1024)
dpi = 132.0
extra_css_modules = [
{
'name':'webkit',
'props': { '-webkit-border-bottom-left-radius':'{length}',
'-webkit-border-bottom-right-radius':'{length}',
'-webkit-border-top-left-radius':'{length}',
'-webkit-border-top-right-radius':'{length}',
'-webkit-border-radius': r'{border-width}(\s+{border-width}){0,3}|inherit',
},
'macros': {'border-width': '{length}|medium|thick|thin'}
}
]
ratings_char = u'\u2605' # filled star
empty_ratings_char = u'\u2606' # hollow star
touchscreen = True
# touchscreen_news_css {{{
touchscreen_news_css = u'''
/* hr used in articles */
.article_articles_list {
width:18%;
}
.article_link {
color: #593f29;
font-style: italic;
}
.article_next {
-webkit-border-top-right-radius:4px;
-webkit-border-bottom-right-radius:4px;
font-style: italic;
width:32%;
}
.article_prev {
-webkit-border-top-left-radius:4px;
-webkit-border-bottom-left-radius:4px;
font-style: italic;
width:32%;
}
.article_sections_list {
width:18%;
}
.articles_link {
font-weight: bold;
}
.sections_link {
font-weight: bold;
}
.caption_divider {
border:#ccc 1px solid;
}
.touchscreen_navbar {
background:#c3bab2;
border:#ccc 0px solid;
border-collapse:separate;
border-spacing:1px;
margin-left: 5%;
margin-right: 5%;
page-break-inside:avoid;
width: 90%;
-webkit-border-radius:4px;
}
.touchscreen_navbar td {
background:#fff;
font-family:Helvetica;
font-size:80%;
/* UI touchboxes use 8px padding */
padding: 6px;
text-align:center;
}
.touchscreen_navbar td a:link {
color: #593f29;
text-decoration: none;
}
/* Index formatting */
.publish_date {
text-align:center;
}
.divider {
border-bottom:1em solid white;
border-top:1px solid gray;
}
hr.caption_divider {
border-color:black;
border-style:solid;
border-width:1px;
}
/* Feed summary formatting */
.article_summary {
display:inline-block;
padding-bottom:0.5em;
}
.feed {
font-family:sans-serif;
font-weight:bold;
font-size:larger;
}
.feed_link {
font-style: italic;
}
.feed_next {
-webkit-border-top-right-radius:4px;
-webkit-border-bottom-right-radius:4px;
font-style: italic;
width:40%;
}
.feed_prev {
-webkit-border-top-left-radius:4px;
-webkit-border-bottom-left-radius:4px;
font-style: italic;
width:40%;
}
.feed_title {
text-align: center;
font-size: 160%;
}
.feed_up {
font-weight: bold;
width:20%;
}
.summary_headline {
font-weight:bold;
text-align:left;
}
.summary_byline {
text-align:left;
font-family:monospace;
}
.summary_text {
text-align:left;
}
'''
# }}}
class iPad3Output(iPadOutput):
screen_size = comic_screen_size = (2048, 1536)
dpi = 264.0
name = 'iPad 3'
short_name = 'ipad3'
description = _('Intended for the iPad 3 and similar devices with a '
'resolution of 1536x2048')
class TabletOutput(iPadOutput):
name = 'Tablet'
short_name = 'tablet'
description = _('Intended for generic tablet devices, does no resizing of images')
screen_size = (10000, 10000)
comic_screen_size = (10000, 10000)
class SamsungGalaxy(TabletOutput):
name = 'Samsung Galaxy'
short_name = 'galaxy'
description = _('Intended for the Samsung Galaxy and similar tablet devices with '
'a resolution of 600x1280')
screen_size = comic_screen_size = (600, 1280)
class NookHD(TabletOutput):
name = 'Nook HD+'
short_name = 'nook_hd_plus'
description = _('Intended for the Nook HD+ and similar tablet devices with '
'a resolution of 1280x1920')
screen_size = comic_screen_size = (1280, 1920)
class SonyReaderOutput(OutputProfile):
name = 'Sony Reader'
short_name = 'sony'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/600/700 etc.')
screen_size = (590, 775)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
unsupported_unicode_chars = [u'\u201f', u'\u201b']
epub_periodical_format = 'sony'
#periodical_date_in_title = False
class KoboReaderOutput(OutputProfile):
name = 'Kobo Reader'
short_name = 'kobo'
description = _('This profile is intended for the Kobo Reader.')
screen_size = (536, 710)
comic_screen_size = (536, 710)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class SonyReader300Output(SonyReaderOutput):
author = 'John Schember'
name = 'Sony Reader 300'
short_name = 'sony300'
description = _('This profile is intended for the SONY PRS-300.')
dpi = 200
class SonyReader900Output(SonyReaderOutput):
author = 'John Schember'
name = 'Sony Reader 900'
short_name = 'sony900'
description = _('This profile is intended for the SONY PRS-900.')
screen_size = (600, 999)
comic_screen_size = screen_size
class GenericEink(SonyReaderOutput):
name = 'Generic e-ink'
short_name = 'generic_eink'
description = _('Suitable for use with any e-ink device')
epub_periodical_format = None
class GenericEinkLarge(GenericEink):
name = 'Generic e-ink large'
short_name = 'generic_eink_large'
description = _('Suitable for use with any large screen e-ink device')
screen_size = (600, 999)
comic_screen_size = screen_size
class JetBook5Output(OutputProfile):
name = 'JetBook 5-inch'
short_name = 'jetbook5'
description = _('This profile is intended for the 5-inch JetBook.')
screen_size = (480, 640)
dpi = 168.451
class SonyReaderLandscapeOutput(SonyReaderOutput):
name = 'Sony Reader Landscape'
short_name = 'sony-landscape'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/700 etc, in landscape mode. Mainly useful '
'for comics.')
screen_size = (784, 1012)
comic_screen_size = (784, 1012)
class MSReaderOutput(OutputProfile):
name = 'Microsoft Reader'
short_name = 'msreader'
description = _('This profile is intended for the Microsoft Reader.')
screen_size = (480, 652)
dpi = 96
fbase = 13
fsizes = [10, 11, 13, 16, 18, 20, 22, 26]
class MobipocketOutput(OutputProfile):
name = 'Mobipocket Books'
short_name = 'mobipocket'
description = _('This profile is intended for the Mobipocket books.')
# Unfortunately MOBI books are not narrowly targeted, so this information is
# quite likely to be spurious
screen_size = (600, 800)
dpi = 96
fbase = 18
fsizes = [14, 14, 16, 18, 20, 22, 24, 26]
class HanlinV3Output(OutputProfile):
name = 'Hanlin V3'
short_name = 'hanlinv3'
description = _('This profile is intended for the Hanlin V3 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class HanlinV5Output(HanlinV3Output):
name = 'Hanlin V5'
short_name = 'hanlinv5'
description = _('This profile is intended for the Hanlin V5 and its clones.')
dpi = 200
class CybookG3Output(OutputProfile):
name = 'Cybook G3'
short_name = 'cybookg3'
description = _('This profile is intended for the Cybook G3.')
# Screen size is a best guess
screen_size = (600, 800)
comic_screen_size = (600, 757)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class CybookOpusOutput(SonyReaderOutput):
author = 'John Schember'
name = 'Cybook Opus'
short_name = 'cybook_opus'
description = _('This profile is intended for the Cybook Opus.')
# Screen size is a best guess
dpi = 200
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
epub_periodical_format = None
class KindleOutput(OutputProfile):
name = 'Kindle'
short_name = 'kindle'
description = _('This profile is intended for the Amazon Kindle.')
# Screen size is a best guess
screen_size = (525, 640)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
supports_mobi_indexing = True
periodical_date_in_title = False
empty_ratings_char = u'\u2606'
ratings_char = u'\u2605'
mobi_ems_per_blockquote = 2.0
@classmethod
def tags_to_string(cls, tags):
return u'%s <br/><span style="color:white">%s</span>' % (', '.join(tags),
'ttt '.join(tags)+'ttt ')
class KindleDXOutput(OutputProfile):
name = 'Kindle DX'
short_name = 'kindle_dx'
description = _('This profile is intended for the Amazon Kindle DX.')
# Screen size is a best guess
screen_size = (744, 1022)
dpi = 150.0
comic_screen_size = (771, 1116)
#comic_screen_size = (741, 1022)
supports_mobi_indexing = True
periodical_date_in_title = False
empty_ratings_char = u'\u2606'
ratings_char = u'\u2605'
mobi_ems_per_blockquote = 2.0
@classmethod
def tags_to_string(cls, tags):
return u'%s <br/><span style="color: white">%s</span>' % (', '.join(tags),
'ttt '.join(tags)+'ttt ')
class KindlePaperWhiteOutput(KindleOutput):
name = 'Kindle PaperWhite'
short_name = 'kindle_pw'
description = _('This profile is intended for the Amazon Kindle PaperWhite')
# Screen size is a best guess
screen_size = (658, 940)
dpi = 212.0
comic_screen_size = screen_size
class KindleFireOutput(KindleDXOutput):
name = 'Kindle Fire'
short_name = 'kindle_fire'
description = _('This profile is intended for the Amazon Kindle Fire.')
screen_size = (570, 1016)
dpi = 169.0
comic_screen_size = (570, 1016)
@classmethod
def tags_to_string(cls, tags):
# The idiotic fire doesn't obey the color:white directive
from xml.sax.saxutils import escape
return escape(', '.join(tags))
class IlliadOutput(OutputProfile):
name = 'Illiad'
short_name = 'illiad'
description = _('This profile is intended for the Irex Illiad.')
screen_size = (760, 925)
comic_screen_size = (760, 925)
dpi = 160.0
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class IRexDR1000Output(OutputProfile):
author = 'John Schember'
name = 'IRex Digital Reader 1000'
short_name = 'irexdr1000'
description = _('This profile is intended for the IRex Digital Reader 1000.')
# Screen size is a best guess
screen_size = (1024, 1280)
comic_screen_size = (996, 1241)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class IRexDR800Output(OutputProfile):
author = 'Eric Cronin'
name = 'IRex Digital Reader 800'
short_name = 'irexdr800'
description = _('This profile is intended for the IRex Digital Reader 800.')
# Screen size is a best guess
screen_size = (768, 1024)
comic_screen_size = (768, 1024)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class NookOutput(OutputProfile):
author = 'John Schember'
name = 'Nook'
short_name = 'nook'
description = _('This profile is intended for the B&N Nook.')
# Screen size is a best guess
screen_size = (600, 730)
comic_screen_size = (584, 730)
dpi = 167
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class NookColorOutput(NookOutput):
name = 'Nook Color'
short_name = 'nook_color'
description = _('This profile is intended for the B&N Nook Color.')
screen_size = (600, 900)
comic_screen_size = (594, 900)
dpi = 169
class BambookOutput(OutputProfile):
author = 'Li Fanxi'
name = 'Sanda Bambook'
short_name = 'bambook'
description = _('This profile is intended for the Sanda Bambook.')
# Screen size is for full screen display
screen_size = (580, 780)
# Comic size is for normal display
comic_screen_size = (540, 700)
dpi = 168.451
fbase = 12
fsizes = [10, 12, 14, 16]
class PocketBook900Output(OutputProfile):
author = 'Chris Lockfort'
name = 'PocketBook Pro 900'
short_name = 'pocketbook_900'
description = _('This profile is intended for the PocketBook Pro 900 series of devices.')
screen_size = (810, 1180)
dpi = 150.0
comic_screen_size = screen_size
class PocketBookPro912Output(OutputProfile):
author = 'Daniele Pizzolli'
name = 'PocketBook Pro 912'
short_name = 'pocketbook_pro_912'
description = _('This profile is intended for the PocketBook Pro 912 series of devices.')
# According to http://download.pocketbook-int.com/user-guides/E_Ink/912/User_Guide_PocketBook_912(EN).pdf
screen_size = (825, 1200)
dpi = 155.0
comic_screen_size = screen_size
output_profiles = [OutputProfile, SonyReaderOutput, SonyReader300Output,
SonyReader900Output, MSReaderOutput, MobipocketOutput, HanlinV3Output,
HanlinV5Output, CybookG3Output, CybookOpusOutput, KindleOutput,
iPadOutput, iPad3Output, KoboReaderOutput, TabletOutput, SamsungGalaxy,
SonyReaderLandscapeOutput, KindleDXOutput, IlliadOutput, NookHD,
IRexDR1000Output, IRexDR800Output, JetBook5Output, NookOutput,
BambookOutput, NookColorOutput, PocketBook900Output, PocketBookPro912Output,
GenericEink, GenericEinkLarge, KindleFireOutput, KindlePaperWhiteOutput]
output_profiles.sort(cmp=lambda x,y:cmp(x.name.lower(), y.name.lower()))
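# Illustrative sketch, not part of the original module: resolving a CLI
# short_name (e.g. 'kindle_pw') to its profile class, falling back to the
# default output profile when the name is unknown.
def _example_find_output_profile(short_name='kindle_pw'):
    for prof in output_profiles:
        if prof.short_name == short_name:
            return prof
    return OutputProfile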
|
sss/calibre-at-bzr
|
src/calibre/customize/profiles.py
|
Python
|
gpl-3.0
| 25,766
|
[
"Galaxy"
] |
331363d05f7792ffa6e66f2f8ba7589f59faa07fdeddb0036ff1bcc4996d89d5
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Bibauthorid Web Interface Logic and URL handler."""
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from copy import deepcopy
from pprint import pformat
try:
from invenio.jsonutils import json, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.bibauthorid_config import CLAIMPAPER_ADMIN_ROLE
from invenio.bibauthorid_config import CLAIMPAPER_USER_ROLE
#from invenio.bibauthorid_config import EXTERNAL_CLAIMED_RECORDS_KEY
from invenio.config import CFG_SITE_LANG
from invenio.config import CFG_SITE_URL
from invenio.config import CFG_SITE_NAME
from invenio.config import CFG_INSPIRE_SITE
#from invenio.config import CFG_SITE_SECURE_URL
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language #, wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url
from invenio.webuser import getUid, page_not_authorized, collect_user_info, set_user_preferences
from invenio.webuser import email_valid_p, emailUnique
from invenio.webuser import get_email_from_username, get_uid_from_email, isUserSuperAdmin
from invenio.access_control_admin import acc_find_user_role_actions
from invenio.access_control_admin import acc_get_user_roles, acc_get_role_id
from invenio.search_engine import perform_request_search, sort_records
from invenio.search_engine_utils import get_fieldvalues
import invenio.bibauthorid_webapi as webapi
import invenio.bibauthorid_config as bconfig
from invenio.bibauthorid_frontinterface import get_bibrefrec_name_string
from invenio.bibauthorid_frontinterface import update_personID_names_string_set
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDPages(WebInterfaceDirectory):
"""
Handle /person pages and AJAX requests
Supplies the methods
/person/<string>
/person/action
/person/welcome
/person/search
/person/you -> /person/<string>
/person/export
/person/claimstub
"""
_exports = ['', 'action', 'welcome', 'search', 'you', 'export', 'tickets_admin', 'claimstub']
def __init__(self, person_id=None):
"""
Constructor of the web interface.
@param person_id: The identifier of a user. Can be one of:
- a bibref: e.g. "100:1442,155"
- a person id: e.g. "14"
- a canonical id: e.g. "Ellis_J_1"
@type person_id: string
@return: will return an empty object if the identifier is of the wrong type
@rtype: None (if something is not right)
"""
pid = -1
is_bibref = False
is_canonical_id = False
self.adf = self.__init_call_dispatcher()
if (not isinstance(person_id, str)) or (not person_id):
self.person_id = pid
return None
if person_id.count(":") and person_id.count(","):
is_bibref = True
elif webapi.is_valid_canonical_id(person_id):
is_canonical_id = True
if is_bibref and pid > -2:
bibref = person_id
table, ref, bibrec = None, None, None
if not bibref.count(":"):
pid = -2
if not bibref.count(","):
pid = -2
try:
table = bibref.split(":")[0]
ref = bibref.split(":")[1].split(",")[0]
bibrec = bibref.split(":")[1].split(",")[1]
except IndexError:
pid = -2
try:
table = int(table)
ref = int(ref)
bibrec = int(bibrec)
except (ValueError, TypeError):
pid = -2
if pid == -1:
try:
pid = int(webapi.get_person_id_from_paper(person_id))
except (ValueError, TypeError):
pid = -1
else:
pid = -1
elif is_canonical_id:
try:
pid = int(webapi.get_person_id_from_canonical_id(person_id))
except (ValueError, TypeError):
pid = -1
else:
try:
pid = int(person_id)
except ValueError:
pid = -1
self.person_id = pid
def __call__(self, req, form):
'''
Serve the main person page.
Will use the object's person id to get a person's information.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
self._session_bareinit(req)
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'ticketid': (int, -1),
'open_claim': (str, None)})
ln = argd['ln']
# ln = wash_language(argd['ln'])
rt_ticket_id = argd['ticketid']
req.argd = argd #needed for perform_req_search
session = get_session(req)
ulevel = self.__get_user_role(req)
uid = getUid(req)
if self.person_id < 0:
return redirect_to_url(req, "%s/person/search" % (CFG_SITE_URL))
if isUserSuperAdmin({'uid': uid}):
ulevel = 'admin'
no_access = self._page_access_permission_wall(req, [self.person_id])
if no_access:
return no_access
try:
pinfo = session["personinfo"]
except KeyError:
pinfo = dict()
session['personinfo'] = pinfo
if 'open_claim' in argd and argd['open_claim']:
pinfo['claim_in_process'] = True
elif "claim_in_process" in pinfo and pinfo["claim_in_process"]:
pinfo['claim_in_process'] = True
else:
pinfo['claim_in_process'] = False
uinfo = collect_user_info(req)
uinfo['precached_viewclaimlink'] = pinfo['claim_in_process']
set_user_preferences(uid, uinfo)
pinfo['ulevel'] = ulevel
if self.person_id != -1:
pinfo["claimpaper_admin_last_viewed_pid"] = self.person_id
pinfo["ln"] = ln
if not "ticket" in pinfo:
pinfo["ticket"] = []
if rt_ticket_id:
pinfo["admin_requested_ticket_id"] = rt_ticket_id
session.save()
content = ''
for part in ['optional_menu', 'ticket_box', 'personid_info', 'tabs', 'footer']:
content += self.adf[part][ulevel](req, form, ln)
title = self.adf['title'][ulevel](req, form, ln)
body = TEMPLATE.tmpl_person_detail_layout(content)
metaheaderadd = self._scripts()
self._clean_ticket(req)
return page(title=title,
metaheaderadd=metaheaderadd,
body=body,
req=req,
language=ln)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
'''
Display an error page if the user is not authorized to use the interface.
@param req: Apache Request Object for session management
@type req: Apache Request Object
@param req_pid: Requested person id
@type req_pid: int
@param req_level: Request level required for the page
@type req_level: string
'''
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
uinfo = collect_user_info(req)
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
is_authorized = True
pids_to_check = []
if not bconfig.AID_ENABLED:
return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))
if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
if req_pid and not isinstance(req_pid, list):
pids_to_check = [req_pid]
elif req_pid and isinstance(req_pid, list):
pids_to_check = req_pid
if (not (uinfo['precached_usepaperclaim']
or uinfo['precached_usepaperattribution'])
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
if is_authorized and not webapi.user_can_view_CMP(uid):
is_authorized = False
if is_authorized and 'ticket' in pinfo:
for tic in pinfo["ticket"]:
if 'pid' in tic:
pids_to_check.append(tic['pid'])
if pids_to_check and is_authorized:
user_pid = webapi.get_pid_from_uid(uid)
if not uinfo['precached_usepaperattribution']:
if user_pid[1]:
user_pid = user_pid[0][0]
else:
user_pid = -1
if (not user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
elif (user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
for tic in list(pinfo["ticket"]):
if not tic["pid"] == user_pid:
pinfo['ticket'].remove(tic)
if not is_authorized:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
else:
return ""
def _session_bareinit(self, req):
'''
Initializes session personinfo entry if none exists
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
uid = getUid(req)
ulevel = self.__get_user_role(req)
if isUserSuperAdmin({'uid': uid}):
ulevel = 'admin'
try:
pinfo = session["personinfo"]
pinfo['ulevel'] = ulevel
if "claimpaper_admin_last_viewed_pid" not in pinfo:
pinfo["claimpaper_admin_last_viewed_pid"] = -2
if 'ln' not in pinfo:
pinfo["ln"] = 'en'
if 'ticket' not in pinfo:
pinfo["ticket"] = []
session.save()
except KeyError:
pinfo = dict()
session['personinfo'] = pinfo
pinfo['ulevel'] = ulevel
pinfo["claimpaper_admin_last_viewed_pid"] = -2
pinfo["ln"] = 'en'
pinfo["ticket"] = []
session.save()
def _lookup(self, component, path):
"""
This handler parses dynamic URLs:
- /person/1332 shows the page of person 1332
- /person/100:5522,1431 shows the page of the person
identified by the table:bibref,bibrec pair
"""
if not component in self._exports:
return WebInterfaceBibAuthorIDPages(component), path
def __init_call_dispatcher(self):
'''
Initialization of the call dispatcher dictionary
@return: call dispatcher dictionary
@rtype: dict
'''
#author_detail_functions
adf = dict()
adf['title'] = dict()
adf['optional_menu'] = dict()
adf['ticket_box'] = dict()
adf['tabs'] = dict()
adf['footer'] = dict()
adf['personid_info'] = dict()
adf['ticket_dispatch'] = dict()
adf['ticket_commit'] = dict()
adf['title']['guest'] = self._generate_title_guest
adf['title']['user'] = self._generate_title_user
adf['title']['admin'] = self._generate_title_admin
adf['optional_menu']['guest'] = self._generate_optional_menu_guest
adf['optional_menu']['user'] = self._generate_optional_menu_user
adf['optional_menu']['admin'] = self._generate_optional_menu_admin
adf['ticket_box']['guest'] = self._generate_ticket_box_guest
adf['ticket_box']['user'] = self._generate_ticket_box_user
adf['ticket_box']['admin'] = self._generate_ticket_box_admin
adf['personid_info']['guest'] = self._generate_person_info_box_guest
adf['personid_info']['user'] = self._generate_person_info_box_user
adf['personid_info']['admin'] = self._generate_person_info_box_admin
adf['tabs']['guest'] = self._generate_tabs_guest
adf['tabs']['user'] = self._generate_tabs_user
adf['tabs']['admin'] = self._generate_tabs_admin
adf['footer']['guest'] = self._generate_footer_guest
adf['footer']['user'] = self._generate_footer_user
adf['footer']['admin'] = self._generate_footer_admin
adf['ticket_dispatch']['guest'] = self._ticket_dispatch_user
adf['ticket_dispatch']['user'] = self._ticket_dispatch_user
adf['ticket_dispatch']['admin'] = self._ticket_dispatch_admin
adf['ticket_commit']['guest'] = self._ticket_commit_guest
adf['ticket_commit']['user'] = self._ticket_commit_user
adf['ticket_commit']['admin'] = self._ticket_commit_admin
return adf
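# Illustrative use of the dispatcher (see __call__ above): each page section
# is rendered by looking up the handler for the current user level, e.g.
#
#     content += self.adf['tabs'][ulevel](req, form, ln)
#
# where ulevel is one of 'guest', 'user' or 'admin'.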
def _generate_title_guest(self, req, form, ln):
'''
Generate the title for a guest user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
if self.person_id:
return 'Attribute papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
else:
return 'Attribute papers'
def _generate_title_user(self, req, form, ln):
'''
Generate the title for a regular user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
if self.person_id:
return 'Attribute papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
else:
return 'Attribute papers'
def _generate_title_admin(self, req, form, ln):
'''
Generate the title for an admin user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
if self.person_id:
return 'Attribute papers (administrator interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
else:
return 'Attribute papers'
def _generate_optional_menu_guest(self, req, form, ln):
'''
Generate the menu for a guest user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu()
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def _generate_optional_menu_user(self, req, form, ln):
'''
Generate the menu for a regular user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu()
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def _generate_optional_menu_admin(self, req, form, ln):
'''
Generate the menu for an admin user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu_admin()
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def _generate_ticket_box_guest(self, req, form, ln):
'''
Generate the semi-permanent info box for a guest user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
session = get_session(req)
pinfo = session['personinfo']
ticket = pinfo['ticket']
pendingt = []
donet = []
for t in ticket:
if 'execution_result' in t:
if t['execution_result'] == True:
donet.append(t)
else:
pendingt.append(t)
if len(pendingt) == 1:
message = 'There is ' + str(len(pendingt)) + ' transaction in progress.'
else:
message = 'There are ' + str(len(pendingt)) + ' transactions in progress.'
teaser = 'Claim in process!'
if len(pendingt) == 0:
box = ""
else:
box = TEMPLATE.tmpl_ticket_box(teaser, message)
if len(donet) > 0:
teaser = 'Success!'
if len(donet) == 1:
message = str(len(donet)) + ' transaction successfully executed.'
else:
message = str(len(donet)) + ' transactions successfully executed.'
box = box + TEMPLATE.tmpl_notification_box(message, teaser)
return box
def _generate_ticket_box_user(self, req, form, ln):
'''
Generate the semi-permanent info box for a regular user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
return self._generate_ticket_box_guest(req, form, ln)
def _generate_ticket_box_admin(self, req, form, ln):
'''
Generate the semi-permanent info box for an admin user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
return self._generate_ticket_box_guest(req, form, ln)
def _generate_person_info_box_guest(self, req, form, ln):
'''
Generate the name info box for a guest user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
return self._generate_person_info_box_admin(req, form, ln)
def _generate_person_info_box_user(self, req, form, ln):
'''
Generate the name info box for a regular user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
return self._generate_person_info_box_admin(req, form, ln)
def _generate_person_info_box_admin(self, req, form, ln):
'''
Generate the name info box for an admin user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
names = webapi.get_person_names_from_id(self.person_id)
box = TEMPLATE.tmpl_admin_person_info_box(ln, person_id=self.person_id,
names=names)
return box
def _generate_tabs_guest(self, req, form, ln):
'''
Generate the tabs content for a guest user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
session = get_session(req)
# uid = getUid(req)
pinfo = session["personinfo"]
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
links = [] # ['delete', 'commit','del_entry','commit_entry']
tabs = ['records', 'repealed', 'review']
verbiage_dict = {'confirmed': _('Papers'), 'repealed': _('Papers removed from this profile'),
'review': _('Papers in need of review'),
'tickets': _('Open Tickets'), 'data': _('Data'),
'confirmed_ns': _('Papers of this Person'),
'repealed_ns': _('Papers _not_ of this Person'),
'review_ns': _('Papers in need of review'),
'tickets_ns': _('Tickets for this Person'),
'data_ns': _('Additional Data for this Person')}
buttons_verbiage_dict = {'mass_buttons': {'no_doc_string': _('Sorry, there are currently no documents to be found in this category.'),
'b_confirm': _('Yes, those papers are by this person.'),
'b_repeal': _('No, those papers are not by this person'),
'b_to_others': _('Assign to other person'),
'b_forget': _('Forget decision')},
'record_undecided': {'alt_confirm': _('Confirm!'),
'confirm_text': _('Yes, this paper is by this person.'),
'alt_repeal': _('Rejected!'),
'repeal_text': _('No, this paper is <i>not</i> by this person'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')},
'record_confirmed': {'alt_confirm': _('Confirmed.'),
'confirm_text': _('Marked as this person\'s paper'),
'alt_forget': _('Forget decision!'),
'forget_text': _('Forget decision.'),
'alt_repeal': _('Repeal!'),
'repeal_text': _('But it\'s <i>not</i> this person\'s paper.'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')},
'record_repealed': {'alt_confirm': _('Confirm!'),
'confirm_text': _('But it <i>is</i> this person\'s paper.'),
'alt_forget': _('Forget decision!'),
'forget_text': _('Forget decision.'),
'alt_repeal': _('Repealed'),
'repeal_text': _('Marked as not this person\'s paper'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')}}
return self._generate_tabs_admin(req, form, ln, show_tabs=tabs, ticket_links=links,
show_reset_button=False,
open_tickets=[], verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict)
def _generate_tabs_user(self, req, form, ln):
'''
Generate the tabs content for a regular user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
'''
session = get_session(req)
uid = getUid(req)
pinfo = session['personinfo']
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
links = ['delete', 'del_entry']
tabs = ['records', 'repealed', 'review', 'tickets']
if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid)[0][0]:
verbiage_dict = {'confirmed': _('Your papers'), 'repealed': _('Not your papers'),
'review': _('Papers in need of review'),
'tickets': _('Your tickets'), 'data': _('Data'),
'confirmed_ns': _('Your papers'),
'repealed_ns': _('Not your papers'),
'review_ns': _('Papers in need of review'),
'tickets_ns': _('Your tickets'),
'data_ns': _('Additional Data for this Person')}
buttons_verbiage_dict = {'mass_buttons': {'no_doc_string': _('Sorry, there are currently no documents to be found in this category.'),
'b_confirm': _('These are mine!'),
'b_repeal': _('These are not mine!'),
'b_to_others': _('It\'s not mine, but I know whose it is!'),
'b_forget': _('Forget decision')},
'record_undecided': {'alt_confirm': _('Mine!'),
'confirm_text': _('This is my paper!'),
'alt_repeal': _('Not mine!'),
'repeal_text': _('This is not my paper!'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')},
'record_confirmed': {'alt_confirm': _('Not Mine.'),
'confirm_text': _('Marked as my paper!'),
'alt_forget': _('Forget decision!'),
'forget_text': _('Forget assignment decision'),
'alt_repeal': _('Not Mine!'),
'repeal_text': _('But this is mine!'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')},
'record_repealed': {'alt_confirm': _('Mine!'),
'confirm_text': _('But this is my paper!'),
'alt_forget': _('Forget decision!'),
'forget_text': _('Forget decision!'),
'alt_repeal': _('Not Mine!'),
'repeal_text': _('Marked as not your paper.'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')}}
else:
verbiage_dict = {'confirmed': _('Papers'), 'repealed': _('Papers removed from this profile'),
'review': _('Papers in need of review'),
'tickets': _('Your tickets'), 'data': _('Data'),
'confirmed_ns': _('Papers of this Person'),
'repealed_ns': _('Papers _not_ of this Person'),
'review_ns': _('Papers in need of review'),
'tickets_ns': _('Tickets you created about this person'),
'data_ns': _('Additional Data for this Person')}
buttons_verbiage_dict = {'mass_buttons': {'no_doc_string': _('Sorry, there are currently no documents to be found in this category.'),
'b_confirm': _('Yes, those papers are by this person.'),
'b_repeal': _('No, those papers are not by this person'),
'b_to_others': _('Assign to other person'),
'b_forget': _('Forget decision')},
'record_undecided': {'alt_confirm': _('Confirm!'),
'confirm_text': _('Yes, this paper is by this person.'),
'alt_repeal': _('Rejected!'),
'repeal_text': _('No, this paper is <i>not</i> by this person'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')},
'record_confirmed': {'alt_confirm': _('Confirmed.'),
'confirm_text': _('Marked as this person\'s paper'),
'alt_forget': _('Forget decision!'),
'forget_text': _('Forget decision.'),
'alt_repeal': _('Repeal!'),
'repeal_text': _('But it\'s <i>not</i> this person\'s paper.'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')},
'record_repealed': {'alt_confirm': _('Confirm!'),
'confirm_text': _('But it <i>is</i> this person\'s paper.'),
'alt_forget': _('Forget decision!'),
'forget_text': _('Forget decision.'),
'alt_repeal': _('Repealed'),
'repeal_text': _('Marked as not this person\'s paper'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')}}
session = get_session(req)
uid = getUid(req)
open_tickets = webapi.get_person_request_ticket(self.person_id)
tickets = []
for t in open_tickets:
owns = False
for row in t[0]:
if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
owns = True
if owns:
tickets.append(t)
return self._generate_tabs_admin(req, form, ln, show_tabs=tabs, ticket_links=links,
open_tickets=tickets, verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict)
def _generate_tabs_admin(self, req, form, ln,
show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
open_tickets=None, ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'],
verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
'''
Generate the tabs content for an admin user
@param req: Apache Request Object
@type req: Apache Request Object
@param form: POST/GET variables of the request
@type form: dict
@param ln: language to show this page in
@type ln: string
@param show_tabs: list of tabs to display
@type show_tabs: list of strings
@param ticket_links: list of links to display
@type ticket_links: list of strings
@param verbiage_dict: language for the elements
@type verbiage_dict: dict
@param buttons_verbiage_dict: language for the buttons
@type buttons_verbiage_dict: dict
'''
session = get_session(req)
personinfo = {}
records = []
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
if not verbiage_dict:
verbiage_dict = self._get_default_verbiage_dicts_for_admin(req)
if not buttons_verbiage_dict:
buttons_verbiage_dict = self._get_default_buttons_verbiage_dicts_for_admin(req)
all_papers = webapi.get_papers_by_person_id(self.person_id,
ext_out=True)
for paper in all_papers:
records.append({'recid': paper[0],
'bibref': paper[1],
'flag': paper[2],
'authorname': paper[3],
'authoraffiliation': paper[4],
'paperdate': paper[5],
'rt_status': paper[6],
'paperexperiment': paper[7]})
rejected_papers = [row for row in records if row['flag'] < -1]
rest_of_papers = [row for row in records if row['flag'] >= -1]
review_needed = webapi.get_review_needing_records(self.person_id)
if len(review_needed) < 1:
if 'review' in show_tabs:
show_tabs.remove('review')
rt_tickets = None
if open_tickets == None:
open_tickets = webapi.get_person_request_ticket(self.person_id)
else:
if len(open_tickets) < 1:
if 'tickets' in show_tabs:
show_tabs.remove('tickets')
if "admin_requested_ticket_id" in personinfo:
rt_tickets = personinfo["admin_requested_ticket_id"]
# Send data to template function
tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
rejected_papers=rejected_papers,
rest_of_papers=rest_of_papers,
review_needed=review_needed,
rt_tickets=rt_tickets,
open_rt_tickets=open_tickets,
show_tabs=show_tabs,
ticket_links=ticket_links,
verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button)
return tabs
def _get_default_verbiage_dicts_for_admin(self, req):
session = get_session(req)
personinfo = {}
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
verbiage_dict = {'confirmed': _('Papers'), 'repealed': _('Papers removed from this profile'),
'review': _('Papers in need of review'),
'tickets': _('Tickets'), 'data': _('Data'),
'confirmed_ns': _('Papers of this Person'),
'repealed_ns': _('Papers _not_ of this Person'),
'review_ns': _('Papers in need of review'),
'tickets_ns': _('Request Tickets'),
'data_ns': _('Additional Data for this Person')}
return verbiage_dict
def _get_default_buttons_verbiage_dicts_for_admin(self, req):
session = get_session(req)
personinfo = {}
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
buttons_verbiage_dict = {'mass_buttons': {'no_doc_string': _('Sorry, there are currently no documents to be found in this category.'),
'b_confirm': _('Yes, those papers are by this person.'),
'b_repeal': _('No, those papers are not by this person'),
'b_to_others': _('Assign to other person'),
'b_forget': _('Forget decision')},
'record_undecided': {'alt_confirm': _('Confirm!'),
'confirm_text': _('Yes, this paper is by this person.'),
'alt_repeal': _('Rejected!'),
'repeal_text': _('No, this paper is <i>not</i> by this person'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')},
'record_confirmed': {'alt_confirm': _('Confirmed.'),
'confirm_text': _('Marked as this person\'s paper'),
'alt_forget': _('Forget decision!'),
'forget_text': _('Forget decision.'),
'alt_repeal': _('Repeal!'),
'repeal_text': _('But it\'s <i>not</i> this person\'s paper.'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')},
'record_repealed': {'alt_confirm': _('Confirm!'),
'confirm_text': _('But it <i>is</i> this person\'s paper.'),
'alt_forget': _('Forget decision!'),
'forget_text': _('Forget decision.'),
'alt_repeal': _('Repealed'),
'repeal_text': _('Marked as not this person\'s paper'),
'to_other_text': _('Assign to another person'),
'alt_to_other': _('To other person!')}}
return buttons_verbiage_dict
def _generate_footer_guest(self, req, form, ln):
return self._generate_footer_admin(req, form, ln)
def _generate_footer_user(self, req, form, ln):
return self._generate_footer_admin(req, form, ln)
def _generate_footer_admin(self, req, form, ln):
return TEMPLATE.tmpl_invenio_search_box()
def _ticket_dispatch_guest(self, req):
'''
Takes care of the ticket when in guest mode
'''
return self._ticket_dispatch_user(req)
def _ticket_dispatch_user(self, req):
'''
Takes care of the ticket when in user and guest mode
'''
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
# ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
bibref_check_required = self._ticket_review_bibref_check(req)
if bibref_check_required:
return bibref_check_required
for t in ticket:
t['status'] = webapi.check_transaction_permissions(uid,
t['bibref'],
t['pid'],
t['action'])
session.save()
return self._ticket_final_review(req)
def _ticket_dispatch_admin(self, req):
'''
Takes care of the ticket when in administrator mode
'''
return self._ticket_dispatch_user(req)
def _ticket_review_bibref_check(self, req):
'''
Checks whether any of the transactions on the ticket need review.
If so, prompts the user to select the correct bibref.
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
if ("bibref_check_required" in pinfo and pinfo["bibref_check_required"]
and "bibref_check_reviewed_bibrefs" in pinfo):
for rbibreft in pinfo["bibref_check_reviewed_bibrefs"]:
if not rbibreft.count("||") or not rbibreft.count(","):
continue
rpid, rbibref = rbibreft.split("||")
rrecid = rbibref.split(",")[1]
rpid = webapi.wash_integer_id(rpid)
for ticket_update in [row for row in ticket
if (row['bibref'] == str(rrecid) and
row['pid'] == rpid)]:
ticket_update["bibref"] = rbibref
del(ticket_update["incomplete"])
for ticket_remove in [row for row in ticket
if ('incomplete' in row)]:
ticket.remove(ticket_remove)
if ("bibrefs_auto_assigned" in pinfo):
del(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo):
del(pinfo["bibrefs_to_confirm"])
del(pinfo["bibref_check_reviewed_bibrefs"])
pinfo["bibref_check_required"] = False
session.save()
return ""
else:
bibrefs_auto_assigned = {}
bibrefs_to_confirm = {}
needs_review = []
# if ("bibrefs_auto_assigned" in pinfo
# and pinfo["bibrefs_auto_assigned"]):
# bibrefs_auto_assigned = pinfo["bibrefs_auto_assigned"]
#
# if ("bibrefs_to_confirm" in pinfo
# and pinfo["bibrefs_to_confirm"]):
# bibrefs_to_confirm = pinfo["bibrefs_to_confirm"]
for transaction in ticket:
if not webapi.is_valid_bibref(transaction['bibref']):
transaction['incomplete'] = True
needs_review.append(transaction)
if not needs_review:
pinfo["bibref_check_required"] = False
session.save()
return ""
for transaction in needs_review:
recid = webapi.wash_integer_id(transaction['bibref'])
if recid < 0:
continue #this doesn't look like a recid--discard!
pid = transaction['pid']
if ((pid in bibrefs_auto_assigned
and 'bibrecs' in bibrefs_auto_assigned[pid]
and recid in bibrefs_auto_assigned[pid]['bibrecs'])
or
(pid in bibrefs_to_confirm
and 'bibrecs' in bibrefs_to_confirm[pid]
and recid in bibrefs_to_confirm[pid]['bibrecs'])):
continue # we already assessed those bibrefs.
fctptr = webapi.get_possible_bibrefs_from_pid_bibrec
bibrec_refs = fctptr(pid, [recid])
person_name = webapi.get_most_frequent_name_from_pid(pid)
for brr in bibrec_refs:
if len(brr[1]) == 1:
if not pid in bibrefs_auto_assigned:
bibrefs_auto_assigned[pid] = {
'person_name': person_name,
'canonical_id': "TBA",
'bibrecs': {brr[0]: brr[1]}}
else:
bibrefs_auto_assigned[pid]['bibrecs'][brr[0]] = brr[1]
else:
if not brr[1]:
tmp = webapi.get_bibrefs_from_bibrecs([brr[0]])
try:
brr[1] = tmp[0][1]
except IndexError:
continue # No bibrefs on record--discard
if not pid in bibrefs_to_confirm:
bibrefs_to_confirm[pid] = {
'person_name': person_name,
'canonical_id': "TBA",
'bibrecs': {brr[0]: brr[1]}}
else:
bibrefs_to_confirm[pid]['bibrecs'][brr[0]] = brr[1]
if bibrefs_to_confirm or bibrefs_auto_assigned:
pinfo["bibref_check_required"] = True
baa = deepcopy(bibrefs_auto_assigned)
btc = deepcopy(bibrefs_to_confirm)
for pid in baa:
for rid in baa[pid]['bibrecs']:
baa[pid]['bibrecs'][rid] = []
for pid in btc:
for rid in btc[pid]['bibrecs']:
btc[pid]['bibrecs'][rid] = []
pinfo["bibrefs_auto_assigned"] = baa
pinfo["bibrefs_to_confirm"] = btc
else:
pinfo["bibref_check_required"] = False
session.save()
if 'external_first_entry' in pinfo and pinfo['external_first_entry']:
del(pinfo["external_first_entry"])
pinfo['external_first_entry_skip_review'] = True
session.save()
return "" # don't bother the user the first time
body = TEMPLATE.tmpl_bibref_check(bibrefs_auto_assigned,
bibrefs_to_confirm)
body = TEMPLATE.tmpl_person_detail_layout(body)
metaheaderadd = self._scripts(kill_browser_cache=True)
title = _("Submit Attribution Information")
return page(title=title,
metaheaderadd=metaheaderadd,
body=body,
req=req,
language=ln)
def _ticket_final_review(self, req):
'''
Displays to the user what can and cannot finally be done, leaving the
option of removing some transactions from the ticket before the commit.
'''
session = get_session(req)
uid = getUid(req)
userinfo = collect_user_info(uid)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
ticket = [row for row in ticket if not "execution_result" in row]
skip_checkout_page = True
upid = -1
user_first_name = ""
user_first_name_sys = False
user_last_name = ""
user_last_name_sys = False
user_email = ""
user_email_sys = False
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
if ("external_firstname" in userinfo
and userinfo["external_firstname"]):
user_first_name = userinfo["external_firstname"]
user_first_name_sys = True
elif "user_first_name" in pinfo and pinfo["user_first_name"]:
user_first_name = pinfo["user_first_name"]
if ("external_familyname" in userinfo
and userinfo["external_familyname"]):
user_last_name = userinfo["external_familyname"]
user_last_name_sys = True
elif "user_last_name" in pinfo and pinfo["user_last_name"]:
user_last_name = pinfo["user_last_name"]
if ("email" in userinfo
and not userinfo["email"] == "guest"):
user_email = userinfo["email"]
user_email_sys = True
elif "user_email" in pinfo and pinfo["user_email"]:
user_email = pinfo["user_email"]
pinfo["user_first_name"] = user_first_name
pinfo["user_first_name_sys"] = user_first_name_sys
pinfo["user_last_name"] = user_last_name
pinfo["user_last_name_sys"] = user_last_name_sys
pinfo["user_email"] = user_email
pinfo["user_email_sys"] = user_email_sys
if "upid" in pinfo and pinfo["upid"]:
upid = pinfo["upid"]
else:
dbpid = webapi.get_pid_from_uid(uid)
if dbpid and dbpid[1]:
if dbpid[0] and not dbpid[0] == -1:
upid = dbpid[0][0]
pinfo["upid"] = upid
session.save()
if not (user_first_name or user_last_name or user_email):
skip_checkout_page = False
if [row for row in ticket
if row["status"] in ["denied", "warning_granted",
"warning_denied"]]:
skip_checkout_page = False
if 'external_first_entry_skip_review' in pinfo and pinfo['external_first_entry_skip_review']:
del(pinfo["external_first_entry_skip_review"])
skip_checkout_page = True
session.save()
if (not ticket or skip_checkout_page
or ("checkout_confirmed" in pinfo
and pinfo["checkout_confirmed"]
and "checkout_faulty_fields" in pinfo
and not pinfo["checkout_faulty_fields"])):
self.adf['ticket_commit'][ulevel](req)
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
if "bibref_check_required" in pinfo:
del(pinfo["bibref_check_required"])
# if "user_ticket_comments" in pinfo:
# del(pinfo["user_ticket_comments"])
session.save()
return self._ticket_dispatch_end(req)
for tt in list(ticket):
if not 'bibref' in tt or not 'pid' in tt:
ticket.remove(tt)
continue
tt['authorname_rec'] = get_bibrefrec_name_string(tt['bibref'])
tt['person_name'] = webapi.get_most_frequent_name_from_pid(tt['pid'])
mark_yours = []
mark_not_yours = []
if upid >= 0:
mark_yours = [row for row in ticket
if (str(row["pid"]) == str(upid) and
row["action"] in ["to_other_person", "confirm"])]
mark_not_yours = [row for row in ticket
if (str(row["pid"]) == str(upid) and
row["action"] in ["repeal", "reset"])]
mark_theirs = [row for row in ticket
if ((not str(row["pid"]) == str(upid)) and
row["action"] in ["to_other_person", "confirm"])]
mark_not_theirs = [row for row in ticket
if ((not str(row["pid"]) == str(upid)) and
row["action"] in ["repeal", "reset"])]
session.save()
body = TEMPLATE.tmpl_ticket_final_review(req, mark_yours,
mark_not_yours,
mark_theirs,
mark_not_theirs)
body = TEMPLATE.tmpl_person_detail_layout(body)
metaheaderadd = self._scripts(kill_browser_cache=True)
title = _("Please review your actions")
#body = body + '<pre>' + pformat(pinfo) + '</pre>'
return page(title=title,
metaheaderadd=metaheaderadd,
body=body,
req=req,
language=ln)
def _ticket_commit_admin(self, req):
'''
Actual execution of the ticket transactions
'''
self._clean_ticket(req)
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
userinfo = {'uid-ip': "%s||%s" % (uid, req.remote_ip)}
if "user_ticket_comments" in pinfo:
userinfo['comments'] = pinfo["user_ticket_comments"]
if "user_first_name" in pinfo:
userinfo['firstname'] = pinfo["user_first_name"]
if "user_last_name" in pinfo:
userinfo['lastname'] = pinfo["user_last_name"]
if "user_email" in pinfo:
userinfo['email'] = pinfo["user_email"]
pids_to_update = set()
for t in ticket:
t['execution_result'] = webapi.execute_action(t['action'], t['pid'], t['bibref'], uid,
pids_to_update,
userinfo['uid-ip'], str(userinfo))
update_personID_names_string_set(pids_to_update)
session.save()
def _ticket_commit_user(self, req):
'''
Actual execution of the ticket transactions
'''
self._clean_ticket(req)
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
ok_tickets = []
userinfo = {'uid-ip': "%s||%s" % (uid, req.remote_ip)}
if "user_ticket_comments" in pinfo:
userinfo['comments'] = pinfo["user_ticket_comments"]
if "user_first_name" in pinfo:
userinfo['firstname'] = pinfo["user_first_name"]
if "user_last_name" in pinfo:
userinfo['lastname'] = pinfo["user_last_name"]
if "user_email" in pinfo:
userinfo['email'] = pinfo["user_email"]
pids_to_update = set()
for t in list(ticket):
if t['status'] in ['granted', 'warning_granted']:
t['execution_result'] = webapi.execute_action(t['action'],
t['pid'], t['bibref'], uid,
pids_to_update,
userinfo['uid-ip'], str(userinfo))
ok_tickets.append(t)
ticket.remove(t)
update_personID_names_string_set(pids_to_update)
if ticket:
webapi.create_request_ticket(userinfo, ticket)
if CFG_INSPIRE_SITE and ok_tickets:
webapi.send_user_commit_notification_email(userinfo, ok_tickets)
for t in ticket:
t['execution_result'] = True
ticket[:] = ok_tickets
session.save()
def _ticket_commit_guest(self, req):
'''
Actual execution of the ticket transactions
'''
self._clean_ticket(req)
session = get_session(req)
pinfo = session["personinfo"]
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip)}
if "user_ticket_comments" in pinfo:
if pinfo["user_ticket_comments"]:
userinfo['comments'] = pinfo["user_ticket_comments"]
else:
userinfo['comments'] = "No comments submitted."
if "user_first_name" in pinfo:
userinfo['firstname'] = pinfo["user_first_name"]
if "user_last_name" in pinfo:
userinfo['lastname'] = pinfo["user_last_name"]
if "user_email" in pinfo:
userinfo['email'] = pinfo["user_email"]
ticket = pinfo['ticket']
webapi.create_request_ticket(userinfo, ticket)
for t in ticket:
t['execution_result'] = True
session.save()
def _ticket_dispatch_end(self, req):
'''
The ticket dispatch is finished; redirect to the referring page or to
the last viewed person page (claimpaper_admin_last_viewed_pid).
'''
session = get_session(req)
pinfo = session["personinfo"]
if 'claim_in_process' in pinfo:
pinfo['claim_in_process'] = False
uinfo = collect_user_info(req)
uinfo['precached_viewclaimlink'] = True
uid = getUid(req)
set_user_preferences(uid, uinfo)
if "referer" in pinfo and pinfo["referer"]:
referer = pinfo["referer"]
del(pinfo["referer"])
session.save()
return redirect_to_url(req, referer)
return redirect_to_url(req, "%s/person/%s?open_claim=True" % (CFG_SITE_URL,
webapi.get_person_redirect_link(
pinfo["claimpaper_admin_last_viewed_pid"])))
def _clean_ticket(self, req):
'''
Removes from a ticket the transactions with an execution_result flag
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
for t in list(ticket):
if 'execution_result' in t:
ticket.remove(t)
session.save()
def __get_user_role(self, req):
'''
Determines whether a user is guest, user or admin
'''
minrole = 'guest'
role = 'guest'
if not req:
return minrole
uid = getUid(req)
if not isinstance(uid, int):
return minrole
admin_role_id = acc_get_role_id(CLAIMPAPER_ADMIN_ROLE)
user_role_id = acc_get_role_id(CLAIMPAPER_USER_ROLE)
user_roles = acc_get_user_roles(uid)
if admin_role_id in user_roles:
role = 'admin'
elif user_role_id in user_roles:
role = 'user'
if role == 'guest' and webapi.is_external_user(uid):
role = 'user'
return role
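# Illustrative outcome of the checks above: a uid holding CLAIMPAPER_ADMIN_ROLE
# resolves to 'admin', one holding CLAIMPAPER_USER_ROLE to 'user'; external
# users (webapi.is_external_user) are promoted from 'guest' to 'user'; anyone
# else remains 'guest'.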
def __user_is_authorized(self, req, action):
'''
Determines if a given user is authorized to perform a specified action
@param req: Apache Request Object
@type req: Apache Request Object
@param action: the action the user wants to perform
@type action: string
@return: True if user is allowed to perform the action, False if not
@rtype: boolean
'''
if not req:
return False
if not action:
return False
else:
action = escape(action)
uid = getUid(req)
if not isinstance(uid, int):
return False
if uid == 0:
return False
allowance = [i[1] for i in acc_find_user_role_actions({'uid': uid})
if i[1] == action]
if allowance:
return True
return False
def _scripts(self, kill_browser_cache=False):
'''
Returns html code to be included in the meta header of the html page.
The actual code is stored in the template.
@return: html formatted Javascript and CSS inclusions for the <head>
@rtype: string
'''
return TEMPLATE.tmpl_meta_includes(kill_browser_cache)
def _check_user_fields(self, req, form):
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'user_first_name': (str, None),
'user_last_name': (str, None),
'user_email': (str, None),
'user_comments': (str, None)})
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
skip_checkout_faulty_fields = False
if ulevel in ['user', 'admin']:
skip_checkout_faulty_fields = True
if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
if "user_first_name" in argd:
if not argd["user_first_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_first_name")
else:
pinfo["user_first_name"] = escape(argd["user_first_name"])
if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
if "user_last_name" in argd:
if not argd["user_last_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_last_name")
else:
pinfo["user_last_name"] = escape(argd["user_last_name"])
if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
if "user_email" in argd:
if (not argd["user_email"]
or not email_valid_p(argd["user_email"])):
pinfo["checkout_faulty_fields"].append("user_email")
else:
pinfo["user_email"] = escape(argd["user_email"])
if (ulevel == "guest"
and emailUnique(argd["user_email"]) > 0):
pinfo["checkout_faulty_fields"].append("user_email_taken")
if "user_comments" in argd:
if argd["user_comments"]:
pinfo["user_ticket_comments"] = escape(argd["user_comments"])
else:
pinfo["user_ticket_comments"] = ""
session.save()
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as the action dispatcher for interface mass-action requests.
Valid mass actions are:
- confirm: confirm assignments to a person
- repeal: repeal assignments from a person
- reset: reset assignments of a person
- cancel: clean the session (erase tickets and so on)
- to_other_person: assign a document from a person to another person
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
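# Illustrative request (assumed values): confirming two papers for person 14
# would arrive here roughly as
#   form = {'confirm': 'True', 'pid': '14',
#           'selection': ['100:1442,155', '100:5522,1431']}
# which creates one ticket transaction per selected bibref further below.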
self._session_bareinit(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'pid': (int, None),
'confirm': (str, None),
'repeal': (str, None),
'reset': (str, None),
'cancel': (str, None),
'cancel_stage': (str, None),
'bibref_check_submit': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_submit': (str, None),
'checkout_remove_transaction': (str, None),
'to_other_person': (str, None),
'cancel_search_ticket': (str, None),
'user_first_name': (str, None),
'user_last_name': (str, None),
'user_email': (str, None),
'user_comments': (str, None),
'claim': (str, None),
'cancel_rt_ticket': (str, None),
'commit_rt_ticket': (str, None),
'rt_id': (int, None),
'rt_action': (str, None),
'selection': (list, []),
'set_canonical_name': (str, None),
'canonical_name': (str, None)})
ln = argd['ln']
# ln = wash_language(argd['ln'])
pid = None
action = None
bibrefs = None
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
tempticket = []
if not "ln" in pinfo:
pinfo["ln"] = ln
session.save()
if 'confirm' in argd and argd['confirm']:
action = 'confirm'
elif 'repeal' in argd and argd['repeal']:
action = 'repeal'
elif 'reset' in argd and argd['reset']:
action = 'reset'
elif 'bibref_check_submit' in argd and argd['bibref_check_submit']:
action = 'bibref_check_submit'
elif 'cancel' in argd and argd['cancel']:
action = 'cancel'
elif 'cancel_stage' in argd and argd['cancel_stage']:
action = 'cancel_stage'
elif 'cancel_search_ticket' in argd and argd['cancel_search_ticket']:
action = 'cancel_search_ticket'
elif 'checkout' in argd and argd['checkout']:
action = 'checkout'
elif 'checkout_submit' in argd and argd['checkout_submit']:
action = 'checkout_submit'
elif ('checkout_remove_transaction' in argd
and argd['checkout_remove_transaction']):
action = 'checkout_remove_transaction'
elif ('checkout_continue_claiming' in argd
and argd['checkout_continue_claiming']):
action = "checkout_continue_claiming"
elif 'cancel_rt_ticket' in argd and argd['cancel_rt_ticket']:
action = 'cancel_rt_ticket'
elif 'commit_rt_ticket' in argd and argd['commit_rt_ticket']:
action = 'commit_rt_ticket'
elif 'to_other_person' in argd and argd['to_other_person']:
action = 'to_other_person'
elif 'claim' in argd and argd['claim']:
action = 'claim'
elif 'set_canonical_name' in argd and argd['set_canonical_name']:
action = 'set_canonical_name'
no_access = self._page_access_permission_wall(req, pid)
if no_access and not action in ["claim"]:
return no_access
if action in ['to_other_person', 'claim']:
if 'selection' in argd and len(argd['selection']) > 0:
bibrefs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
if action == 'claim':
return self._ticket_open_claim(req, bibrefs, ln)
else:
return self._ticket_open_assign_to_other_person(req, bibrefs, form)
if action in ["cancel_stage"]:
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.save()
return self._ticket_dispatch_end(req)
if action in ["checkout_submit"]:
pinfo["checkout_faulty_fields"] = []
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
else:
pinfo["checkout_confirmed"] = True
session.save()
return self.adf['ticket_dispatch'][ulevel](req)
#return self._ticket_final_review(req)
if action in ["checkout_remove_transaction"]:
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket
if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.save()
return self.adf['ticket_dispatch'][ulevel](req)
#return self._ticket_final_review(req)
if action in ["checkout_continue_claiming"]:
pinfo["checkout_faulty_fields"] = []
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
if (action in ['bibref_check_submit']
or (not action
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"])):
if not action in ['bibref_check_submit']:
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.save()
return self.adf['ticket_dispatch'][ulevel](req)
pinfo["bibref_check_reviewed_bibrefs"] = []
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = []
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = []
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
for element in elements:
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref) and
tpid > -1):
add_rev(element + "," + str(bibrec))
session.save()
return self.adf['ticket_dispatch'][ulevel](req)
if not action:
return self._error_page(req, ln,
"Fatal: cannot create ticket if no action selected.")
if action in ['confirm', 'repeal', 'reset']:
if 'pid' in argd:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without a person id!")
if 'selection' in argd and len(argd['selection']) > 0:
bibrefs = argd['selection']
else:
if pid == -3:
return self._error_page(req, ln,
"Fatal: Please select a paper to assign to the new person first!")
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any paper selected!")
if 'rt_id' in argd and argd['rt_id']:
rt_id = argd['rt_id']
for b in bibrefs:
self._cancel_transaction_from_rt_ticket(rt_id, pid, action, b)
#create temporary ticket
if pid == -3:
pid = webapi.create_new_person(uid)
for bibref in bibrefs:
tempticket.append({'pid': pid, 'bibref': bibref, 'action': action})
#check if ticket targets (bibref for pid) are already in ticket
for t in tempticket:
for e in list(ticket):
if e['pid'] == t['pid'] and e['bibref'] == t['bibref']:
ticket.remove(e)
ticket.append(t)
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
#start ticket processing chain
pinfo["claimpaper_admin_last_viewed_pid"] = pid
session.save()
return self.adf['ticket_dispatch'][ulevel](req)
# return self.perform(req, form)
elif action in ['cancel']:
self.__session_cleanup(req)
# return self._error_page(req, ln,
# "Not an error! Session cleaned! but "
# "redirect to be implemented")
return self._ticket_dispatch_end(req)
elif action in ['cancel_search_ticket']:
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.save()
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
return redirect_to_url(req, "/person/%s" % webapi.get_person_redirect_link(pid))
return self.search(req, form)
elif action in ['checkout']:
return self.adf['ticket_dispatch'][ulevel](req)
#return self._ticket_final_review(req)
elif action in ['cancel_rt_ticket', 'commit_rt_ticket']:
if 'selection' in argd and len(argd['selection']) > 0:
bibref = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if 'pid' in argd and argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if action == 'cancel_rt_ticket':
if 'rt_id' in argd and argd['rt_id'] and 'rt_action' in argd and argd['rt_action']:
rt_id = argd['rt_id']
rt_action = argd['rt_action']
if 'selection' in argd and len(argd['selection']) > 0:
bibrefs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: no bibref")
for b in bibrefs:
self._cancel_transaction_from_rt_ticket(rt_id, pid, rt_action, b)
return redirect_to_url(req, "/person/%s" % webapi.get_person_redirect_link(pid))
return self._cancel_rt_ticket(req, bibref[0], pid)
elif action == 'commit_rt_ticket':
return self._commit_rt_ticket(req, bibref[0], pid)
elif action == 'set_canonical_name':
if 'pid' in argd and argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot set canonical name to unknown person")
if 'canonical_name' in argd and argd['canonical_name']:
cname = argd['canonical_name']
else:
return self._error_page(req, ln,
"Fatal: cannot set a custom canonical name without a suggestion")
uid = getUid(req)
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.update_person_canonical_name(pid, cname, userinfo)
return redirect_to_url(req, "/person/%s" % webapi.get_person_redirect_link(pid))
else:
return self._error_page(req, ln,
"Fatal: What were I supposed to do?")
def _ticket_open_claim(self, req, bibrefs, ln):
'''
Generate a page to let the user choose how to proceed
@param req: Apache Request Object
@type req: Apache Request Object
@param bibrefs: list of record IDs to perform an action on
@type bibrefs: list of int
@param ln: language to display the page in
@type ln: string
'''
session = get_session(req)
uid = getUid(req)
uinfo = collect_user_info(req)
pinfo = session["personinfo"]
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
no_access = self._page_access_permission_wall(req)
session.save()
pid = -1
search_enabled = True
if not no_access and uinfo["precached_usepaperclaim"]:
tpid = webapi.get_pid_from_uid(uid)
if tpid and tpid[0] and tpid[1] and tpid[0][0]:
pid = tpid[0][0]
if (not no_access
and "claimpaper_admin_last_viewed_pid" in pinfo
and pinfo["claimpaper_admin_last_viewed_pid"]):
names = webapi.get_person_names_from_id(pinfo["claimpaper_admin_last_viewed_pid"])
names = sorted([i for i in names], key=lambda k: k[1], reverse=True)
if len(names) > 0:
if len(names[0]) > 0:
last_viewed_pid = [pinfo["claimpaper_admin_last_viewed_pid"], names[0][0]]
else:
last_viewed_pid = False
else:
last_viewed_pid = False
else:
last_viewed_pid = False
if no_access:
search_enabled = False
pinfo["referer"] = uinfo["referer"]
session.save()
body = TEMPLATE.tmpl_open_claim(bibrefs, pid, last_viewed_pid,
search_enabled=search_enabled)
body = TEMPLATE.tmpl_person_detail_layout(body)
title = _('Claim this paper')
metaheaderadd = self._scripts(kill_browser_cache=True)
return page(title=title,
metaheaderadd=metaheaderadd,
body=body,
req=req,
language=ln)
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'confirm'
search_ticket['bibrefs'] = bibrefs
session.save()
return self.search(req, form)
def comments(self, req, form):
return ""
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "/person/%s" %
webapi.get_person_redirect_link(str(pid)))
def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
'''
deletes a transaction from an rt ticket
'''
webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, bibref, pid):
'''
Commit of an rt ticket: creates a real ticket and commits.
'''
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
open_rt_tickets = webapi.get_person_request_ticket(pid)
tic = [a for a in open_rt_tickets if str(a[1]) == str(bibref)]
if len(tic) > 0:
tic = tic[0][0]
#create temporary ticket
tempticket = []
for t in tic:
if t[0] in ['confirm', 'repeal']:
tempticket.append({'pid': pid, 'bibref': t[1], 'action': t[0]})
#check if ticket targets (bibref for pid) are already in ticket
for t in tempticket:
for e in list(ticket):
if e['pid'] == t['pid'] and e['bibref'] == t['bibref']:
ticket.remove(e)
ticket.append(t)
session.save()
#start ticket processing chain
webapi.delete_request_ticket(pid, bibref)
return self.adf['ticket_dispatch'][ulevel](req)
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# clear up bibref checker if it's done.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
#pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.save()
def _generate_search_ticket_box(self, req):
'''
Generate the search ticket to remember a pending search for Person
entities in an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
teaser = _('Person search for assignment in progress!')
message = _('You are searching for a person to assign the following papers:')
return TEMPLATE.tmpl_search_ticket_box(teaser, message, search_ticket)
def search(self, req, form, is_fallback=False, fallback_query='', fallback_title='', fallback_message=''):
'''
Function used for searching a person based on a name with which the
function is queried.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@return: string
'''
self._session_bareinit(req)
session = get_session(req)
no_access = self._page_access_permission_wall(req)
new_person_link = False
if no_access:
return no_access
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
new_person_link = True
body = ''
if search_ticket:
body = body + self._generate_search_ticket_box(req)
max_num_show_papers = 5
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
ln = argd['ln']
# ln = wash_language(argd['ln'])
query = None
recid = None
nquery = None
search_results = None
title = "Person Search"
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
if is_fallback and fallback_query:
query = fallback_query
if query:
authors = []
if query.count(":"):
try:
left, right = query.split(":")
try:
recid = int(left)
nquery = str(right)
except (ValueError, TypeError):
try:
recid = int(right)
nquery = str(left)
except (ValueError, TypeError):
recid = None
nquery = query
except ValueError:
recid = None
nquery = query
else:
nquery = query
sorted_results = webapi.search_person_ids_by_name(nquery)
for index, results in enumerate(sorted_results):
pid = results[0]
# authorpapers = webapi.get_papers_by_person_id(pid, -1)
# authorpapers = sorted(authorpapers, key=itemgetter(0),
# reverse=True)
if index < bconfig.PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT:
authorpapers = [[paper] for paper in
sort_records(None, [i[0] for i in
webapi.get_papers_by_person_id(pid, -1)],
sort_field="year", sort_order="a")]
else:
authorpapers = [['Not retrieved, to keep the search fast.']]
if (recid and
not (str(recid) in [row[0] for row in authorpapers])):
continue
authors.append([results[0], results[1],
authorpapers[0:max_num_show_papers], len(authorpapers)])
search_results = authors
if recid and (len(search_results) == 1) and not is_fallback:
return redirect_to_url(req, "/person/%s" % search_results[0][0])
body = body + TEMPLATE.tmpl_author_search(query, search_results, search_ticket, author_pages_mode=True, fallback_mode=is_fallback,
fallback_title=fallback_title, fallback_message=fallback_message, new_person_link=new_person_link)
if not is_fallback:
body = TEMPLATE.tmpl_person_detail_layout(body)
return page(title=title,
metaheaderadd=self._scripts(kill_browser_cache=True),
body=body,
req=req,
language=ln)
def claimstub(self, req, form):
'''
Generate stub page before claiming process
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'person': (str, '')})
ln = argd['ln']
# ln = wash_language(argd['ln'])
_ = gettext_set_language(ln)
person = '-1'
if 'person' in argd and argd['person']:
person = argd['person']
session = get_session(req)
try:
pinfo = session["personinfo"]
if pinfo['ulevel'] == 'admin':
return redirect_to_url(req, '%s/person/%s?open_claim=True' % (CFG_SITE_URL, person))
except KeyError:
pass
if bconfig.BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE:
return redirect_to_url(req, '%s/person/%s?open_claim=True' % (CFG_SITE_URL, person))
body = TEMPLATE.tmpl_claim_stub(person)
pstr = 'Person ID missing or invalid'
if person != '-1':
pstr = person
title = _('You are going to claim papers for: %s') % pstr
return page(title=title,
metaheaderadd=self._scripts(kill_browser_cache=True),
body=body,
req=req,
language=ln)
def welcome(self, req, form):
'''
Generate SSO landing/welcome page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
uid = getUid(req)
self._session_bareinit(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
# ln = wash_language(argd['ln'])
_ = gettext_set_language(ln)
if uid == 0:
return page_not_authorized(req, text=_("This page is not accessible directly."))
title_message = _('Welcome!')
# start continuous writing to the browser...
req.content_type = "text/html"
req.send_http_header()
ssl_param = 0
if req.is_https():
ssl_param = 1
req.write(pageheaderonly(req=req, title=title_message,
language=ln, secure_page_p=ssl_param))
req.write(TEMPLATE.tmpl_welcome_start())
body = ""
if CFG_INSPIRE_SITE:
body = TEMPLATE.tmpl_welcome_arxiv()
else:
body = TEMPLATE.tmpl_welcome()
req.write(body)
# now do what will take time...
pid = webapi.arxiv_login(req)
# session must be read after webapi.arxiv_login has done its work
session = get_session(req)
pinfo = session["personinfo"]
pinfo["claimpaper_admin_last_viewed_pid"] = pid
session.save()
link = TEMPLATE.tmpl_welcome_link()
req.write(link)
req.write("<br><br>")
uinfo = collect_user_info(req)
arxivp = []
if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
try:
for i in uinfo['external_arxivids'].split(';'):
arxivp.append(i)
except (IndexError, KeyError):
pass
req.write(TEMPLATE.tmpl_welcome_arXiv_papers(arxivp))
if CFG_INSPIRE_SITE:
# log arXiv logins, for debugging purposes.
dbg = ('uinfo= ' + str(uinfo) + '\npinfo= ' + str(pinfo) + '\nreq= ' + str(req)
+ '\nsession= ' + str(session))
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.insert_log(userinfo, pid, 'arXiv_login', 'dbg', '', comment=dbg)
req.write(TEMPLATE.tmpl_welcome_end())
req.write(pagefooteronly(req=req))
def tickets_admin(self, req, form):
'''
Generate SSO landing/welcome page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
self._session_bareinit(req)
no_access = self._page_access_permission_wall(req, req_level='admin')
if no_access:
return no_access
tickets = webapi.get_persons_with_open_tickets_list()
tickets = list(tickets)
for t in list(tickets):
tickets.remove(t)
tickets.append([webapi.get_most_frequent_name_from_pid(int(t[0])),
webapi.get_person_redirect_link(t[0]), t[0], t[1]])
body = TEMPLATE.tmpl_tickets_admin(tickets)
body = TEMPLATE.tmpl_person_detail_layout(body)
title = 'Open RT tickets'
return page(title=title,
metaheaderadd=self._scripts(),
body=body,
req=req)
def export(self, req, form):
'''
Generate JSONized export of Person data
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'request': (str, None),
'userid': (str, None)})
if not CFG_JSON_AVAILABLE:
return "500_json_not_found__install_package"
# session = get_session(req)
request = None
userid = None
if "userid" in argd and argd['userid']:
userid = argd['userid']
else:
return "404_user_not_found"
if "request" in argd and argd['request']:
request = argd["request"]
# find user from ID
user_email = get_email_from_username(userid)
if user_email == userid:
return "404_user_not_found"
uid = get_uid_from_email(user_email)
uinfo = collect_user_info(uid)
# find person by uid
pid = webapi.get_pid_from_uid(uid)
# find papers by pid that have been confirmed by a human.
papers = webapi.get_papers_by_person_id(pid, 2)
# filter by request param, e.g. arxiv
if not request:
return "404__no_filter_selected"
if not request in bconfig.VALID_EXPORT_FILTERS:
return "500_filter_invalid"
if request == "arxiv":
query = "(recid:"
query += " OR recid:".join(papers)
query += ") AND 037:arxiv"
db_docs = perform_request_search(p=query)
nickmail = ""
nickname = ""
db_arxiv_ids = []
try:
nickname = uinfo["nickname"]
except KeyError:
pass
if not nickname:
try:
nickmail = uinfo["email"]
except KeyError:
nickmail = user_email
nickname = nickmail
db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
construct = {"nickname": nickname,
"claims": ";".join(db_arxiv_ids)}
jsondmp = json.dumps(construct)
signature = webapi.sign_assertion("arXiv", jsondmp)
construct["digest"] = signature
return json.dumps(construct)
index = __call__
me = welcome
you = welcome
# pylint: enable=C0301
# pylint: enable=W0613
| robk5uj/invenio | modules/bibauthorid/lib/bibauthorid_webinterface.py | Python | gpl-2.0 | 99,247 | ["ADF"] | fb57e4f7b214b71ed7f50fd59cf1c3d5c6042113cd824ee728052d22648446fb |
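The search() method in the bibauthorid web interface above accepts combined queries of the form "recid:name" or "name:recid". A minimal standalone sketch of that parsing step (the helper name split_person_query is hypothetical, and maxsplit=1 simplifies the original try/except around query.split(":")):

def split_person_query(query):
    """Split a 'recid:name' or 'name:recid' query into (recid, name).

    Hypothetical helper sketching the parsing done in search() above;
    returns (None, query) when no record ID can be extracted.
    """
    if ":" not in query:
        return None, query
    left, right = query.split(":", 1)
    for recid_part, name_part in ((left, right), (right, left)):
        try:
            return int(recid_part), str(name_part)
        except (ValueError, TypeError):
            continue
    return None, query

# Both orderings resolve to the same pair; plain names pass through unchanged.
assert split_person_query("1234:Ellis") == (1234, "Ellis")
assert split_person_query("Ellis:1234") == (1234, "Ellis")
assert split_person_query("Ellis, J") == (None, "Ellis, J")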
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016 Haggi Krey, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import pymel.core as pm
import maya.OpenMayaMPx as OpenMayaMPx
import sys
log = logging.getLogger("mtapLogger")
def binMeshTranslatorOpts(parent, action, initialSettings, resultCallback):
useTransform = True
oneFilePerMesh = False
createProxies = True
proxyRes = 0.1
exportDir = ""
if initialSettings is not None and len(initialSettings) > 0:
#oneFilePerMesh=0;createProxies=1;proxyRes=0.1;exportDir=;createProxies=1
opts = initialSettings.split(";")
for opt in opts:
name, value = opt.split("=")
if name == "oneFilePerMesh":
oneFilePerMesh = int(value)
if name == "createProxies":
createProxies = int(value)
if name == "useTransform":
useTransform = int(value)
if name == "proxyRes":
proxyRes = float(value)
if action == "post":
pm.setParent(parent)
with pm.columnLayout(adj = True):
pm.checkBox("MSH_OPTS_DOTRANSFORM", label = "Use Transform", v=useTransform)
pm.checkBox("MSH_OPTS_ONEFILE", label = "One File Per Mesh", v=oneFilePerMesh)
pm.checkBox("MSH_OPTS_DOPROX", label = "Create ProxyFiles", v=createProxies)
pm.floatFieldGrp("MSH_OPTS_PROXPERC", label="Proxy Resolution", v1 = proxyRes)
if action == "query":
resultOptions = ""
oneFilePerMesh = pm.checkBox("MSH_OPTS_ONEFILE", query=True, v=True)
resultOptions += "oneFilePerMesh={0}".format(int(oneFilePerMesh))
doProx = pm.checkBox("MSH_OPTS_DOPROX", query=True, v=True)
resultOptions += ";createProxies={0}".format(int(doProx))
proxyRes = pm.floatFieldGrp("MSH_OPTS_PROXPERC", query=True, v1 = True)
resultOptions += ";proxyRes={0}".format(proxyRes)
doTransform = pm.checkBox("MSH_OPTS_DOTRANSFORM", query=True, v=True)
resultOptions += ";useTransform={0}".format(int(doTransform))
melCmd = '{0} "{1}"'.format(resultCallback,resultOptions)
pm.mel.eval(melCmd)
return 1
def binMeshTranslatorWrite(fileName, optionString, accessMode):
exportPath = fileName
createProxies = False
proxyRes = 0.1
all = False
oneFilePerMesh = False
useTransform = False
# the option string starts with a ';', so the first element is empty and cannot be split
opts = optionString.split(";")
for opt in opts:
try:
name, value = opt.split("=")
if name == "oneFilePerMesh":
oneFilePerMesh = int(value)
if name == "createProxies":
createProxies = int(value)
if name == "useTransform":
useTransform = int(value)
if name == "proxyRes":
proxyRes = float(value)
except:
pass
if accessMode == "selected":
selection = []
for object in pm.ls(sl=True):
selection.extend(object.getChildren(ad=True, type="mesh"))
if len(selection) == 0:
raise RuntimeError("No mesh shapes found in the current selection to export.")
pm.binMeshWriterCmd(selection, path=exportPath, doProxy=createProxies, percentage=proxyRes, doTransform=useTransform, oneFilePerMesh=oneFilePerMesh)
if accessMode == "all":
pm.binMeshWriterCmd(path=exportPath, doProxy=createProxies, percentage=proxyRes, doTransform=useTransform, all=True, oneFilePerMesh=oneFilePerMesh)
return True
def binMeshTranslatorRead(fileName, optionString, accessMode):
importPath = fileName
createProxies = False
proxyRes = 0.1
# the option string starts with a ';', so the first element is empty and cannot be split
opts = optionString.split(";")
for opt in opts:
try:
name, value = opt.split("=")
if name == "createProxies":
createProxies = int(value)
if name == "proxyRes":
proxyRes = float(value)
except:
pass
pm.binMeshReaderCmd(path=importPath)
return True
def binMeshCheckAndCreateShadingGroup(shadingGroup):
try:
shadingGroup = pm.PyNode(shadingGroup)
except:
shader = pm.shadingNode("appleseedSurfaceShader", asShader=True)
shadingGroup = pm.sets(renderable=True, noSurfaceShader=True, empty=True, name=shadingGroup)
shader.outColor >> shadingGroup.surfaceShader
# I'd prefer to use the connected polyshape directly, but because of the connections in a multi-shader assignment
# there is no direct connection from the creator node to the polymesh; it goes through some groupId nodes, which makes
# it complicated to search for the first connected mesh via the API. So I simply call the script with the creator node
# and let the Python script search for the mesh.
def listConnections(node, allList, meshList):
for con in node.outputs(sh=True):
if not con in allList:
allList.append(con)
if con.type() == 'mesh':
meshList.append(con)
else:
listConnections(con, allList, meshList)
def getConnectedPolyShape(creatorShape):
try:
creatorShape = pm.PyNode(creatorShape)
except:
return None
meshList = []
allList = []
listConnections(creatorShape, allList, meshList)
return meshList
# perFaceAssingments is a list of lists: one face id list for every shading group
def binMeshAssignShader(creatorShape = None, shadingGroupList=[], perFaceAssingments=[]):
if not creatorShape:
return
if pm.PyNode(creatorShape).type() != "mtap_standinMeshNode":
log.error("binMeshAssignShader: Node {0} is not a creator shape".format(creatorShape))
return
polyShapeList = getConnectedPolyShape(creatorShape)
for polyShape in polyShapeList:
for index, shadingGroup in enumerate(shadingGroupList):
binMeshCheckAndCreateShadingGroup(shadingGroup)
assignments = perFaceAssingments[index]
print assignments
faceSelection = []
for polyId in assignments:
faceSelection.append(polyId)
fs = polyShape.f[faceSelection]
pm.sets(shadingGroup, e=True, forceElement=str(fs))
| haggi/appleseed-maya | module/scripts/appleseed_maya/binmeshtranslator.py | Python | mit | 7,523 | ["VisIt"] | 62b90ea684f2a4e53f4e4e71f9ffd956895e2760d8648d8017f1577be1918e43 |
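The three translator entry points above all parse the same semicolon-separated option string by hand. A small sketch of that format, using a hypothetical parse_options helper with typed defaults:

def parse_options(option_string, defaults):
    """Parse a Maya translator option string such as
    'oneFilePerMesh=0;createProxies=1;proxyRes=0.1' into a dict.

    Hypothetical helper; each value is cast to the type of its default.
    """
    options = dict(defaults)
    for opt in option_string.split(";"):
        if "=" not in opt:  # the leading ';' produces an empty first element
            continue
        name, value = opt.split("=", 1)
        if name in options:
            options[name] = type(options[name])(value)
    return options

defaults = {"oneFilePerMesh": 0, "createProxies": 1, "useTransform": 1, "proxyRes": 0.1}
print(parse_options(";oneFilePerMesh=1;proxyRes=0.25", defaults))
# {'oneFilePerMesh': 1, 'createProxies': 1, 'useTransform': 1, 'proxyRes': 0.25}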
# Copyright (C) 2020
# Max Planck Institute for Polymer Research & JGU Mainz
# Copyright (C) 2012,2013,2015,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************
espressopp.VerletList
*********************
.. function:: espressopp.VerletList(system, cutoff, exclusionlist, useBuffers, useSOA)
:param system:
:param cutoff:
:param exclusionlist: (default: [])
:param useBuffers: Whether particle neighbors are buffered to improve rebuild times. (default: True)
:param useSOA: Whether the alternative structure of arrays form is used for buffers. (default: False)
:type system:
:type cutoff:
:type exclusionlist:
:type useBuffers:
:type useSOA:
.. function:: espressopp.VerletList.exclude(exclusionlist)
:param exclusionlist:
:type exclusionlist:
:rtype:
.. function:: espressopp.VerletList.getAllPairs()
:rtype:
.. function:: espressopp.VerletList.localSize()
:rtype:
.. function:: espressopp.VerletList.totalSize()
:rtype:
"""
from espressopp import pmi
import _espressopp
import espressopp
from espressopp.esutil import cxxinit
class VerletListLocal(_espressopp.VerletList):
def __init__(self, system, cutoff, exclusionlist=[], useBuffers=True, useSOA=False):
if pmi.workerIsActive():
if (exclusionlist == []):
# rebuild list in constructor
cxxinit(self, _espressopp.VerletList, system, cutoff, True, useBuffers, useSOA)
else:
# do not rebuild list in constructor
cxxinit(self, _espressopp.VerletList, system, cutoff, False, useBuffers, useSOA)
# add exclusions
for pair in exclusionlist:
pid1, pid2 = pair
self.cxxclass.exclude(self, pid1, pid2)
# now rebuild list with exclusions
self.cxxclass.rebuild(self)
def totalSize(self):
if pmi.workerIsActive():
return self.cxxclass.totalSize(self)
def localSize(self):
if pmi.workerIsActive():
return self.cxxclass.localSize(self)
def exclude(self, exclusionlist):
"""
Each processor takes the broadcasted exclusion list
and adds it to its list.
"""
if pmi.workerIsActive():
for pair in exclusionlist:
pid1, pid2 = pair
self.cxxclass.exclude(self, pid1, pid2)
# rebuild list with exclusions
self.cxxclass.rebuild(self)
def getAllPairs(self):
if pmi.workerIsActive():
pairs=[]
npairs=self.localSize()
for i in xrange(npairs):
pair=self.cxxclass.getPair(self, i+1)
pairs.append(pair)
return pairs
if pmi.isController:
class VerletList(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.VerletListLocal',
pmiproperty = [ 'builds' ],
pmicall = [ 'totalSize', 'exclude', 'connect', 'disconnect', 'getVerletCutoff', 'resetTimers' ],
pmiinvoke = [ 'getAllPairs','getTimers' ]
)
| govarguz/espressopp | src/VerletList.py | Python | gpl-3.0 | 3,985 | ["ESPResSo"] | c3b13c55888a10ef6604455b13366bf73593f6d929ed3209a901f89b89dcd0e7 |
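Based only on the constructor signature and methods documented in the VerletList docstring above, a typical use might look like the sketch below; the `system` object and the particle ids in the exclusion list are assumed to exist, and the surrounding espressopp setup is not shown:

import espressopp

# `system` is assumed to be an already-configured espressopp.System.
vl = espressopp.VerletList(system, cutoff=2.5, exclusionlist=[(1, 2), (3, 4)])
print(vl.totalSize())    # total number of neighbor pairs over all workers
vl.exclude([(5, 6)])     # add more excluded pairs; the list is rebuilt afterwards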
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import getpass,sys
from spider import renrenspider
version = "0.3"
if __name__ == "__main__":
email=raw_input('Please enter your username: ')
password=getpass.getpass('Please enter your password: ')
renrenspider=renrenspider(email,password)
renrenspider.login()
mode=999
while(mode!='000'):
mode=raw_input('Please enter an operation code: ')
if(mode=='120'):
content=raw_input('Please enter the status text: ')
renrenspider.publish(content)
if(mode=='200'):
content=raw_input('Please enter the ID to visit: ')
renrenspider.visit(content)
if(mode=='100'):
renrenspider.feed(renrenspider.file)
renrenspider.show()
sys.exit()
| jamesliu96/renren | main.py | Python | mit | 667 | ["VisIt"] | 7773c0221435d18f64a21bb228efc87509044e0edc5cc74e89b9a4bc93d26628 |
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from edc_registration.models import RegisteredSubject
from edc_appointment.models import Appointment
from edc_constants.constants import (
FEMALE, SCHEDULED, SCREENED, CONSENTED, FAILED_ELIGIBILITY, ALIVE, OFF_STUDY, ON_STUDY)
from edc_visit_schedule.models.visit_definition import VisitDefinition
from edc_identifier.subject.classes import InfantIdentifier
from tshilo_dikotla.constants import INFANT
from .maternal_consent import MaternalConsent
from .maternal_ultrasound_initial import MaternalUltraSoundInitial
from .antenatal_enrollment import AntenatalEnrollment
from .maternal_eligibility import MaternalEligibility
from .maternal_eligibility_loss import MaternalEligibilityLoss
from .maternal_off_study import MaternalOffStudy
from .maternal_visit import MaternalVisit
from .maternal_labour_del import MaternalLabourDel
from .potential_call import PotentialCall
@receiver(post_save, weak=False, dispatch_uid="maternal_eligibility_on_post_save")
def maternal_eligibility_on_post_save(sender, instance, raw, created, using, **kwargs):
"""Creates/Updates RegisteredSubject and creates or deletes MaternalEligibilityLoss
If participant is consented, does nothing
* If registered subject does not exist, it will be created and some attrs
updated from the MaternalEligibility;
* If registered subject already exists will update some attrs from the MaternalEligibility;
* If registered subject and consent already exist, does nothing.
Note: This is the ONLY place RegisteredSubject is created for mothers in this project."""
if not raw:
if isinstance(instance, MaternalEligibility) and not kwargs.get('update_fields'):
if not instance.is_eligible:
try:
maternal_eligibility_loss = MaternalEligibilityLoss.objects.get(
maternal_eligibility_id=instance.id)
maternal_eligibility_loss.report_datetime = instance.report_datetime
maternal_eligibility_loss.reason_ineligible = instance.ineligibility
maternal_eligibility_loss.user_modified = instance.user_modified
maternal_eligibility_loss.save()
except MaternalEligibilityLoss.DoesNotExist:
MaternalEligibilityLoss.objects.create(
maternal_eligibility_id=instance.id,
report_datetime=instance.report_datetime,
reason_ineligible=instance.ineligibility,
user_created=instance.user_created,
user_modified=instance.user_modified)
else:
MaternalEligibilityLoss.objects.filter(maternal_eligibility_id=instance.id).delete()
try:
registered_subject = RegisteredSubject.objects.get(
screening_identifier=instance.eligibility_id,
subject_type='maternal')
MaternalConsent.objects.get(registered_subject=registered_subject)
except RegisteredSubject.DoesNotExist:
registered_subject = create_maternal_registered_subject(instance)
instance.registered_subject = registered_subject
instance.save()
except MaternalConsent.DoesNotExist:
registered_subject = update_maternal_registered_subject(registered_subject, instance)
registered_subject.save()
def create_maternal_registered_subject(instance):
return RegisteredSubject.objects.create(
created=instance.created,
first_name='Mother',
gender=FEMALE,
registration_status=SCREENED,
screening_datetime=instance.report_datetime,
screening_identifier=instance.eligibility_id,
screening_age_in_years=instance.age_in_years,
subject_type='maternal',
user_created=instance.user_created)
def update_maternal_registered_subject(registered_subject, instance):
registered_subject.first_name = 'Mother'
registered_subject.gender = FEMALE
registered_subject.registration_status = SCREENED
registered_subject.screening_datetime = instance.report_datetime
registered_subject.screening_identifier = instance.eligibility_id
registered_subject.screening_age_in_years = instance.age_in_years
registered_subject.subject_type = 'maternal'
registered_subject.user_modified = instance.user_modified
return registered_subject
@receiver(post_save, weak=False, dispatch_uid="maternal_consent_on_post_save")
def maternal_consent_on_post_save(sender, instance, raw, created, using, **kwargs):
"""Update maternal_eligibility consented flag and consent fields on registered subject."""
if not raw:
if isinstance(instance, MaternalConsent):
maternal_eligibility = MaternalEligibility.objects.get(
registered_subject=instance.registered_subject)
maternal_eligibility.is_consented = True
maternal_eligibility.save(update_fields=['is_consented'])
instance.registered_subject.registration_datetime = instance.consent_datetime
instance.registered_subject.registration_status = CONSENTED
instance.registered_subject.subject_identifier = instance.subject_identifier
instance.registered_subject.initials = instance.initials
instance.registered_subject.last_name = instance.last_name
instance.registered_subject.identity = instance.identity
instance.registered_subject.dob = instance.dob
instance.registered_subject.subject_consent_id = instance.pk
instance.registered_subject.save()
@receiver(post_save, weak=False, dispatch_uid="ineligible_take_off_study")
def ineligible_take_off_study(sender, instance, raw, created, using, **kwargs):
"""If not is_eligible, creates the 1000M visit and sets to off study."""
if not raw:
try:
if not instance.is_eligible and not instance.pending_ultrasound:
report_datetime = instance.report_datetime
visit_definition = VisitDefinition.objects.get(code=instance.off_study_visit_code)
appointment = Appointment.objects.get(
registered_subject=instance.registered_subject,
visit_definition=visit_definition)
maternal_visit = MaternalVisit.objects.get(appointment=appointment)
if maternal_visit.reason != FAILED_ELIGIBILITY:
maternal_visit.reason = FAILED_ELIGIBILITY
maternal_visit.study_status = OFF_STUDY
maternal_visit.save()
except MaternalVisit.DoesNotExist:
MaternalVisit.objects.create(
appointment=appointment,
report_datetime=report_datetime,
survival_status=ALIVE,
study_status=OFF_STUDY,
reason=FAILED_ELIGIBILITY)
except AttributeError as e:
pass
# if 'is_eligible' not in str(e) and 'off_study_visit_code' not in str(e):
# raise
# except VisitDefinition.DoesNotExist:
# pass
# except Appointment.DoesNotExist:
# pass
def put_back_on_study_from_failed_eligibility(instance):
"""Attempts to change the 1000M maternal visit back to scheduled
from off study."""
with transaction.atomic():
try:
visit_definition = VisitDefinition.objects.get(code='1000M')
appointment = Appointment.objects.get(
registered_subject=instance.registered_subject,
visit_definition=visit_definition)
maternal_visit = MaternalVisit.objects.get(
appointment=appointment)
maternal_visit.study_status = ON_STUDY
maternal_visit.reason = SCHEDULED
maternal_visit.save()
except MaternalVisit.DoesNotExist:
MaternalVisit.objects.create(
appointment=appointment,
report_datetime=instance.report_datetime,
survival_status=ALIVE,
study_status=ON_STUDY,
reason=SCHEDULED)
except VisitDefinition.DoesNotExist:
pass
except Appointment.DoesNotExist:
pass
@receiver(post_save, weak=False, dispatch_uid="eligible_put_back_on_study")
def eligible_put_back_on_study(sender, instance, raw, created, using, **kwargs):
"""changes the 1000M visit to scheduled from off study if is_eligible."""
if not raw:
try:
if isinstance(instance, AntenatalEnrollment) and (instance.pending_ultrasound or instance.is_eligible):
MaternalOffStudy.objects.get(
maternal_visit__appointment__registered_subject=instance.registered_subject)
except AttributeError as e:
if 'is_eligible' not in str(e) and 'registered_subject' not in str(e):
raise
except MaternalOffStudy.DoesNotExist:
put_back_on_study_from_failed_eligibility(instance)
@receiver(post_save, weak=False, dispatch_uid="maternal_ultrasound_delivery_initial_on_post_save")
def maternal_ultrasound_delivery_initial_on_post_save(sender, instance, raw, created, using, **kwargs):
"""Update antenatal enrollment to indicate if eligibility is passed on not based on ultra sound form results."""
if not raw:
if isinstance(instance, MaternalUltraSoundInitial) or isinstance(instance, MaternalLabourDel):
# re-save antenatal enrollment record to recalculate eligibility
antenatal_enrollment = instance.antenatal_enrollment
antenatal_enrollment.pending_ultrasound = False
antenatal_enrollment.save()
@receiver(post_save, weak=False, dispatch_uid='create_infant_identifier_on_labour_delivery')
def create_infant_identifier_on_labour_delivery(sender, instance, raw, created, using, **kwargs):
"""Creates an identifier for the registered infant.
RegisteredSubject.objects.create() is called by InfantIdentifier.
Only one infant per mother is allowed."""
if not raw and created:
if isinstance(instance, MaternalLabourDel):
if instance.live_infants_to_register == 1:
maternal_registered_subject = instance.registered_subject
maternal_consent = MaternalConsent.objects.get(
registered_subject=maternal_registered_subject)
maternal_ultrasound = MaternalUltraSoundInitial.objects.get(
maternal_visit__appointment__registered_subject=instance.registered_subject)
with transaction.atomic():
infant_identifier = InfantIdentifier(
maternal_identifier=maternal_registered_subject.subject_identifier,
study_site=maternal_consent.study_site,
birth_order=0,
live_infants=int(maternal_ultrasound.number_of_gestations),
live_infants_to_register=instance.live_infants_to_register,
user=instance.user_created)
RegisteredSubject.objects.using(using).create(
subject_identifier=infant_identifier.get_identifier(),
registration_datetime=instance.delivery_datetime,
subject_type=INFANT,
user_created=instance.user_created,
created=timezone.now(),
first_name='No Name',
initials=None,
registration_status='DELIVERED',
relative_identifier=maternal_consent.subject_identifier,
study_site=maternal_consent.study_site)
@receiver(post_save, weak=False, dispatch_uid='create_potential_calls_on_post_save')
def create_potential_calls_on_post_save(sender, instance, raw, created, using, **kwargs):
if not raw:
if isinstance(instance, Appointment):
try:
PotentialCall.objects.get(
visit_code=instance.visit_definition.code,
identity=instance.registered_subject.identity,
subject_identifier=instance.registered_subject.subject_identifier,
first_name=instance.registered_subject.first_name,
last_name=instance.registered_subject.last_name,
initials=instance.registered_subject.initials,
gender=instance.registered_subject.gender,
dob=instance.registered_subject.dob)
except PotentialCall.DoesNotExist:
PotentialCall.objects.create(
approximate_date=instance.appt_datetime.date(),
visit_code=instance.visit_definition.code,
identity=instance.registered_subject.identity,
subject_identifier=instance.registered_subject.subject_identifier,
first_name=instance.registered_subject.first_name,
last_name=instance.registered_subject.last_name,
initials=instance.registered_subject.initials,
gender=instance.registered_subject.gender,
dob=instance.registered_subject.dob,
consented=True)
| TshepangRas/tshilo-dikotla | td_maternal/models/signals.py | Python | gpl-2.0 | 13,723 | ["VisIt"] | 0a6771b1be3d50b0f6e91901a3dba56445dbb9a68ca4936963b47e3b2ec0a046 |
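The receivers above all share the same guard structure: skip fixture loads via `raw`, then branch on whether the instance was just created. A minimal generic sketch of that Django post_save pattern (model and handler names are hypothetical):

from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, weak=False, dispatch_uid="example_on_post_save")
def example_on_post_save(sender, instance, raw, created, using, **kwargs):
    if raw:
        # loading a fixture; do not trigger side effects
        return
    if created:
        # runs once, when the row is first inserted
        pass
    else:
        # runs on every subsequent save
        pass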
# -*- coding: utf-8 -*-
import sys
import math
from pylab import *
try:
import moose
except ImportError:
print("ERROR: Could not import moose. Please add the directory containing moose.py in your PYTHONPATH")
import sys
sys.exit(1)
from moose.utils import * # for BSplineFill
class AchSyn_STG(moose.SynChan):
"""Acetylcholine graded synapse"""
def __init__(self, *args):
moose.SynChan.__init__(self,*args)
self.Ek = -80e-3 # V
# For event based synapses, I had a strength of 5e-6 S
# to compensate for event-based,
# but for the original graded synapses, 5e-9 S is correct.
self.Gbar = 5e-9 # S # set weight on connecting the network
self.tau1 = 100e-3 # s # this is Vpre dependent (see below)
self.tau2 = 0.0 # single first order equation
Vth = -35e-3 # V
Delta = 5e-3 # V
######## Graded synapse activation
inhsyntable = moose.Interpol(self.path+"/graded_table")
graded = moose.Mstring(self.path+'/graded')
graded.value = 'True'
mgblock = moose.Mstring(self.path+'/mgblockStr')
mgblock.value = 'False'
# also needs a synhandler
moosesynhandler = moose.SimpleSynHandler(self.path+'/handler')
# connect the SimpleSynHandler or the STDPSynHandler to the SynChan (double exp)
moose.connect( moosesynhandler, 'activationOut', self, 'activation' )
# ds/dt = s_inf/tau - s/tau = A - Bs
# where A=s_inf/tau is activation, B is 1/tau
# Fill up the activation and tau tables
# Graded synapse tau
inhtautable = moose.Interpol(self.path+"/tau_table")
inhtautable.xmin = -70e-3 # V
inhtautable.xmax = 0e-3 # V
tau = [self.tau1] # at -70 mV
tau.extend( [self.tau1*(1. - 1./(1+math.exp((Vth-vm)/Delta))) \
for vm in arange(-70e-3,0.00001e-3,70e-3/1000.)] )
inhtautable.vector = array(tau)
inhtautable.connect("lookupOut",self,"setTau1")
# Graded synapse activation
inhsyntable.xmin = -70e-3 # V
inhsyntable.xmax = 0e-3 # V
act = [0.0] # at -70 mV
act.extend( [1/(1+math.exp((Vth-vm)/Delta)) \
for vm in arange(-70e-3,0.00001e-3,70e-3/1000.)] )
act = array(act) / array(tau) # element-wise division # NOTE: A = s_inf/tau
inhsyntable.vector = array(act)
inhsyntable.connect("lookupOut",self,"activation")
| BhallaLab/moose-examples | neuroml/lobster_pyloric/synapses/AchSyn_STG.py | Python | gpl-2.0 | 2,461 | ["MOOSE"] | 03e4f701aa82b182aab47a6e01c16956395589398e603fcad72b2cfd1da5f182 |
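The AchSyn_STG constructor above tabulates the graded-synapse activation and time constant from the first-order equation ds/dt = (s_inf - s)/tau, which MOOSE receives as A - B*s with A = s_inf/tau. The same expressions can be checked outside MOOSE with this short sketch (constants copied from the class above):

import math

tau1 = 100e-3   # s, self.tau1 above
Vth = -35e-3    # V
Delta = 5e-3    # V

def graded_terms(vm):
    """Return (s_inf, tau, A) for presynaptic voltage vm, as tabulated above."""
    s_inf = 1.0 / (1.0 + math.exp((Vth - vm) / Delta))
    tau = tau1 * (1.0 - s_inf)
    return s_inf, tau, (s_inf / tau if tau > 0.0 else 0.0)

for vm in (-70e-3, -40e-3, -20e-3):
    print(vm, graded_terms(vm))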
#!/usr/bin/env python
"""
rock_weathering.py
CellLab-CTS model that simulates the weathering of rock to saprolite around
a network of fractures.
Created (and translated from earlier code by) by Greg Tucker, Jul 2015
"""
from __future__ import print_function
import time
import numpy as np
from landlab import RasterModelGrid
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.raster_cts import RasterCTS
from landlab.components.fracture_grid.fracture_grid import make_frac_grid
import matplotlib
from landlab.io.netcdf import write_netcdf
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent the
grain-by-grain transformation of bedrock to saprolite.
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
Weathering here is treated very simply: a bedrock particle adjacent to a
saprolite particle has a specified probability (rate) of weathering to
saprolite; in other words, a rock-saprolite pair can turn into a
saprolite-saprolite pair.
The states and transitions are as follows:
    Pair state      Transition to       Process     Rate (cells/s)
    ==========      =============       =======     ==============
    0 (0-0)         1 (0-1)                         0.5
                    2 (1-0)                         0.5
    1 (0-1)         3 (1-1)                         1.0
    2 (1-0)         3 (1-1)                         1.0
    3 (1-1)         (none)                          -
"""
# Create an empty transition list
xn_list = []
# Append two transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left/bottom cell, right/top cell, orientation)
# - Tuple representing new pair state
# (left/bottom cell, right/top cell, orientation)
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
xn_list.append( Transition((0,1,0), (1,1,0), 1., 'weathering') )
xn_list.append( Transition((1,0,0), (1,1,0), 1., 'weathering') )
return xn_list
def main():
# INITIALIZE
# User-defined parameters
nr = 200 # number of rows in grid
nc = 200 # number of columns in grid
plot_interval = 0.05 # time interval for plotting (unscaled)
run_duration = 5.0 # duration of run (unscaled)
report_interval = 10.0 # report interval, in real-time seconds
frac_spacing = 10 # average fracture spacing, nodes
outfilename = 'wx' # name for netCDF files
# Remember the clock time, and calculate when we next want to report
# progress.
current_real_time = time.time()
next_report = current_real_time + report_interval
# Counter for output files
time_slice = 0
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'rock', 1 : 'saprolite' }
xn_list = setup_transition_list()
# Create the node-state array and attach it to the grid.
# (Note use of numpy's uint8 data type. This saves memory AND allows us
# to write output to a netCDF3 file; netCDF3 does not handle the default
# 64-bit integer type)
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=np.uint8)
node_state_grid[:] = make_frac_grid(frac_spacing, model_grid=mg)
# Create the CA model
ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid)
# Set up the color map
rock_color = (0.8, 0.8, 0.8)
sap_color = (0.4, 0.2, 0)
clist = [rock_color, sap_color]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
ca_plotter = CAPlotter(ca, cmap=my_cmap)
# Plot the initial grid
ca_plotter.update_plot()
# Output the initial grid to file
write_netcdf((outfilename+str(time_slice)+'.nc'), mg,
#format='NETCDF3_64BIT',
names='node_state_map')
# RUN
current_time = 0.0
while current_time < run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= next_report:
print('Current sim time', current_time, '(',
100 * current_time/run_duration, '%)')
next_report = current_real_time + report_interval
# Run the model forward in time until the next output step
ca.run(current_time+plot_interval, ca.node_state,
plot_each_transition=False)
current_time += plot_interval
# Plot the current grid
ca_plotter.update_plot()
# Output the current grid to a netCDF file
time_slice += 1
write_netcdf((outfilename+str(time_slice)+'.nc'), mg,
#format='NETCDF3_64BIT',
names='node_state_map')
# FINALIZE
# Plot
ca_plotter.finalize()
# If user runs this file, activate the main() function
if __name__ == "__main__":
main()
| decvalts/landlab | landlab/components/cellular_automata/examples/rock_weathering.py | Python | mit | 5,558 | ["NetCDF"] | b0a2aecacbbb07ea42c462b7a68243473a81d9f580d065ff9768d2c4eabdf18a |
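For reference, the pair-state numbering used in the docstring table of setup_transition_list() above maps onto (left/bottom state, right/top state, orientation) tuples as follows; this is a restatement of the docstring, not additional model code:

# Node states: 0 = rock, 1 = saprolite (ns_dict above).
pair_states = {
    0: (0, 0, 0),   # rock-rock
    1: (0, 1, 0),   # rock-saprolite
    2: (1, 0, 0),   # saprolite-rock
    3: (1, 1, 0),   # saprolite-saprolite
}
# The two Transition objects turn pair states 1 and 2 into state 3
# ('weathering') at 1.0 cells/s; state 3 has no outgoing transitions.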
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import hashlib
import inspect
from types import FunctionType
from ._decorator import experimental
def resolve_key(obj, key):
"""Resolve key given an object and key."""
if callable(key):
return key(obj)
elif hasattr(obj, 'metadata'):
return obj.metadata[key]
raise TypeError("Could not resolve key %r. Key must be callable or %s must"
" have `metadata` attribute." % (key,
obj.__class__.__name__))
def make_sentinel(name):
return type(name, (), {
'__repr__': lambda s: name,
'__str__': lambda s: name,
'__class__': None
})()
def find_sentinels(function, sentinel):
params = inspect.signature(function).parameters
return [name for name, param in params.items()
if param.default is sentinel]
class MiniRegistry(dict):
def __call__(self, name):
"""Act as a decorator to register functions with self"""
def decorator(func):
self[name] = func
return func
return decorator
def copy(self):
"""Useful for inheritance"""
return self.__class__(super(MiniRegistry, self).copy())
def formatted_listing(self):
"""Produce an RST list with descriptions."""
if len(self) == 0:
return "\tNone"
else:
return "\n".join(["\t%r\n\t %s" %
(name, self[name].__doc__.split("\n")[0])
for name in sorted(self)])
def interpolate(self, obj, name):
"""Inject the formatted listing in the second blank line of `name`."""
f = getattr(obj, name)
f2 = FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
# Conveniently the original docstring is on f2, not the new ones if
# inheritance is happening. I have no idea why.
t = f2.__doc__.split("\n\n")
t.insert(2, self.formatted_listing())
f2.__doc__ = "\n\n".join(t)
setattr(obj, name, f2)
def chunk_str(s, n, char):
"""Insert `char` character every `n` characters in string `s`.
Canonically pronounced "chunkster".
"""
# Modified from http://stackoverflow.com/a/312464/3776794
if n < 1:
raise ValueError(
"Cannot split string into chunks with n=%d. n must be >= 1." % n)
return char.join((s[i:i+n] for i in range(0, len(s), n)))
@experimental(as_of="0.4.0")
def cardinal_to_ordinal(n):
"""Return ordinal string version of cardinal int `n`.
Parameters
----------
n : int
Cardinal to convert to ordinal. Must be >= 0.
Returns
-------
str
Ordinal version of cardinal `n`.
Raises
------
ValueError
If `n` is less than 0.
Notes
-----
This function can be useful when writing human-readable error messages.
Examples
--------
>>> from skbio.util import cardinal_to_ordinal
>>> cardinal_to_ordinal(0)
'0th'
>>> cardinal_to_ordinal(1)
'1st'
>>> cardinal_to_ordinal(2)
'2nd'
>>> cardinal_to_ordinal(3)
'3rd'
"""
# Taken and modified from http://stackoverflow.com/a/20007730/3776794
# Originally from http://codegolf.stackexchange.com/a/4712 by Gareth
if n < 0:
raise ValueError("Cannot convert negative integer %d to ordinal "
"string." % n)
return "%d%s" % (n, "tsnrhtdd"[(n//10 % 10 != 1)*(n % 10 < 4)*n % 10::4])
@experimental(as_of="0.4.0")
def safe_md5(open_file, block_size=2 ** 20):
"""Computes an md5 sum without loading the file into memory
Parameters
----------
open_file : file object
open file handle to the archive to compute the checksum. It
must be open as a binary file
block_size : int, optional
size of the block taken per iteration
Returns
-------
md5 : md5 object from the hashlib module
object with the loaded file
Notes
-----
This method is based on the answers given in:
http://stackoverflow.com/a/1131255/379593
Examples
--------
>>> from io import BytesIO
>>> from skbio.util import safe_md5
>>> fd = BytesIO(b"foo bar baz") # open file like object
>>> x = safe_md5(fd)
>>> x.hexdigest()
'ab07acbb1e496801937adfa772424bf7'
>>> fd.close()
"""
md5 = hashlib.md5()
data = True
while data:
data = open_file.read(block_size)
if data:
md5.update(data)
return md5
@experimental(as_of="0.4.0")
def find_duplicates(iterable):
"""Find duplicate elements in an iterable.
Parameters
----------
iterable : iterable
Iterable to be searched for duplicates (i.e., elements that are
repeated).
Returns
-------
set
Repeated elements in `iterable`.
"""
# modified from qiita.qiita_db.util.find_repeated
# https://github.com/biocore/qiita
# see licenses/qiita.txt
seen, repeated = set(), set()
for e in iterable:
if e in seen:
repeated.add(e)
else:
seen.add(e)
return repeated
| gregcaporaso/scikit-bio | skbio/util/_misc.py | Python | bsd-3-clause | 5,575 | ["scikit-bio"] | 5a51c810f798e1d0b6fcd90f137dd3586158da236c00fa8c22bde18f17bbe4b1 |
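With the module above importable from the path shown in its metadata row (skbio/util/_misc.py), a quick usage check of two of its helpers looks like this; note that the returned set is unordered:

from skbio.util._misc import chunk_str, find_duplicates

print(chunk_str("AAACCCGGGTT", 3, " "))            # AAA CCC GGG TT
print(find_duplicates(["a", "b", "a", "c", "b"]))  # {'a', 'b'} (unordered)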
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
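# A quick illustration of the registry defined above (not part of the generated
# documentation): add_newdoc() stores a docstring under the key "place.name" in
# `docdict`, and get() simply looks that key up, returning None for unknown names.
# The names below are hypothetical examples only:
#
#     >>> add_newdoc("scipy.special", "example_fn", "example_fn(x)  Placeholder.")
#     >>> get("scipy.special.example_fn")
#     'example_fn(x)  Placeholder.'
#     >>> get("scipy.special.no_such_ufunc") is None
#     True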
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j), j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through `n` of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` vs. `p`
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` vs `k`
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` vs `n`
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of `btdtr` vs `a`
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtrib(a, p, x)
Inverse of `btdtr` vs `b`
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a, b, x)
Cumulative beta distribution.
Returns the area from zero to `x` under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a, b, p)
p-th quantile of the beta distribution.
This is effectively the inverse of `btdtr` returning the value of `x` for which
``btdtr(a, b, x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of `x`
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real `u`.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value ``ph`` is such that if ``u = ellipkinc(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m` = 1 - p.
Returns
-------
K : ndarray
Value of the elliptic integral.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
This function is also called ``F(phi, m)``.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative distribution function of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
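Examples
--------
A minimal check at the one point where the value is known exactly; the
result is shown as a plain float and its printed form may differ slightly
across NumPy versions:
>>> from scipy.special import expit
>>> expit(0)          # 1 / (1 + exp(0)) = 0.5 exactly
0.5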
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when `x` is near zero.
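For example, for very small `x` the naive subtraction loses the result
entirely, while ``expm1`` keeps the leading term (a boolean comparison is
shown instead of the raw value, whose printed precision may vary):
>>> import numpy as np
>>> from scipy.special import expm1
>>> np.exp(1e-20) - 1          # underflows to zero in double precision
0.0
>>> bool(expm1(1e-20) > 0)     # expm1 retains the leading 1e-20 term
True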
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for non-negative integer `n` and
non-negative `x`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
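Examples
--------
A minimal illustration of why this form is useful near zero: the naive
expression loses all significant digits for tiny `x`, while ``exprel``
returns the correct limit of 1 (the exact printed value ``1.0`` for
``exprel(0)`` is assumed here):
>>> import numpy as np
>>> from scipy.special import exprel
>>> (np.exp(1e-17) - 1) / 1e-17   # naive form underflows to zero
0.0
>>> exprel(0)
1.0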
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to `x` under the F density function (also
known as Snedecor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to `fdtr` vs x
Finds the F density argument `x` such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
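Examples
--------
A quick check of the factorial property quoted above (result shown as a
plain float):
>>> from scipy.special import gamma
>>> gamma(5)          # gamma(n + 1) = n!, so gamma(5) = 4! = 24
24.0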
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a, x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a, x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Performs a logarithmic transformation of the
values of the gamma function in one of two
ways, depending on the input `z`:
1) `z` is not complex (i.e. `z` is a purely
real number *or* it is array_like and
contains purely real elements)
The natural logarithm of the absolute value of
gamma(z) is computed. Thus, it is defined as:
ln(abs(gamma(z)))
2) `z` is complex (i.e. `z` is a complex
number *or* it is array_like and contains
at least one complex element)
The natural logarithm of gamma(z) is computed.
Thus, it is defined as:
ln(gamma(z))
See Also
--------
gammasgn
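Examples
--------
A quick check of case 1) above for a real argument, where the result
should equal ``log(4!)``:
>>> import numpy as np
>>> from scipy.special import gammaln
>>> np.allclose(gammaln(5), np.log(24))
True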
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a, b, x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to `x` of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at), t=0..x).
The arguments `a` and `b` are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
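Examples
--------
A small numeric check of the two branches of the piecewise definition
above (both values are exact in double precision):
>>> from scipy.special import huber
>>> huber(1.0, 0.5)   # |r| <= delta: 0.5 * r**2
0.125
>>> huber(1.0, 3.0)   # |r| > delta: delta * (|r| - delta/2)
2.5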
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integral of Airy functions from 0 to `x`
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v, z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If `z` is of real type and negative, `v` must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order `n`.
Notes
-----
`jn` is an alias of `jv`.
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order `v`
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order `v`
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.14.0
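Examples
--------
A quick sanity check of the first branch of the definition above: the
divergence of a point from itself is zero (result shown as a plain float):
>>> from scipy.special import kl_div
>>> kl_div(2.0, 2.0)  # 2*log(1) - 2 + 2 = 0
0.0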
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
z : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
See Also
--------
kvp : Derivative of this function
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
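Examples
--------
A quick check of the definition above at the midpoint (shown as a plain
float; the printed form may differ slightly across NumPy versions):
>>> from scipy.special import logit
>>> logit(0.5)        # log(0.5 / 0.5) = log(1) = 0
0.0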
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : float
Degree.
x : float
Argument. Must be ``|x| <= 1``.
Returns
-------
res : float
The value of the function.
See Also
--------
lpmn : Similar, but computes values for all orders 0..m and degrees 0..n.
clpmn : Similar to `lpmn` but allows a complex argument.
Notes
-----
It is possible to extend the domain of this function to all
complex m, v, x, but this is not yet implemented.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
Returns the modified Struve function Lv(x) of order `v` at `x`; `x` must
be positive unless `v` is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through `k` of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j, j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k, n, p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`
Finds the argument p such that ``nbdtr(k, n, p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`
Finds the argument k such that ``nbdtr(k, n, p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`
Finds the argument `n` such that ``nbdtr(k, n, p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import numpy as np
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import numpy as np
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x)
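For example, by symmetry of the standard Gaussian density the value at
zero is exactly one half:
>>> from scipy.special import ndtr
>>> ndtr(0)
0.5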
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a, x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a, x) in w and the
derivative, W'(a, x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
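For example, with ``k = 0`` only the ``j = 0`` term survives, so the value
reduces to ``exp(-m)``:
>>> import numpy as np
>>> from scipy.special import pdtr
>>> np.allclose(pdtr(0, 1.0), np.exp(-1))
True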
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
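For example, ``poch(3, 2)`` is ``3 * 4`` by the product form above (shown
as a float, the usual return type):
>>> from scipy.special import poch
>>> poch(3, 2)
12.0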
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
`z` (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Reciprocal of the gamma function
Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
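Examples
--------
A short illustration of the round-half-to-even rule described above
(outputs shown as plain floats):
>>> from scipy.special import round
>>> round(2.5)
2.0
>>> round(3.5)
4.0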
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1), t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v, x)
Struve function
Computes the Struve function Hv(x) of order `v` at `x`; `x` must be
positive unless `v` is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
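Examples
--------
A minimal check of the special case described above, where a naive
``x*log(y)`` would produce ``nan``:
>>> from scipy.special import xlogy
>>> xlogy(0, 0)
0.0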
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at `x`.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at `x`.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n, x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order `n`
at `x`.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v, z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order `v` at
complex `z`.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See Also
--------
zetac
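Examples
--------
For ``q = 1`` the sum above reduces to the ordinary Riemann zeta function,
so ``zeta(2, 1)`` should equal ``pi**2 / 6``:
>>> import numpy as np
>>> from scipy.special import zeta
>>> np.allclose(zeta(2, 1), np.pi**2 / 6)
True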
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
|
Shaswat27/scipy
|
scipy/special/add_newdocs.py
|
Python
|
bsd-3-clause
| 74,473
|
[
"Gaussian"
] |
1501cf4f0ed7a28ccf93326976a4becce0dd1931c0fa038910868f9d05eca9ee
|
'''
sbclearn (c) University of Manchester 2017
sbclearn is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=no-member
# pylint: disable=ungrouped-imports
import sys
import numpy as np
import pandas as pd
import sbclearn  # top-level package, needed for the sbclearn.plot call below
from sbclearn.ccs import chem
from sbclearn.utils.validator import k_fold_cross_valid
def get_data(filename):
'''Gets data.'''
df = pd.read_csv(filename)
x_data = np.array([chem.get_fingerprint(smiles, radius=8)
for smiles in df.SMILES])
return x_data, df.CCS
def main(args):
'''main method.'''
x_data, y_data = get_data(args[0])
results = k_fold_cross_valid((x_data, y_data))
sbclearn.plot(results, 'Prediction of ccs')
if __name__ == '__main__':
main(sys.argv[1:])
|
neilswainston/development-py
|
synbiochemdev/sbclearn/ccs/ccs_learn.py
|
Python
|
mit
| 879
|
[
"VisIt"
] |
7ab384eb9a84278c96f8c3ffdb23b62af46dbcceca192174833551714081417c
|
#######################################
# pyGPGO examples
# hyperpost: shows posterior distribution of hyperparameters
# for a Gaussian Process example
#######################################
import numpy as np
from pyGPGO.surrogates.GaussianProcessMCMC import GaussianProcessMCMC
from pyGPGO.covfunc import matern32
if __name__ == '__main__':
np.random.seed(1337)
sexp = matern32()
gp = GaussianProcessMCMC(sexp, niter=2000, init='MAP', step=None)
X = np.linspace(0, 6, 7)[:, None]
y = np.sin(X).flatten()
gp.fit(X, y)
gp.posteriorPlot()
|
hawk31/pyGPGO
|
examples/hyperpost.py
|
Python
|
mit
| 570
|
[
"Gaussian"
] |
8c8c3c1a57c93ab689ccbca37962af665842fb1f59bcfed53b110872846232bf
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Truhlar) of hydrogen-transfer barrier height reactions.
| Geometries from Truhlar and coworkers at site http://t1.chem.umn.edu/misc/database_group/database_therm_bh/raw_geom.cgi (broken link).
| Reference energies from Zhao et al. JPCA, 109 2012-2018 (2005) doi: 10.1021/jp045141s [in supporting information].
- **cp** ``'off'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
"""
import re
import qcdb
# <<< HTBH Database Module >>>
dbse = 'HTBH'
isOS = 'true'
# <<< Database Members >>>
HRXN = range(1, 39)
HRXN_SM = ['5', '6', '9', '10', '23', '24']
HRXN_LG = ['13', '14', '33', '34', '37', '38']
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV['%s-%s' % (dbse, 1)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HCl' ),
'%s-%s-reagent' % (dbse, 'HHClts') ]
RXNM['%s-%s' % (dbse, 1)] = dict(zip(ACTV['%s-%s' % (dbse, 1)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 2)] = ['%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'Cl' ),
'%s-%s-reagent' % (dbse, 'HHClts') ]
RXNM['%s-%s' % (dbse, 2)] = dict(zip(ACTV['%s-%s' % (dbse, 2)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 3)] = ['%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'OHH2ts') ]
RXNM['%s-%s' % (dbse, 3)] = dict(zip(ACTV['%s-%s' % (dbse, 3)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 4)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'H2O' ),
'%s-%s-reagent' % (dbse, 'OHH2ts') ]
RXNM['%s-%s' % (dbse, 4)] = dict(zip(ACTV['%s-%s' % (dbse, 4)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 5)] = ['%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'CH3H2ts') ]
RXNM['%s-%s' % (dbse, 5)] = dict(zip(ACTV['%s-%s' % (dbse, 5)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 6)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'CH4' ),
'%s-%s-reagent' % (dbse, 'CH3H2ts') ]
RXNM['%s-%s' % (dbse, 6)] = dict(zip(ACTV['%s-%s' % (dbse, 6)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 7)] = ['%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'CH4' ),
'%s-%s-reagent' % (dbse, 'OHCH4ts') ]
RXNM['%s-%s' % (dbse, 7)] = dict(zip(ACTV['%s-%s' % (dbse, 7)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 8)] = ['%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'H2O' ),
'%s-%s-reagent' % (dbse, 'OHCH4ts') ]
RXNM['%s-%s' % (dbse, 8)] = dict(zip(ACTV['%s-%s' % (dbse, 8)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 9)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'HH2ts') ]
RXNM['%s-%s' % (dbse, 9)] = dict(zip(ACTV['%s-%s' % (dbse, 9)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 10)] = ['%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'HH2ts') ]
RXNM['%s-%s' % (dbse, 10)] = dict(zip(ACTV['%s-%s' % (dbse, 10)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 11)] = ['%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'NH3' ),
'%s-%s-reagent' % (dbse, 'OHNH3ts') ]
RXNM['%s-%s' % (dbse, 11)] = dict(zip(ACTV['%s-%s' % (dbse, 11)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 12)] = ['%s-%s-reagent' % (dbse, 'H2O' ),
'%s-%s-reagent' % (dbse, 'NH2' ),
'%s-%s-reagent' % (dbse, 'OHNH3ts') ]
RXNM['%s-%s' % (dbse, 12)] = dict(zip(ACTV['%s-%s' % (dbse, 12)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 13)] = ['%s-%s-reagent' % (dbse, 'HCl' ),
'%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'HClCH3ts') ]
RXNM['%s-%s' % (dbse, 13)] = dict(zip(ACTV['%s-%s' % (dbse, 13)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 14)] = ['%s-%s-reagent' % (dbse, 'Cl' ),
'%s-%s-reagent' % (dbse, 'CH4' ),
'%s-%s-reagent' % (dbse, 'HClCH3ts') ]
RXNM['%s-%s' % (dbse, 14)] = dict(zip(ACTV['%s-%s' % (dbse, 14)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 15)] = ['%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'C2H6' ),
'%s-%s-reagent' % (dbse, 'OHC2H6ts') ]
RXNM['%s-%s' % (dbse, 15)] = dict(zip(ACTV['%s-%s' % (dbse, 15)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 16)] = ['%s-%s-reagent' % (dbse, 'H2O' ),
'%s-%s-reagent' % (dbse, 'C2H5' ),
'%s-%s-reagent' % (dbse, 'OHC2H6ts') ]
RXNM['%s-%s' % (dbse, 16)] = dict(zip(ACTV['%s-%s' % (dbse, 16)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 17)] = ['%s-%s-reagent' % (dbse, 'F' ),
'%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'FH2ts') ]
RXNM['%s-%s' % (dbse, 17)] = dict(zip(ACTV['%s-%s' % (dbse, 17)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 18)] = ['%s-%s-reagent' % (dbse, 'HF' ),
'%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'FH2ts') ]
RXNM['%s-%s' % (dbse, 18)] = dict(zip(ACTV['%s-%s' % (dbse, 18)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 19)] = ['%s-%s-reagent' % (dbse, 'O' ),
'%s-%s-reagent' % (dbse, 'CH4' ),
'%s-%s-reagent' % (dbse, 'OHCH3ts') ]
RXNM['%s-%s' % (dbse, 19)] = dict(zip(ACTV['%s-%s' % (dbse, 19)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 20)] = ['%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'OHCH3ts') ]
RXNM['%s-%s' % (dbse, 20)] = dict(zip(ACTV['%s-%s' % (dbse, 20)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 21)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'PH3' ),
'%s-%s-reagent' % (dbse, 'HPH3ts') ]
RXNM['%s-%s' % (dbse, 21)] = dict(zip(ACTV['%s-%s' % (dbse, 21)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 22)] = ['%s-%s-reagent' % (dbse, 'PH2' ),
'%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'HPH3ts') ]
RXNM['%s-%s' % (dbse, 22)] = dict(zip(ACTV['%s-%s' % (dbse, 22)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 23)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'OHHts') ]
RXNM['%s-%s' % (dbse, 23)] = dict(zip(ACTV['%s-%s' % (dbse, 23)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 24)] = ['%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'O' ),
'%s-%s-reagent' % (dbse, 'OHHts') ]
RXNM['%s-%s' % (dbse, 24)] = dict(zip(ACTV['%s-%s' % (dbse, 24)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 25)] = ['%s-%s-reagent' % (dbse, 'H' ),
'%s-%s-reagent' % (dbse, 'H2S' ),
'%s-%s-reagent' % (dbse, 'HH2Sts') ]
RXNM['%s-%s' % (dbse, 25)] = dict(zip(ACTV['%s-%s' % (dbse, 25)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 26)] = ['%s-%s-reagent' % (dbse, 'H2' ),
'%s-%s-reagent' % (dbse, 'HS' ),
'%s-%s-reagent' % (dbse, 'HH2Sts') ]
RXNM['%s-%s' % (dbse, 26)] = dict(zip(ACTV['%s-%s' % (dbse, 26)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 27)] = ['%s-%s-reagent' % (dbse, 'O' ),
'%s-%s-reagent' % (dbse, 'HCl' ),
'%s-%s-reagent' % (dbse, 'OHClts') ]
RXNM['%s-%s' % (dbse, 27)] = dict(zip(ACTV['%s-%s' % (dbse, 27)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 28)] = ['%s-%s-reagent' % (dbse, 'OH' ),
'%s-%s-reagent' % (dbse, 'Cl' ),
'%s-%s-reagent' % (dbse, 'OHClts') ]
RXNM['%s-%s' % (dbse, 28)] = dict(zip(ACTV['%s-%s' % (dbse, 28)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 29)] = ['%s-%s-reagent' % (dbse, 'NH2' ),
'%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'CH3NH2ts') ]
RXNM['%s-%s' % (dbse, 29)] = dict(zip(ACTV['%s-%s' % (dbse, 29)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 30)] = ['%s-%s-reagent' % (dbse, 'CH4' ),
'%s-%s-reagent' % (dbse, 'NH' ),
'%s-%s-reagent' % (dbse, 'CH3NH2ts') ]
RXNM['%s-%s' % (dbse, 30)] = dict(zip(ACTV['%s-%s' % (dbse, 30)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 31)] = ['%s-%s-reagent' % (dbse, 'NH2' ),
'%s-%s-reagent' % (dbse, 'C2H5' ),
'%s-%s-reagent' % (dbse, 'NH2C2H5ts') ]
RXNM['%s-%s' % (dbse, 31)] = dict(zip(ACTV['%s-%s' % (dbse, 31)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 32)] = ['%s-%s-reagent' % (dbse, 'C2H6' ),
'%s-%s-reagent' % (dbse, 'NH' ),
'%s-%s-reagent' % (dbse, 'NH2C2H5ts') ]
RXNM['%s-%s' % (dbse, 32)] = dict(zip(ACTV['%s-%s' % (dbse, 32)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 33)] = ['%s-%s-reagent' % (dbse, 'C2H6' ),
'%s-%s-reagent' % (dbse, 'NH2' ),
'%s-%s-reagent' % (dbse, 'C2H6NH2ts') ]
RXNM['%s-%s' % (dbse, 33)] = dict(zip(ACTV['%s-%s' % (dbse, 33)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 34)] = ['%s-%s-reagent' % (dbse, 'NH3' ),
'%s-%s-reagent' % (dbse, 'C2H5' ),
'%s-%s-reagent' % (dbse, 'C2H6NH2ts') ]
RXNM['%s-%s' % (dbse, 34)] = dict(zip(ACTV['%s-%s' % (dbse, 34)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 35)] = ['%s-%s-reagent' % (dbse, 'NH2' ),
'%s-%s-reagent' % (dbse, 'CH4' ),
'%s-%s-reagent' % (dbse, 'NH2CH4ts') ]
RXNM['%s-%s' % (dbse, 35)] = dict(zip(ACTV['%s-%s' % (dbse, 35)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 36)] = ['%s-%s-reagent' % (dbse, 'CH3' ),
'%s-%s-reagent' % (dbse, 'NH3' ),
'%s-%s-reagent' % (dbse, 'NH2CH4ts') ]
RXNM['%s-%s' % (dbse, 36)] = dict(zip(ACTV['%s-%s' % (dbse, 36)], [-1, -1, +1]))
ACTV['%s-%s' % (dbse, 37)] = ['%s-%s-reagent' % (dbse, 'C5H8' ),
'%s-%s-reagent' % (dbse, 'C5H8ts') ]
RXNM['%s-%s' % (dbse, 37)] = dict(zip(ACTV['%s-%s' % (dbse, 37)], [-1, +1]))
ACTV['%s-%s' % (dbse, 38)] = ['%s-%s-reagent' % (dbse, 'C5H8' ),
'%s-%s-reagent' % (dbse, 'C5H8ts') ]
RXNM['%s-%s' % (dbse, 38)] = dict(zip(ACTV['%s-%s' % (dbse, 38)], [-1, +1]))
# <<< Reference Values [kcal/mol] >>>
BIND = {}
BIND['%s-%s' % (dbse, 1)] = 5.7
BIND['%s-%s' % (dbse, 2)] = 8.7
BIND['%s-%s' % (dbse, 3)] = 5.1
BIND['%s-%s' % (dbse, 4)] = 21.2
BIND['%s-%s' % (dbse, 5)] = 12.1
BIND['%s-%s' % (dbse, 6)] = 15.3
BIND['%s-%s' % (dbse, 7)] = 6.7
BIND['%s-%s' % (dbse, 8)] = 19.6
BIND['%s-%s' % (dbse, 9)] = 9.6
BIND['%s-%s' % (dbse, 10)] = 9.6
BIND['%s-%s' % (dbse, 11)] = 3.2
BIND['%s-%s' % (dbse, 12)] = 12.7
BIND['%s-%s' % (dbse, 13)] = 1.7
BIND['%s-%s' % (dbse, 14)] = 7.9
BIND['%s-%s' % (dbse, 15)] = 3.4
BIND['%s-%s' % (dbse, 16)] = 19.9
BIND['%s-%s' % (dbse, 17)] = 1.8
BIND['%s-%s' % (dbse, 18)] = 33.4
BIND['%s-%s' % (dbse, 19)] = 13.7
BIND['%s-%s' % (dbse, 20)] = 8.1
BIND['%s-%s' % (dbse, 21)] = 3.1
BIND['%s-%s' % (dbse, 22)] = 23.2
BIND['%s-%s' % (dbse, 23)] = 10.7
BIND['%s-%s' % (dbse, 24)] = 13.1
BIND['%s-%s' % (dbse, 25)] = 3.5
BIND['%s-%s' % (dbse, 26)] = 17.3
BIND['%s-%s' % (dbse, 27)] = 9.8
BIND['%s-%s' % (dbse, 28)] = 10.4
BIND['%s-%s' % (dbse, 29)] = 8.0
BIND['%s-%s' % (dbse, 30)] = 22.4
BIND['%s-%s' % (dbse, 31)] = 7.5
BIND['%s-%s' % (dbse, 32)] = 18.3
BIND['%s-%s' % (dbse, 33)] = 10.4
BIND['%s-%s' % (dbse, 34)] = 17.4
BIND['%s-%s' % (dbse, 35)] = 14.5
BIND['%s-%s' % (dbse, 36)] = 17.8
BIND['%s-%s' % (dbse, 37)] = 38.4
BIND['%s-%s' % (dbse, 38)] = 38.4
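# Illustrative note (added; not part of the original database file): ACTV and
# RXNM together define each barrier as a signed sum over reagent energies.
# Assuming a dict `E` of per-reagent total energies keyed like the ACTV
# entries, reaction 1 would be assembled roughly as
#     rxn = '%s-%s' % (dbse, 1)
#     barrier = sum(RXNM[rxn][rgt] * E[rgt] for rgt in ACTV[rxn])
# and, once converted to kcal/mol, should be comparable to BIND[rxn] (5.7).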
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = '{ H + HCl <-- [HHCl] } --> H2 + Cl'
TAGL['%s-%s' % (dbse, 2)] = 'H + HCl <-- { [HHCl] --> H2 + Cl }'
TAGL['%s-%s' % (dbse, 3)] = '{ OH + H2 <-- [OHH2] } --> H + H2O'
TAGL['%s-%s' % (dbse, 4)] = 'OH + H2 <-- { [OHH2] --> H + H2O }'
TAGL['%s-%s' % (dbse, 5)] = '{ CH3 + H2 <-- [CH3H2] } --> H + CH4'
TAGL['%s-%s' % (dbse, 6)] = 'CH3 + H2 <-- { [CH3H2] --> H + CH4 }'
TAGL['%s-%s' % (dbse, 7)] = '{ OH + CH4 <-- [OHCH4] } --> CH3 + H2O'
TAGL['%s-%s' % (dbse, 8)] = 'OH + CH4 <-- { [OHCH4] --> CH3 + H2O }'
TAGL['%s-%s' % (dbse, 9)] = '{ H + H2 <-- [HH2] } --> H2 + H'
TAGL['%s-%s' % (dbse, 10)] = 'H + H2 <-- { [HH2] --> H2 + H }'
TAGL['%s-%s' % (dbse, 11)] = '{ OH + NH3 <-- [OHNH3] } --> H2O + NH2'
TAGL['%s-%s' % (dbse, 12)] = 'OH + NH3 <-- { [OHNH3] --> H2O + NH2 }'
TAGL['%s-%s' % (dbse, 13)] = '{ HCl + CH3 <-- [HClCH3] } --> Cl + CH4'
TAGL['%s-%s' % (dbse, 14)] = 'HCl + CH3 <-- { [HClCH3] --> Cl + CH4 }'
TAGL['%s-%s' % (dbse, 15)] = '{ OH + C2H6 <-- [OHC2H6] } --> H2O + C2H5'
TAGL['%s-%s' % (dbse, 16)] = 'OH + C2H6 <-- { [OHC2H6] --> H2O + C2H5 }'
TAGL['%s-%s' % (dbse, 17)] = '{ F + H2 <-- [FH2] } --> HF + H'
TAGL['%s-%s' % (dbse, 18)] = 'F + H2 <-- { [FH2] --> HF + H }'
TAGL['%s-%s' % (dbse, 19)] = '{ O + CH4 <-- [OHCH3] } --> OH + CH3'
TAGL['%s-%s' % (dbse, 20)] = 'O + CH4 <-- { [OHCH3] --> OH + CH3 }'
TAGL['%s-%s' % (dbse, 21)] = '{ H + PH3 <-- [HPH3] } --> PH2 + H2'
TAGL['%s-%s' % (dbse, 22)] = 'H + PH3 <-- { [HPH3] --> PH2 + H2 }'
TAGL['%s-%s' % (dbse, 23)] = '{ H + OH <-- [OHH] } --> H2 + O'
TAGL['%s-%s' % (dbse, 24)] = 'H + OH <-- { [OHH] --> H2 + O }'
TAGL['%s-%s' % (dbse, 25)] = '{ H + H2S <-- [HH2S] } --> H2 + HS'
TAGL['%s-%s' % (dbse, 26)] = 'H + H2S <-- { [HH2S] --> H2 + HS }'
TAGL['%s-%s' % (dbse, 27)] = '{ O + HCl <-- [OHCl] } --> OH + Cl'
TAGL['%s-%s' % (dbse, 28)] = 'O + HCl <-- { [OHCl] --> OH + Cl }'
TAGL['%s-%s' % (dbse, 29)] = '{ NH2 + CH3 <-- [CH3NH2] } --> CH4 + NH'
TAGL['%s-%s' % (dbse, 30)] = 'NH2 + CH3 <-- { [CH3NH2] --> CH4 + NH }'
TAGL['%s-%s' % (dbse, 31)] = '{ NH2 + C2H5 <-- [NH2C2H5] } --> C2H6 + NH'
TAGL['%s-%s' % (dbse, 32)] = 'NH2 + C2H5 <-- { [NH2C2H5] --> C2H6 + NH }'
TAGL['%s-%s' % (dbse, 33)] = '{ C2H6 + NH2 <-- [C2H6NH2] } --> NH3 + C2H5'
TAGL['%s-%s' % (dbse, 34)] = 'C2H6 + NH2 <-- { [C2H6NH2] --> NH3 + C2H5 }'
TAGL['%s-%s' % (dbse, 35)] = '{ NH2 + CH4 <-- [NH2CH4] } --> CH3 + NH3'
TAGL['%s-%s' % (dbse, 36)] = 'NH2 + CH4 <-- { [NH2CH4] --> CH3 + NH3 }'
TAGL['%s-%s' % (dbse, 37)] = '{ C5H8 <-- [C5H8] } --> C5H8'
TAGL['%s-%s' % (dbse, 38)] = 'C5H8 <-- { [C5H8] --> C5H8 }'
TAGL['%s-%s-reagent' % (dbse, 'C2H5' )] = 'C2H5'
TAGL['%s-%s-reagent' % (dbse, 'C2H6' )] = 'Ethane'
TAGL['%s-%s-reagent' % (dbse, 'C2H6NH2ts' )] = 'Transition state of C2H6 + NH2 <--> NH3 + C2H5'
TAGL['%s-%s-reagent' % (dbse, 'C5H8' )] = 's-trans cis-C5H8'
TAGL['%s-%s-reagent' % (dbse, 'C5H8ts' )] = 'Transition state of s-trans cis-C5H8 <--> s-trans cis-C5H8'
TAGL['%s-%s-reagent' % (dbse, 'CH3' )] = 'CH3'
TAGL['%s-%s-reagent' % (dbse, 'CH3H2ts' )] = 'Transition state of CH3 + H2 <--> H + CH4'
TAGL['%s-%s-reagent' % (dbse, 'CH3NH2ts' )] = 'Transition state of CH3 + NH2 <--> CH4 + NH'
TAGL['%s-%s-reagent' % (dbse, 'CH4' )] = 'Methane'
TAGL['%s-%s-reagent' % (dbse, 'Cl' )] = 'Chlorine atom'
TAGL['%s-%s-reagent' % (dbse, 'F' )] = 'Fluorine atom'
TAGL['%s-%s-reagent' % (dbse, 'FH2ts' )] = 'Transition state of F + H2 <--> HF + H'
TAGL['%s-%s-reagent' % (dbse, 'H' )] = 'Hydrogen atom'
TAGL['%s-%s-reagent' % (dbse, 'H2' )] = 'Hydrogen molecule'
TAGL['%s-%s-reagent' % (dbse, 'H2O' )] = 'Water'
TAGL['%s-%s-reagent' % (dbse, 'H2S' )] = 'Hydrogen Sulfide'
TAGL['%s-%s-reagent' % (dbse, 'HCl' )] = 'Hydrogen Chloride'
TAGL['%s-%s-reagent' % (dbse, 'HClCH3ts' )] = 'Transition state of HCl + CH3 <--> Cl + CH4'
TAGL['%s-%s-reagent' % (dbse, 'HHClts' )] = 'Transition state of H + HCl <--> H2 + Cl'
TAGL['%s-%s-reagent' % (dbse, 'HF' )] = 'Hydrogen Fluoride'
TAGL['%s-%s-reagent' % (dbse, 'HH2Sts' )] = 'Transition state of H + H2S <--> H2 + HS'
TAGL['%s-%s-reagent' % (dbse, 'HH2ts' )] = 'Transition state of H + H2 <--> H2 + H'
TAGL['%s-%s-reagent' % (dbse, 'NH' )] = 'NH'
TAGL['%s-%s-reagent' % (dbse, 'HPH3ts' )] = 'Transition state of H + PH3 <--> PH2 + H2'
TAGL['%s-%s-reagent' % (dbse, 'NH2' )] = 'NH2'
TAGL['%s-%s-reagent' % (dbse, 'NH2C2H5ts' )] = 'Transition state of C2H5 + NH2 <--> NH + C2H6'
TAGL['%s-%s-reagent' % (dbse, 'NH2CH4ts' )] = 'Transition state of CH4 + NH2 <--> NH3 + CH3'
TAGL['%s-%s-reagent' % (dbse, 'NH3' )] = 'Ammonia'
TAGL['%s-%s-reagent' % (dbse, 'O' )] = 'Oxygen atom'
TAGL['%s-%s-reagent' % (dbse, 'OH' )] = 'OH'
TAGL['%s-%s-reagent' % (dbse, 'OHC2H6ts' )] = 'Transition state of C2H6 + OH <--> H2O + C2H5'
TAGL['%s-%s-reagent' % (dbse, 'OHCH3ts' )] = 'Transition state of O + CH4 <--> OH + CH3'
TAGL['%s-%s-reagent' % (dbse, 'OHCH4ts' )] = 'Transition state of OH + CH4 <--> CH3 + H2O'
TAGL['%s-%s-reagent' % (dbse, 'OHClts' )] = 'Transition state of O + HCl <--> OH + Cl'
TAGL['%s-%s-reagent' % (dbse, 'OHH2ts' )] = 'Transition state of OH + H2 <--> H + H2O'
TAGL['%s-%s-reagent' % (dbse, 'OHHts' )] = 'Transition state of OH + H <--> H2 + O'
TAGL['%s-%s-reagent' % (dbse, 'OHNH3ts' )] = 'Transition state of OH + NH3 <--> NH2 + H2O'
TAGL['%s-%s-reagent' % (dbse, 'PH2' )] = 'PH2'
TAGL['%s-%s-reagent' % (dbse, 'PH3' )] = 'Phosphine'
TAGL['%s-%s-reagent' % (dbse, 'HS' )] = 'HS'
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-reagent' % (dbse, 'C2H5')] = qcdb.Molecule("""
0 2
C 0.00550995 -0.00307714 -0.77443959
C 0.00550995 -0.00307714 0.71569982
H 0.00550995 -1.01684444 1.11670108
H 0.37964525 0.84547158 -1.32730429
H -0.88217468 0.49798042 1.12141209
H 0.87299475 0.52193057 1.11660682
H -0.50718726 -0.77526005 -1.32801142
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C2H6')] = qcdb.Molecule("""
0 1
C 0.00000020 -0.00000013 -0.76309187
C 0.00000020 -0.00000013 0.76309163
H 0.00000020 -1.01606691 1.15831231
H -0.87903844 -0.50959541 -1.15830943
H -0.87994508 0.50802887 1.15831013
H 0.87993813 0.50804049 1.15830883
H -0.00180313 1.01606605 -1.15830975
H 0.88084363 -0.50646996 -1.15830912
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C2H6NH2ts')] = qcdb.Molecule("""
0 2
C -1.48570000 -0.44815600 -0.00001900
C -0.50504200 0.70174000 0.00002900
N 1.86516100 -0.34016700 -0.00005700
H -1.35419300 -1.07650500 -0.88050300
H -1.35415900 -1.07661100 0.88038500
H -2.51702500 -0.08617300 0.00002500
H -0.52222400 1.31611800 -0.89721800
H -0.52220500 1.31602900 0.89733800
H 0.66504700 0.14796100 -0.00003400
H 2.24664400 0.15971700 -0.80480600
H 2.24643900 0.15913300 0.80515100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C5H8')] = qcdb.Molecule("""
0 1
C -2.05563800 -0.61227200 0.00000700
C -1.23109600 0.64044800 0.00004900
C 0.10563400 0.73427300 0.00002600
C 1.05755500 -0.37440700 -0.00004400
C 2.38358300 -0.19893600 -0.00003600
H -2.70508500 -0.64159700 0.87713200
H -2.70512900 -0.64150800 -0.87708900
H -1.45133200 -1.51607900 -0.00005500
H -1.79366500 1.56758600 0.00010300
H 0.54575600 1.72564300 0.00006400
H 0.66526200 -1.38324200 -0.00010500
H 3.06468900 -1.03771900 -0.00008800
H 2.81927500 0.79228500 0.00002300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C5H8ts')] = qcdb.Molecule("""
0 1
C -1.29962300 -0.90485300 -0.02015500
C -1.20594700 0.50581700 -0.01341400
C 0.00000000 1.18336100 0.15330100
C 1.20594800 0.50581400 -0.01342200
C 1.29962600 -0.90485100 -0.02014700
H 2.16879700 -1.32754900 -0.51569700
H 1.03204100 -1.45438500 0.87316600
H 2.03713000 1.08558300 -0.39850400
H 0.00000100 2.26291300 0.08590500
H -2.03713300 1.08558700 -0.39848100
H -2.16879600 -1.32754000 -0.51571600
H -0.00001100 -1.18194200 -0.52080800
H -1.03205900 -1.45439400 0.87315800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3')] = qcdb.Molecule("""
0 2
C 0.00000000 0.00000000 -0.00000000
H 0.00000000 0.00000000 1.07731727
H -0.00000000 0.93298412 -0.53865863
H 0.00000000 -0.93298412 -0.53865863
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3H2ts')] = qcdb.Molecule("""
0 2
C 0.00000000 0.26481300 0.00000000
H 1.05342900 0.51666800 0.00000000
H -0.52662700 0.51702500 0.91225000
H -0.52662700 0.51702500 -0.91225000
H -0.00026000 -1.11777100 0.00000000
H 0.00008400 -2.02182500 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3NH2ts')] = qcdb.Molecule("""
0 3
C -1.19957700 -0.01112600 -0.00003000
N 1.40071500 0.12986200 0.00001500
H -1.42666000 -0.51293200 0.93305700
H -1.41990700 -0.59138200 -0.88814300
H -1.52023700 1.02280600 -0.04578300
H 0.18892600 0.12689600 0.00100100
H 1.57033800 -0.88766700 -0.00005300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH4')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 1.08744517 0.00000000
H -0.51262657 -0.36248173 0.88789526
H -0.51262657 -0.36248173 -0.88789526
H 1.02525314 -0.36248173 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'Cl')] = qcdb.Molecule("""
0 2
Cl 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F')] = qcdb.Molecule("""
0 2
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FH2ts')] = qcdb.Molecule("""
0 2
H 0.14656800 -1.12839000 0.00000000
F 0.00000000 0.33042200 0.00000000
H -0.14656800 -1.84541000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'H')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'H2')] = qcdb.Molecule("""
0 1
H 0.00000000 0.00000000 0.00000000
H 0.74187646 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'H2O')] = qcdb.Molecule("""
0 1
O 0.00000000 0.00000000 -0.06555155
H 0.00000000 -0.75670946 0.52017534
H 0.00000000 0.75670946 0.52017534
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'H2S')] = qcdb.Molecule("""
0 1
S 0.00000000 0.00000000 0.10251900
H 0.00000000 0.96624900 -0.82015400
H 0.00000000 -0.96624900 -0.82015400
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCl')] = qcdb.Molecule("""
0 1
Cl 0.00000000 0.00000000 0.00000000
H 1.27444789 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HClCH3ts')] = qcdb.Molecule("""
0 2
C 0.24411700 0.59991600 1.70242300
H -0.67559700 0.27848200 2.17293900
H 0.35191000 1.66378600 1.53767200
H 1.14068600 0.06578700 1.98782200
H 0.05716300 0.13997300 0.39711200
Cl -0.13758000 -0.33809000 -0.95941600
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HHClts')] = qcdb.Molecule("""
0 2
H 0.00048000 -1.34062700 0.00000000
Cl 0.00000000 0.20325200 0.00000000
H -0.00048000 -2.11465900 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HF')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
H 0.91538107 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HH2Sts')] = qcdb.Molecule("""
0 2
H 1.26209700 -0.22009700 0.00000000
S 0.00000000 0.22315300 0.00000000
H -0.50057600 -1.11544500 0.00000000
H -0.76152100 -2.23491300 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HH2ts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 0.92947400
H 0.00000000 0.00000000 -0.92947400
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'NH')] = qcdb.Molecule("""
0 3
N 0.00000000 0.00000000 0.00000000
H 1.03673136 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HPH3ts')] = qcdb.Molecule("""
0 2
P 0.21742900 0.00008800 -0.11124900
H 0.24660900 1.03466800 0.85216400
H 0.26266100 -1.02505800 0.86162300
H -1.26641800 -0.01095200 -0.15062600
H -2.50429000 0.00002800 0.10557500
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'NH2')] = qcdb.Molecule("""
0 2
N 0.00000000 0.00000000 -0.08007491
H 0.00000000 -0.80231373 0.55629442
H 0.00000000 0.80231373 0.55629442
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'NH2C2H5ts')] = qcdb.Molecule("""
0 3
C -1.39498400 -0.44966100 0.00070300
C -0.43574600 0.71406300 0.00202700
N 1.92757000 -0.37835200 0.00303600
H -1.20008700 -1.12095100 -0.83568700
H -1.32209500 -1.02788400 0.92177300
H -2.42871300 -0.10535200 -0.08933400
H -0.41768800 1.30848200 -0.90720100
H -0.44112700 1.32909500 0.89746700
H 0.82850100 0.18059300 -0.02856100
H 2.47259200 0.49807300 0.00391000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'NH2CH4ts')] = qcdb.Molecule("""
0 2
C -1.26075000 -0.00000600 0.01229100
N 1.31325500 -0.00000500 -0.13678200
H -1.58398700 0.90853800 -0.48474400
H -1.46367200 -0.00457300 1.07730200
H -1.58474800 -0.90388000 -0.49270000
H 0.04310800 -0.00006400 -0.15169200
H 1.48045900 0.80557700 0.46775100
H 1.48055700 -0.80552400 0.46780800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'NH3')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.11289000
H 0.00000000 0.93802400 -0.26340900
H 0.81235300 -0.46901200 -0.26340900
H -0.81235300 -0.46901200 -0.26340900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'O')] = qcdb.Molecule("""
0 3
O 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OH')] = qcdb.Molecule("""
0 2
O 0.00000000 0.00000000 0.00000000
H 0.96889819 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OHC2H6ts')] = qcdb.Molecule("""
0 2
C 1.45833400 -0.44636500 0.02547800
C 0.46942300 0.69742200 -0.02749300
O -1.85303700 -0.31465900 -0.05305500
H 1.30176400 -1.06107900 0.91073700
H 1.36658500 -1.08618900 -0.85111800
H 2.48224500 -0.06687900 0.05715000
H 0.47106900 1.32544300 0.86103700
H 0.53352400 1.30349500 -0.92856000
H -0.63023200 0.20781600 -0.07846500
H -2.26720700 0.38832100 0.46575100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OHCH3ts')] = qcdb.Molecule("""
0 3
C 0.00029000 -1.14228900 0.00000000
H -1.05595700 -1.38473500 0.00000000
H 0.52016700 -1.40738900 0.91244700
H 0.52016700 -1.40738900 -0.91244700
H 0.01156000 0.16009900 0.00000000
O 0.00029000 1.36164300 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OHCH4ts')] = qcdb.Molecule("""
0 2
C -1.21148700 0.00796800 0.00040700
O 1.29396500 -0.10869400 0.00013300
H 0.00947600 -0.11802000 0.00279900
H -1.52552900 -0.23325000 1.01007000
H -1.43066500 1.03323300 -0.27808200
H -1.55271000 -0.71011400 -0.73770200
H 1.41663600 0.84989400 -0.00059100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OHClts')] = qcdb.Molecule("""
0 3
Cl 0.01882000 -0.81730100 0.00000000
H -0.47048800 0.56948000 0.00000000
O 0.01882000 1.66557900 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OHH2ts')] = qcdb.Molecule("""
0 2
O -0.30106400 -0.10804900 -0.00000800
H -0.42794500 0.85156900 0.00001600
H 1.01548600 -0.10036700 0.00011900
H 1.82096800 0.11318700 -0.00007300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OHHts')] = qcdb.Molecule("""
0 3
H 0.00000000 0.00000000 -0.86028700
O 0.00000000 0.00000000 0.32902400
H 0.00000000 0.00000000 -1.77190500
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OHNH3ts')] = qcdb.Molecule("""
0 2
N -1.15081600 -0.04393200 -0.10255900
O 1.17918600 -0.09269600 -0.01029000
H -1.30318500 -0.54763800 0.76657100
H -1.33891300 0.93580800 0.09185400
H -0.03068700 -0.15383400 -0.35318400
H 1.29500900 0.81475300 0.29499100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'PH2')] = qcdb.Molecule("""
0 2
P 0.00000000 0.00000000 -0.11565700
H 1.02013000 0.00000000 0.86742700
H -1.02013000 0.00000000 0.86742700
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'PH3')] = qcdb.Molecule("""
0 1
P 0.00000000 0.00000000 0.12641100
H 1.19133900 0.00000000 -0.63205600
H -0.59566900 -1.03173000 -0.63205600
H -0.59566900 1.03173000 -0.63205600
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HS')] = qcdb.Molecule("""
0 2
S 0.00000000 0.00000000 0.00000000
H 1.34020229 0.00000000 0.00000000
units angstrom
""")
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['HTBH-H-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['HTBH-HCl-reagent' ] = 7.05875275
DATA['NUCLEAR REPULSION ENERGY']['HTBH-HHClts-reagent' ] = 10.39163823
DATA['NUCLEAR REPULSION ENERGY']['HTBH-H2-reagent' ] = 0.71329559
DATA['NUCLEAR REPULSION ENERGY']['HTBH-Cl-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['HTBH-OH-reagent' ] = 4.36931115
DATA['NUCLEAR REPULSION ENERGY']['HTBH-OHH2ts-reagent' ] = 10.73785396
DATA['NUCLEAR REPULSION ENERGY']['HTBH-H2O-reagent' ] = 9.19771594
DATA['NUCLEAR REPULSION ENERGY']['HTBH-CH3-reagent' ] = 9.69236444
DATA['NUCLEAR REPULSION ENERGY']['HTBH-CH3H2ts-reagent' ] = 15.32861238
DATA['NUCLEAR REPULSION ENERGY']['HTBH-CH4-reagent' ] = 13.46695412
DATA['NUCLEAR REPULSION ENERGY']['HTBH-OHCH4ts-reagent' ] = 37.11882096
DATA['NUCLEAR REPULSION ENERGY']['HTBH-HH2ts-reagent' ] = 1.42332440
DATA['NUCLEAR REPULSION ENERGY']['HTBH-NH3-reagent' ] = 11.97232339
DATA['NUCLEAR REPULSION ENERGY']['HTBH-OHNH3ts-reagent' ] = 37.13900482
DATA['NUCLEAR REPULSION ENERGY']['HTBH-NH2-reagent' ] = 7.56429116
DATA['NUCLEAR REPULSION ENERGY']['HTBH-HClCH3ts-reagent' ] = 46.25151943
DATA['NUCLEAR REPULSION ENERGY']['HTBH-C2H6-reagent' ] = 42.29535986
DATA['NUCLEAR REPULSION ENERGY']['HTBH-OHC2H6ts-reagent' ] = 76.62129511
DATA['NUCLEAR REPULSION ENERGY']['HTBH-C2H5-reagent' ] = 36.98165035
DATA['NUCLEAR REPULSION ENERGY']['HTBH-F-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['HTBH-FH2ts-reagent' ] = 6.11540453
DATA['NUCLEAR REPULSION ENERGY']['HTBH-HF-reagent' ] = 5.20285489
DATA['NUCLEAR REPULSION ENERGY']['HTBH-O-reagent' ] = 0.00000000
DATA['NUCLEAR REPULSION ENERGY']['HTBH-OHCH3ts-reagent' ] = 30.91033235
DATA['NUCLEAR REPULSION ENERGY']['HTBH-PH3-reagent' ] = 17.63061432
DATA['NUCLEAR REPULSION ENERGY']['HTBH-HPH3ts-reagent' ] = 21.01063452
DATA['NUCLEAR REPULSION ENERGY']['HTBH-PH2-reagent' ] = 11.46498480
DATA['NUCLEAR REPULSION ENERGY']['HTBH-OHHts-reagent' ] = 6.15505787
DATA['NUCLEAR REPULSION ENERGY']['HTBH-H2S-reagent' ] = 12.94849742
DATA['NUCLEAR REPULSION ENERGY']['HTBH-HH2Sts-reagent' ] = 16.45756641
DATA['NUCLEAR REPULSION ENERGY']['HTBH-HS-reagent' ] = 6.31758012
DATA['NUCLEAR REPULSION ENERGY']['HTBH-OHClts-reagent' ] = 38.62988868
DATA['NUCLEAR REPULSION ENERGY']['HTBH-CH3NH2ts-reagent' ] = 33.45955425
DATA['NUCLEAR REPULSION ENERGY']['HTBH-NH-reagent' ] = 3.57299934
DATA['NUCLEAR REPULSION ENERGY']['HTBH-NH2C2H5ts-reagent' ] = 71.85720179
DATA['NUCLEAR REPULSION ENERGY']['HTBH-C2H6NH2ts-reagent' ] = 78.78495055
DATA['NUCLEAR REPULSION ENERGY']['HTBH-NH2CH4ts-reagent' ] = 39.42842411
DATA['NUCLEAR REPULSION ENERGY']['HTBH-C5H8-reagent' ] = 155.81524012
DATA['NUCLEAR REPULSION ENERGY']['HTBH-C5H8ts-reagent' ] = 164.93671263
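# Illustrative check (added; not part of the original file): the tabulated
# nuclear repulsion energies can be used to validate the geometries above,
# assuming the qcdb Molecule API exposes a nuclear_repulsion_energy() method:
#     mol = GEOS['%s-%s-reagent' % (dbse, 'H2')]
#     assert abs(mol.nuclear_repulsion_energy()
#                - DATA['NUCLEAR REPULSION ENERGY']['HTBH-H2-reagent']) < 1.0e-6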
|
psi4/psi4
|
psi4/share/psi4/databases/HTBH.py
|
Python
|
lgpl-3.0
| 38,476
|
[
"Psi4"
] |
4aeb5d604659f87df3055e2f3220d09fa00e52261285c84341e21b8e49deef3c
|
"""
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, re, atexit, io
import py
from py._path import common
from py._path.common import iswin32, fspath
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normcase, normpath, isabs, exists, isdir, isfile, islink, dirname
if sys.version_info > (3,0):
def map_as_list(func, iter):
return list(map(func, iter))
else:
map_as_list = map
class Stat(object):
def __getattr__(self, name):
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = py.error.checked_call(pwd.getpwuid, self.uid)
return entry[0]
@property
def group(self):
""" return group name of file. """
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = py.error.checked_call(grp.getgrgid, self.gid)
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
st = self.path.lstat()
return S_ISLNK(self._osstatresult.st_mode)
class PosixPath(common.PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
uid = getuserid(user)
gid = getgroupid(group)
if rec:
for x in self.visit(rec=lambda x: x.check(link=0)):
if x.check(link=0):
py.error.checked_call(os.chown, str(x), uid, gid)
py.error.checked_call(os.chown, str(self), uid, gid)
def readlink(self):
""" return value of a symbolic link. """
return py.error.checked_call(os.readlink, self.strpath)
def mklinkto(self, oldname):
""" posix style hard link to another name. """
py.error.checked_call(os.link, str(oldname), str(self))
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
if absolute:
py.error.checked_call(os.symlink, str(value), self.strpath)
else:
base = self.common(value)
# with posix local paths '/' is always a common base
relsource = self.__class__(value).relto(base)
reldest = self.relto(base)
n = reldest.count(self.sep)
target = self.sep.join(('..', )*n + (relsource, ))
py.error.checked_call(os.symlink, target, self.strpath)
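            # Worked example (added for illustration): for self == '/a/c/link'
            # and value == '/a/b/target', base is '/a', relsource is
            # 'b/target', reldest is 'c/link', n is 1, so the computed symlink
            # target is '../b/target', which resolves correctly from '/a/c'.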
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2]
return group
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py.error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
else:
try:
path = fspath(path)
except TypeError:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = fspath(self)
try:
s2 = fspath(other)
except TypeError:
return False
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return fspath(self) < fspath(other)
def __gt__(self, other):
return fspath(self) > fspath(other)
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
other = fspath(other)
if not isabs(other):
other = abspath(other)
if self == other:
return True
if iswin32:
return False # there is no samefile
return py.error.checked_call(
os.path.samefile, self.strpath, other)
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
if self.check(dir=1, link=0):
if rec:
# force remove of readonly files on windows
if iswin32:
self.chmod(0o700, rec=1)
py.error.checked_call(py.std.shutil.rmtree, self.strpath,
ignore_errors=ignore_errors)
else:
py.error.checked_call(os.rmdir, self.strpath)
else:
if iswin32:
self.chmod(0o700)
py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
obj = object.__new__(self.__class__)
if not kw:
obj.strpath = self.strpath
return obj
drive, dirname, basename, purebasename,ext = self._getbyspec(
"drive,dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
try:
ext = kw['ext']
except KeyError:
pass
else:
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
if ('dirname' in kw and not kw['dirname']):
kw['dirname'] = drive
else:
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
obj.strpath = normpath(
"%(dirname)s%(sep)s%(basename)s" % kw)
return obj
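    # Usage sketch (added for illustration; not in the original py source):
    #     p = py.path.local('/some/path/to/a/file.ext')
    #     p.new(ext='txt')            -> '/some/path/to/a/file.txt'
    #     p.new(basename='other.rst') -> '/some/path/to/a/other.rst'
    #     p.new(purebasename='index') -> '/some/path/to/a/index.ext'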
def _getbyspec(self, spec):
""" see new for what 'spec' can be. """
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(',') )
append = res.append
for name in args:
if name == 'drive':
append(parts[0])
elif name == 'dirname':
append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == 'basename':
append(basename)
else:
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
append(purebasename)
elif name == 'ext':
append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return super(LocalPath, self).dirpath(*args, **kwargs)
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
components. if abs=1 is used restart from root if any
of the args is an absolute path.
"""
sep = self.sep
strargs = [fspath(arg) for arg in args]
strpath = self.strpath
if kwargs.get('abs'):
newargs = []
for arg in reversed(strargs):
if isabs(arg):
strpath = arg
strargs = newargs
break
newargs.insert(0, arg)
for arg in strargs:
arg = arg.strip(sep)
if iswin32:
# allow unix style paths even on windows.
arg = arg.strip('/')
arg = arg.replace('/', sep)
strpath = strpath + sep + arg
obj = object.__new__(self.__class__)
obj.strpath = normpath(strpath)
return obj
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
if ensure:
self.dirpath().ensure(dir=1)
if encoding:
return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
return py.error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
def islink(self):
return islink(self.strpath)
def check(self, **kw):
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
return super(LocalPath, self).check(**kw)
_patternchars = set("*?[" + os.path.sep)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if fil is None and sort is None:
names = py.error.checked_call(os.listdir, self.strpath)
return map_as_list(self._fastjoin, names)
if isinstance(fil, py.builtin._basestring):
if not self._patternchars.intersection(fil):
child = self._fastjoin(fil)
if exists(child.strpath):
return [child]
return []
fil = common.FNMatcher(fil)
names = py.error.checked_call(os.listdir, self.strpath)
res = []
for name in names:
child = self._fastjoin(name)
if fil is None or fil(child):
res.append(child)
self._sortlist(res, sort)
return res
def size(self):
""" return size of the underlying file object """
return self.stat().size
def mtime(self):
""" return last modification time of the path. """
return self.stat().mtime
def copy(self, target, mode=False, stat=False):
""" copy path to target.
            If mode is True, will copy permission from path to target.
If stat is True, copy permission, last modification
time, last access time, and flags from path to target.
"""
if self.check(file=1):
if target.check(dir=1):
target = target.join(self.basename)
assert self!=target
copychunked(self, target)
if mode:
copymode(self.strpath, target.strpath)
if stat:
copystat(self, target)
else:
def rec(p):
return p.check(link=0)
for x in self.visit(rec=rec):
relpath = x.relto(self)
newx = target.join(relpath)
newx.dirpath().ensure(dir=1)
if x.check(link=1):
newx.mksymlinkto(x.readlink())
continue
elif x.check(file=1):
copychunked(x, newx)
elif x.check(dir=1):
newx.ensure(dir=1)
if mode:
copymode(x.strpath, newx.strpath)
if stat:
copystat(x, newx)
def rename(self, target):
""" rename this path to target. """
target = fspath(target)
return py.error.checked_call(os.rename, self.strpath, target)
def dump(self, obj, bin=1):
""" pickle object into path location"""
f = self.open('wb')
try:
py.error.checked_call(py.std.pickle.dump, obj, f, bin)
finally:
f.close()
def mkdir(self, *args):
""" create & return the directory joined with args. """
p = self.join(*args)
py.error.checked_call(os.mkdir, fspath(p))
return p
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('wb') as f:
f.write(data)
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('w', encoding=encoding) as f:
f.write(data)
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
if 'b' in mode:
if not py.builtin._isbytes(data):
raise ValueError("can only process bytes")
else:
if not py.builtin._istext(data):
if not py.builtin._isbytes(data):
data = str(data)
else:
data = py.builtin._totext(data, sys.getdefaultencoding())
f = self.open(mode)
try:
f.write(data)
finally:
f.close()
def _ensuredirs(self):
parent = self.dirpath()
if parent == self:
return self
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
try:
self.mkdir()
except py.error.EEXIST:
# race condition: file/dir created by another thread/process.
# complain if it is not a dir
if self.check(dir=0):
raise
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if kwargs.get('dir', 0):
return p._ensuredirs()
else:
p.dirpath()._ensuredirs()
if not p.check(file=1):
p.open('w').close()
return p
def stat(self, raising=True):
""" Return an os.stat() tuple. """
if raising == True:
return Stat(self, py.error.checked_call(os.stat, self.strpath))
try:
return Stat(self, os.stat(self.strpath))
except KeyboardInterrupt:
raise
except Exception:
return None
def lstat(self):
""" Return an os.lstat() tuple. """
return Stat(self, py.error.checked_call(os.lstat, self.strpath))
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
if mtime is None:
return py.error.checked_call(os.utime, self.strpath, mtime)
try:
return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
except py.error.EINVAL:
return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
def chdir(self):
""" change directory to self and return old current directory """
try:
old = self.__class__()
except py.error.ENOENT:
old = None
py.error.checked_call(os.chdir, self.strpath)
return old
@contextmanager
def as_cwd(self):
""" return context manager which changes to current dir during the
managed "with" context. On __enter__ it returns the old dir.
"""
old = self.chdir()
try:
yield old
finally:
old.chdir()
def realpath(self):
""" return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
def atime(self):
""" return last access time of the path. """
return self.stat().atime
def __repr__(self):
return 'local(%r)' % self.strpath
def __str__(self):
""" return string representation of the Path. """
return self.strpath
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, self.strpath, mode)
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if a pkgpath can not be determined.
"""
pkgpath = None
for parent in self.parts(reverse=True):
if parent.isdir():
if not parent.join('__init__.py').exists():
break
if not isimportable(parent.basename):
break
pkgpath = parent
return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
"""
if not self.check():
raise py.error.ENOENT(self)
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
                       # be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
mod = py.std.types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
where the 'self' path points to executable.
The process is directly invoked and not through a system shell.
"""
from subprocess import Popen, PIPE
argv = map_as_list(str, argv)
popen_opts['stdout'] = popen_opts['stderr'] = PIPE
proc = Popen([str(self)] + argv, **popen_opts)
stdout, stderr = proc.communicate()
ret = proc.wait()
if py.builtin._isbytes(stdout):
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
if ret != 0:
if py.builtin._isbytes(stderr):
stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
raise py.process.cmdexec.Error(ret, ret, str(self),
stdout, stderr,)
return stdout
def sysfind(cls, name, checker=None, paths=None):
""" return a path object found by looking at the systems
underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
cannot be found, None is returned
Note: This is probably not working on plain win32 systems
but may work on cygwin.
"""
if isabs(name):
p = py.path.local(name)
if p.check(file=1):
return p
else:
if paths is None:
if iswin32:
paths = py.std.os.environ['Path'].split(';')
if '' not in paths and '.' not in paths:
paths.append('.')
try:
systemroot = os.environ['SYSTEMROOT']
except KeyError:
pass
else:
paths = [re.sub('%SystemRoot%', systemroot, path)
for path in paths]
else:
paths = py.std.os.environ['PATH'].split(':')
tryadd = []
if iswin32:
tryadd += os.environ['PATHEXT'].split(os.pathsep)
tryadd.append("")
for x in paths:
for addext in tryadd:
p = py.path.local(x).join(name, abs=True) + addext
try:
if p.check(file=1):
if checker:
if not checker(p):
continue
return p
except py.error.EACCES:
pass
return None
sysfind = classmethod(sysfind)
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
#"""
#special class constructors for local filesystem paths
#"""
def get_temproot(cls):
""" return the system's temporary directory
(where tempfiles are usually created in)
"""
return py.path.local(py.std.tempfile.gettempdir())
get_temproot = classmethod(get_temproot)
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
(which we created ourself).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
mkdtemp = classmethod(mkdtemp)
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout = 172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
if keep is true directories with a number less than (maxnum-keep)
will be removed.
"""
if rootdir is None:
rootdir = cls.get_temproot()
nprefix = normcase(prefix)
def parse_num(path):
""" parse the number out of a path (if it matches the prefix) """
nbasename = normcase(path.basename)
if nbasename.startswith(nprefix):
try:
return int(nbasename[len(nprefix):])
except ValueError:
pass
# compute the maximum number currently in use with the
# prefix
lastmax = None
while True:
maxnum = -1
for path in rootdir.listdir():
num = parse_num(path)
if num is not None:
maxnum = max(maxnum, num)
# make the new directory
try:
udir = rootdir.mkdir(prefix + str(maxnum+1))
except py.error.EEXIST:
# race condition: another thread/process created the dir
# in the meantime. Try counting again
if lastmax == maxnum:
raise
lastmax = maxnum
continue
break
# put a .lock file in the new directory that will be removed at
# process exit
if lock_timeout:
lockfile = udir.join('.lock')
mypid = os.getpid()
if hasattr(lockfile, 'mksymlinkto'):
lockfile.mksymlinkto(str(mypid))
else:
lockfile.write(str(mypid))
def try_remove_lockfile():
# in a fork() situation, only the last process should
# remove the .lock, otherwise the other processes run the
# risk of seeing their temporary dir disappear. For now
# we remove the .lock in the parent only (i.e. we assume
# that the children finish before the parent).
if os.getpid() != mypid:
return
try:
lockfile.remove()
except py.error.Error:
pass
atexit.register(try_remove_lockfile)
# prune old directories
if keep:
for path in rootdir.listdir():
num = parse_num(path)
if num is not None and num <= (maxnum - keep):
lf = path.join('.lock')
try:
t1 = lf.lstat().mtime
t2 = lockfile.lstat().mtime
if not lock_timeout or abs(t2-t1) < lock_timeout:
continue # skip directories still locked
except py.error.Error:
pass # assume that it means that there is no 'lf'
try:
path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
# make link...
try:
username = os.environ['USER'] #linux, et al
except KeyError:
try:
username = os.environ['USERNAME'] #windows
except KeyError:
username = 'current'
src = str(udir)
dest = src[:src.rfind('-')] + '-' + username
try:
os.unlink(dest)
except OSError:
pass
try:
os.symlink(src, dest)
except (OSError, AttributeError, NotImplementedError):
pass
return udir
make_numbered_dir = classmethod(make_numbered_dir)
def copymode(src, dest):
""" copy permission from src to dst. """
py.std.shutil.copymode(src, dest)
def copystat(src, dest):
""" copy permission, last modification time, last access time, and flags from src to dst."""
py.std.shutil.copystat(str(src), str(dest))
def copychunked(src, dest):
chunksize = 524288 # half a meg of bytes
fsrc = src.open('rb')
try:
fdest = dest.open('wb')
try:
while 1:
buf = fsrc.read(chunksize)
if not buf:
break
fdest.write(buf)
finally:
fdest.close()
finally:
fsrc.close()
def isimportable(name):
if name and (name[0].isalpha() or name[0] == '_'):
name = name.replace("_", '')
return not name or name.isalnum()
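# --- Usage sketch (added for illustration; not part of the original module) ---
# The class above is exposed publicly as ``py.path.local``.  A typical round
# trip through the API might look like:
#     import py
#     tmp = py.path.local.mkdtemp()          # fresh temporary directory
#     f = tmp.ensure('sub', 'hello.txt')     # creates parent dirs and the file
#     f.write('hello world')
#     assert f.read() == 'hello world'
#     assert f in tmp.join('sub').listdir()
#     tmp.remove(rec=1)                      # remove the whole tree again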
|
cvegaj/ElectriCERT
|
venv3/lib/python3.6/site-packages/py/_path/local.py
|
Python
|
gpl-3.0
| 33,568
|
[
"VisIt"
] |
868c013b7ba2377ece21834286fb5165fd571a86e080ab71038e3ad160b41592
|
from __future__ import print_function
import random
import time
from numpy import random as nprand
import moose
def make_network():
size = 1024
timestep = 0.2
runtime = 100.0
delayMin = timestep
delayMax = 4
weightMax = 0.02
Vmax = 1.0
thresh = 0.2
tau = 1 # Range of tau
tau0 = 0.5 # minimum tau
refr = 0.3
refr0 = 0.2
connectionProbability = 0.1
random.seed( 123 )
nprand.seed( 456 )
t0 = time.time()
clock = moose.element( '/clock' )
network = moose.IntFire( 'network', size, 1 );
network.vec.bufferTime = [delayMax * 2] * size
moose.le( '/network' )
network.vec.numSynapses = [1] * size
# Interesting. This fails because we haven't yet allocated
# the synapses. I guess it is fair to avoid instances of objects that
# don't have allocations.
#synapse = moose.element( '/network/synapse' )
sv = moose.vec( '/network/synapse' )
print('before connect t = ', time.time() - t0)
mid = moose.connect( network, 'spikeOut', sv, 'addSpike', 'Sparse')
print('after connect t = ', time.time() - t0)
#print mid.destFields
m2 = moose.element( mid )
m2.setRandomConnectivity( connectionProbability, 5489 )
print('after setting connectivity, t = ', time.time() - t0)
network.vec.Vm = [(Vmax*random.random()) for r in range(size)]
network.vec.thresh = thresh
network.vec.refractoryPeriod = [( refr0 + refr * random.random()) for r in range( size) ]
network.vec.tau = [(tau0 + tau*random.random()) for r in range(size)]
numSynVec = network.vec.numSynapses
print('Middle of setup, t = ', time.time() - t0)
numTotSyn = sum( numSynVec )
for item in network.vec:
neuron = moose.element( item )
neuron.synapse.delay = [ (delayMin + random.random() * delayMax) for r in range( len( neuron.synapse ) ) ]
neuron.synapse.weight = nprand.rand( len( neuron.synapse ) ) * weightMax
print('after setup, t = ', time.time() - t0, ", numTotSyn = ", numTotSyn)
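    # Note (added for illustration): with connectionProbability = 0.1 and
    # size = 1024, setRandomConnectivity above yields on average about
    # 0.1 * 1024 * 1024 ~ 1.05e5 synapses, so numTotSyn should be of that order.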
"""
netvec = network.vec
for i in range( size ):
synvec = netvec[i].synapse.vec
synvec.weight = [ (random.random() * weightMax) for r in range( synvec.len )]
synvec.delay = [ (delayMin + random.random() * delayMax) for r in range( synvec.len )]
"""
#moose.useClock( 9, '/postmaster', 'process' )
moose.useClock( 0, '/network', 'process' )
moose.setClock( 0, timestep )
moose.setClock( 9, timestep )
t1 = time.time()
moose.reinit()
print('reinit time t = ', time.time() - t1)
network.vec.Vm = [(Vmax*random.random()) for r in range(size)]
print('setting Vm , t = ', time.time() - t1)
t1 = time.time()
print('starting')
moose.start(runtime)
print('runtime, t = ', time.time() - t1)
print('Vm100:103', network.vec.Vm[100:103])
print('Vm900:903', network.vec.Vm[900:903])
print('weights 100:', network.vec[100].synapse.delay[0:5])
print('weights 900:', network.vec[900].synapse.delay[0:5])
make_network()
|
subhacom/moose-core
|
tests/python/mpi/recurrentIntFire.py
|
Python
|
gpl-3.0
| 2,818
|
[
"MOOSE",
"NEURON"
] |
18e8976ed824a5c9f86b35936321a52699b0a7bd0581c75e889e8d3566e3b70d
|
#!/usr/bin/env python
import numpy
import scipy.io.wavfile
import scipy.fftpack
import scipy.signal
import defs
WINDOWSIZE = defs.ZEROPADDING_FACTOR * defs.BUFFER_WINDOWSIZE
### convenience functions
def amplitude2db(power):
return 20.0 * scipy.log10( power )
def db2amplitude(db):
return 10.0**(db/20.0)
def hertz2bin(freq, sample_rate):
return freq*(WINDOWSIZE/2+1) / (float(sample_rate)/2)
def bin2hertz(bin_number, sample_rate):
return bin_number * (sample_rate/2) / (float(WINDOWSIZE)/2+1)
### file IO
def get_buffers_from_file(wav_filename, num_buffers=None, bins=None):
sample_rate, data_unnormalized = scipy.io.wavfile.read(wav_filename)
windowsize = defs.BUFFER_WINDOWSIZE
hopsize = defs.HOPSIZE
if bins is not None:
windowsize = bins
hopsize = hopsize
if num_buffers:
data_unnormalized = data_unnormalized[0:(
windowsize + (num_buffers-1) * hopsize)]
data = data_unnormalized / float(numpy.iinfo(data_unnormalized.dtype).max)
# split into overlapping windows
window_buffers = []
for window_index in range(
(len(data)/hopsize - windowsize/hopsize)+1):
index_start = window_index*hopsize
index_end = window_index*hopsize + windowsize
window = data[ index_start : index_end ]
window_buffers.append(window)
return window_buffers, sample_rate
def get_long_buffer_from_file(wav_filename, num_buffers=None, bins=None):
sample_rate, data_unnormalized = scipy.io.wavfile.read(wav_filename)
windowsize = defs.LONG_WINDOWSIZE
data_unnormalized = data_unnormalized[0:(
windowsize + (num_buffers-1) * 1)]
data = data_unnormalized / float(numpy.iinfo(data_unnormalized.dtype).max)
# split into overlapping windows
return data, sample_rate
### FFT stuff
def stft_amplitude(window_buffer, zeropadding=defs.ZEROPADDING_FACTOR):
#window_function = scipy.signal.gaussian(len(window_buffer),
# len(window_buffer)/8)
#window_function = scipy.signal.get_window("hanning",
# len(window_buffer))
window_function = scipy.signal.get_window("hamming",
len(window_buffer))
buf = window_buffer*window_function
N = zeropadding*len(buf)
fft = scipy.fftpack.fft(buf, N)
fft_abs = abs(fft[:N/2+1])
### see test-normalization.py
fft_normalized = fft_abs / (sum(window_function)/2)
return fft_normalized
def fft_amplitude(window_buffer, sample_rate):
#seconds = numpy.arange(0, len(window_buffer)) / float(sample_rate)
#a = -10.0
#window_function = numpy.exp(seconds * a)
zeropadding = 2
window_function = scipy.signal.get_window("hamming",
len(window_buffer))
buf = window_buffer*window_function
#import pylab
#pylab.plot(buf)
#pylab.show()
N = zeropadding*len(buf)
fft = scipy.fftpack.fft(buf, N)
fft_abs = abs(fft[:N/2+1])
fft_normalized = fft_abs / (sum(window_function)/2)
return fft_normalized
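# --- Usage sketch (added for illustration; not part of the original module) ---
# Combines the helpers above into a dB spectrogram.  The WAV file is assumed
# to be mono and readable by scipy.io.wavfile.
def spectrogram_db(wav_filename):
    window_buffers, sample_rate = get_buffers_from_file(wav_filename)
    frames = [amplitude2db(stft_amplitude(buf)) for buf in window_buffers]
    return numpy.array(frames), sample_rate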
|
gperciva/artifastring
|
research/mode-detect/stft.py
|
Python
|
gpl-3.0
| 2,992
|
[
"Gaussian"
] |
01aef76705b0d6f9177b6276e7f27afccaae13d3a2d56e1e449d2421ce9c53af
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-get-job-pilots
# Author : Stuart Paterson
########################################################################
"""
Retrieve info about pilots that have matched a given Job
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID' % Script.scriptName,
'Arguments:',
' JobID: DIRAC ID of the Job' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
for job in args:
try:
job = int( job )
except Exception, x:
errorList.append( ( job, 'Expected integer for jobID' ) )
exitCode = 2
continue
result = diracAdmin.getJobPilots( job )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRACExit( exitCode )
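# Example invocation (added for illustration; the job ID is hypothetical):
#   dirac-admin-get-job-pilots 12345
# Several JobIDs may be passed at once; each is handled by the loop above.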
|
Sbalbp/DIRAC
|
Interfaces/scripts/dirac-admin-get-job-pilots.py
|
Python
|
gpl-3.0
| 1,367
|
[
"DIRAC"
] |
d31f7860f163cb57fc9b6cb58cfc052824f7643f1ccc58cb5eda97e18de37eb3
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
"""
ZNCC using Pyramids
"""
__author__ = "Per-Erik Forssén"
__copyright__ = "Copyright 2013, Per-Erik Forssén"
__license__ = "GPL"
__email__ = "perfo@isy.liu.se"
import logging
logger = logging.getLogger()
import numpy as np
def gaussian_kernel(gstd):
"""Generate odd sized truncated Gaussian
The generated filter kernel has a cutoff at $3\sigma$
and is normalized to sum to 1
Parameters
-------------
gstd : float
Standard deviation of filter
Returns
-------------
g : ndarray
Array with kernel coefficients
"""
Nc = np.ceil(gstd*3)*2+1
x = np.linspace(-(Nc-1)/2,(Nc-1)/2,Nc,endpoint=True)
g = np.exp(-.5*((x/gstd)**2))
g = g/np.sum(g)
return g
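# Worked example (illustrative): for gstd = 1.0 the cutoff gives
#   Nc = ceil(3 * 1.0) * 2 + 1 = 7,
# so gaussian_kernel(1.0) returns 7 coefficients on x = -3 .. 3, symmetric
# about zero and normalised so that g.sum() == 1 (up to float rounding).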
def subsample(time_series, downsample_factor):
"""Subsample with Gaussian prefilter
    The prefilter will have the filter size $\sigma_g = 0.5 \cdot downsample_factor$
Parameters
--------------
time_series : ndarray
Input signal
downsample_factor : float
Downsampling factor
Returns
--------------
ts_out : ndarray
The downsampled signal
"""
Ns = np.int(np.floor(np.size(time_series)/downsample_factor))
g = gaussian_kernel(0.5*downsample_factor)
ts_blur = np.convolve(time_series,g,'same')
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
cpos = (k+.5)*downsample_factor-.5
cfrac = cpos-np.floor(cpos)
        cind = int(np.floor(cpos))  # integer index of the left neighbour
if cfrac>0:
ts_out[k]=ts_blur[cind]*(1-cfrac)+ts_blur[cind+1]*cfrac
else:
ts_out[k]=ts_blur[cind]
return ts_out
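# Illustrative example: subsample(ts, 4) on a length-1000 signal first blurs
# it with gaussian_kernel(2.0) (sigma = 0.5 * 4), then returns
# floor(1000 / 4) = 250 samples, each taken from the blurred signal by
# linear interpolation between the two nearest samples.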
def upsample(time_series, scaling_factor):
"""Upsample using linear interpolation
The function uses replication of the value at edges
Parameters
--------------
time_series : ndarray
Input signal
scaling_factor : float
The factor to upsample with
Returns
--------------
ts_out : ndarray
The upsampled signal
"""
Ns0 = np.size(time_series)
Ns = np.int(np.floor(np.size(time_series)*scaling_factor))
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
        cpos = np.min([Ns0-1, np.max([0., (k+0.5)/scaling_factor-0.5])])
cfrac = cpos-np.floor(cpos)
cind = int(np.floor(cpos))
#print "cpos=%f cfrac=%f cind=%d", (cpos,cfrac,cind)
if cfrac>0:
ts_out[k]=time_series[cind]*(1-cfrac)+time_series[cind+1]*cfrac
else:
ts_out[k]=time_series[cind]
return ts_out
def do_binning(time_series,factor):
Ns = np.size(time_series) // factor
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
ts_out[k]=0
for l in range(0,factor):
ts_out[k] += time_series[k*factor+l]
ts_out[k] /= factor
return ts_out
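# Illustrative example: do_binning([1., 2., 3., 4., 5., 6.], 2) averages
# non-overlapping pairs and returns a (3, 1) array [[1.5], [3.5], [5.5]].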
def create_pyramid(time_series,octaves):
pyr_out = [time_series ]
for k in range(0,octaves):
pyr_out.append(do_binning(pyr_out[-1],2))
return pyr_out
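# Illustrative example: create_pyramid(ts, 3) on a length-800 signal returns
# 4 levels of sizes 800, 400, 200 and 100, where each coarser level is the
# pairwise average (do_binning with factor 2) of the level above it.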
def zncc(ts1,ts2):
"""Zero mean normalised cross-correlation (ZNCC)
This function does ZNCC of two signals, ts1 and ts2
Normalisation by very small values is avoided by doing
max(nmin,nvalue)
Parameters
--------------
ts1 : ndarray
Input signal 1 to be aligned with
ts2 : ndarray
Input signal 2
Returns
--------------
best_shift : float
The best shift of *ts1* to align it with *ts2*
ts_out : ndarray
The correlation result
"""
# Output is the same size as ts1
Ns1 = np.size(ts1)
Ns2 = np.size(ts2)
ts_out = np.zeros((Ns1,1), dtype='float64')
ishift = int(np.floor(Ns2/2)) # origin of ts2
t1m = np.mean(ts1)
t2m = np.mean(ts2)
for k in range(0,Ns1):
lstart = np.int(ishift-k)
if lstart<0 :
lstart=0
lend = np.int(ishift-k+Ns2)
imax = np.int(np.min([Ns2,Ns1-k+ishift]))
if lend>imax :
lend=imax
csum = 0
ts1sum = 0
ts1sum2 = 0
ts2sum = 0
ts2sum2 = 0
Nterms = lend-lstart
for l in range(lstart,lend):
csum += ts1[k+l-ishift]*ts2[l]
ts1sum += ts1[k+l-ishift]
ts1sum2 += ts1[k+l-ishift]*ts1[k+l-ishift]
ts2sum += ts2[l]
ts2sum2 += ts2[l]*ts2[l]
ts1sum2 = np.max([t1m*t1m*100,ts1sum2])-ts1sum*ts1sum/Nterms
ts2sum2 = np.max([t2m*t2m*100,ts2sum2])-ts2sum*ts2sum/Nterms
#ts_out[k]=csum/np.sqrt(ts1sum2*ts2sum2)
ts_out[k]=(csum-2.0*ts1sum*ts2sum/Nterms+ts1sum*ts2sum/Nterms/Nterms)/np.sqrt(ts1sum2*ts2sum2)
best_shift = np.argmax(ts_out)-ishift
return best_shift, ts_out
def refine_correlation(ts1,ts2,shift_guess):
"""Refine a rough guess of shift by evaluating ZNCC for similar values
Shifts of *ts1* are tested in the range [-2:2]
Refine a rough guess of shift, by trying neighbouring ZNCC values
in the range [-2:2]
Parameters
----------------
ts1 : list_like
The first timeseries
ts2 : list_like
        The second timeseries
shift_guess : float
The guess to start from
Returns
---------------
best_shift : float
The best shift of those tested
ts_out : ndarray
Computed correlation values
"""
Ns1 = np.size(ts1)
Ns2 = np.size(ts2)
ts_out = np.zeros((5,1))
ishift = int(np.floor(Ns2/2)) # origin of ts2
k_offset = shift_guess-2+ishift # Try shifts starting with this one
t1m = np.mean(ts1)
t2m = np.mean(ts2)
for k in range(0,5):
km = k+k_offset
lstart = np.int(ishift-km)
if lstart<0 :
lstart=0
lend = np.int(ishift-km+Ns2)
imax = np.int(np.min([Ns2,Ns1-km+ishift]))
if lend>imax :
lend=imax
csum = 0
ts1sum = 0
ts1sum2 = 0
ts2sum = 0
ts2sum2 = 0
Nterms = lend-lstart
for l in range(lstart,lend):
csum += ts1[km+l-ishift]*ts2[l]
ts1sum += ts1[km+l-ishift]
ts1sum2 += ts1[km+l-ishift]*ts1[km+l-ishift]
ts2sum += ts2[l]
ts2sum2 += ts2[l]*ts2[l]
ts1sum2 = np.max([t1m*t1m*100,ts1sum2])-ts1sum*ts1sum/Nterms
ts2sum2 = np.max([t2m*t2m*100,ts2sum2])-ts2sum*ts2sum/Nterms
#ts_out[k]=csum/np.sqrt(ts1sum2*ts2sum2)
ts_out[k]=(csum-2.0*ts1sum*ts2sum/Nterms+ts1sum*ts2sum/Nterms/Nterms)/np.sqrt(ts1sum2*ts2sum2)
best_shift = np.argmax(ts_out)+k_offset-ishift
return best_shift, ts_out
def find_shift_pyr(ts1,ts2,nlevels):
"""
Find shift that best aligns two time series
The shift that aligns the timeseries ts1 with ts2.
This is sought using zero mean normalized cross correlation (ZNCC) in a coarse to fine search with an octave pyramid on nlevels levels.
Parameters
----------------
ts1 : list_like
The first timeseries
ts2 : list_like
        The second timeseries
nlevels : int
Number of levels in pyramid
Returns
----------------
ts1_shift : float
How many samples to shift ts1 to align with ts2
"""
pyr1 = create_pyramid(ts1,nlevels)
pyr2 = create_pyramid(ts2,nlevels)
logger.debug("pyramid size = %d" % len(pyr1))
logger.debug("size of first element %d " % np.size(pyr1[0]))
logger.debug("size of last element %d " % np.size(pyr1[-1]))
ishift, corrfn = zncc(pyr1[-1],pyr2[-1])
for k in range(1,nlevels+1):
ishift, corrfn = refine_correlation(pyr1[-k-1],pyr2[-k-1],ishift*2)
return ishift
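# --- Usage sketch (not part of the original module; illustrative only) ------
# A minimal, hypothetical example: two slices of the same Gaussian bump,
# offset by 32 samples.  find_shift_pyr() searches coarse-to-fine over an
# octave pyramid and should report a shift for ts1 close to -32 samples.
if __name__ == "__main__":
    x = np.arange(512, dtype='float64')
    bump = np.exp(-0.5 * ((x - 200.0) / 20.0) ** 2)  # smooth, well-localised feature
    ts1 = bump[32:288]   # 256 samples; the bump sits 32 samples earlier here
    ts2 = bump[0:256]    # 256 samples; reference signal
    shift = find_shift_pyr(ts1, ts2, 3)
    print("estimated shift of ts1 relative to ts2: %d samples" % shift)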
| spillai/crisp | crisp/znccpyr.py | Python | gpl-3.0 | 7,934 | ["Gaussian"] | cc4fe3741522a97c11ace4f098c5ff7b4bf2fa5c49145f524685981bd59c5128 |