text stringlengths 0 1.05M | meta dict |
|---|---|
"""A .. collapse:: directive for sphinx-bootstrap-theme."""
import os.path as op
from docutils import nodes
from docutils.parsers.rst.directives import flag, class_option
from docutils.parsers.rst.roles import set_classes
from docutils.statemachine import StringList
from sphinx.locale import _
from sphinx.util.docutils import SphinxDirective
from sphinx.util.fileutil import copy_asset
# Directory holding this extension's static assets (css/js) copied at build end.
this_dir = op.dirname(__file__)
__version__ = '0.1.0.dev0'
###############################################################################
# Super classes
class DivNode(nodes.Body, nodes.Element):
    """Base class for the custom div-like docutils nodes in this extension."""

    def __init__(self, **options):
        # Subclasses declare OPTION_KEYS; require an exact key match so a
        # directive passing a wrong/missing option fails loudly.
        unexpected = set(options) ^ set(self.OPTION_KEYS)
        assert not unexpected, (unexpected, self.__class__.__name__)
        self.options = options
        super().__init__()

    def visit_node(self, node):
        """Emit the opening tag (``self`` is the Sphinx translator)."""
        atts = {}
        if node.BASECLASS:
            atts['class'] = node.BASECLASS
            extra_class = node.options.get('class')
            if extra_class:
                atts['class'] += ' {}-{}'.format(node.BASECLASS, extra_class)
        self.body.append(self.starttag(node, node.ELEMENT, **atts))

    def depart_node(self, node):
        """Emit the closing tag for the node's element."""
        self.body.append('</{}>'.format(node.ELEMENT))
def _assemble(node, directive):
    """Parse the header, title, body content, and footer into ``node``."""
    def _nest(raw):
        # Re-parse a generated reST fragment into the node.
        directive.state.nested_parse(
            StringList(raw.split('\n')), directive.content_offset, node)

    directive.add_name(node)
    _nest(node.HEADER_PRETITLE.format(**node.options))
    # The directive argument becomes the (inline-parsed) title.
    textnodes, messages = directive.state.inline_text(
        directive.arguments[0], directive.lineno)
    node += textnodes
    node += messages
    _nest(node.HEADER_POSTTITLE.format(**node.options))
    # The directive body itself.
    directive.state.nested_parse(
        directive.content, directive.content_offset, node)
    _nest(node.FOOTER.format(**node.options))
###############################################################################
# .. collapse::
class CollapseNode(DivNode):
    """Class for .. collapse:: directive."""

    # Options accepted (and required) by __init__; see DivNode.
    OPTION_KEYS = ('title', 'id_', 'extra', 'class')
    # Element/classes emitted by DivNode.visit_node/depart_node.
    ELEMENT = 'div'
    BASECLASS = 'panel'
    # reST fragments parsed around the title and body; ``{id_}``/``{extra}``
    # are filled from the node options by _assemble.
    HEADER_PRETITLE = """.. raw:: html

    <div class="panel-heading"><h4 class="panel-title">
    <a data-toggle="collapse" href="#collapse_{id_}">"""
    HEADER_POSTTITLE = """.. raw:: html

    </a></h4></div>
    <div id="collapse_{id_}" class="panel-collapse collapse{extra}">
    <div class="panel-body">"""
    FOOTER = """.. raw:: html

    </div></div>"""
    # Bootstrap contextual panel styles accepted by the ``:class:`` option.
    KNOWN_CLASSES = (
        'default', 'primary', 'success', 'info', 'warning', 'danger')

    @staticmethod
    def _check_class(class_):
        # Option validator: used as the option_spec entry for ``:class:``.
        if class_ not in CollapseNode.KNOWN_CLASSES:
            raise ValueError(':class: option %r must be one of %s'
                             % (class_, CollapseNode.KNOWN_CLASSES))
        return class_
class CollapseDirective(SphinxDirective):
    """Implements the ``.. collapse::`` directive."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'open': flag,
                   'class': CollapseNode._check_class}
    has_content = True

    def run(self):
        """Build and return the CollapseNode for this directive."""
        self.assert_has_content()
        title = _(self.arguments[0])
        # ``:open:`` makes the panel start expanded ("collapse in").
        node = CollapseNode(
            title=title,
            id_=nodes.make_id(title),
            extra=_(' in' if 'open' in self.options else ''),
            **{'class': self.options.get('class', 'default')})
        _assemble(node, self)
        return [node]
###############################################################################
# .. details::
class DetailsNode(DivNode):
    """Class for .. details:: directive."""

    # Renders as a native HTML5 <details> element; no extra base class.
    ELEMENT = 'details'
    BASECLASS = ''
    OPTION_KEYS = ('title', 'class')
    # The title goes inside <summary>; the body follows, closed by
    # DivNode.depart_node (hence the empty FOOTER).
    HEADER_PRETITLE = """.. raw:: html

    <summary>"""
    HEADER_POSTTITLE = """.. raw:: html

    </summary>"""
    FOOTER = """"""
class DetailsDirective(SphinxDirective):
    """Implements the ``.. details::`` directive."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': class_option}
    has_content = True

    def run(self):
        """Build and return the DetailsNode for this directive."""
        set_classes(self.options)
        self.assert_has_content()
        node = DetailsNode(title=_(self.arguments[0]),
                           **{'class': self.options.get('class', '')})
        _assemble(node, self)
        return [node]
###############################################################################
# Generic setup
def setup(app):
    """Register the directives, nodes, and static assets with Sphinx."""
    app.add_directive('collapse', CollapseDirective)
    app.add_directive('details', DetailsDirective)
    # Newer Sphinx uses add_css_file/add_js_file; older releases only have
    # the add_stylesheet/add_javascript names.
    try:
        app.add_css_file('bootstrap_divs.css')
    except AttributeError:
        app.add_stylesheet('bootstrap_divs.css')
    try:
        app.add_js_file('bootstrap_divs.js')
    except AttributeError:
        app.add_javascript('bootstrap_divs.js')
    app.connect('build-finished', copy_asset_files)
    for node_class in (CollapseNode, DetailsNode):
        handlers = (node_class.visit_node, node_class.depart_node)
        app.add_node(node_class, html=handlers, latex=handlers, text=handlers)
    return dict(version='0.1', parallel_read_safe=True,
                parallel_write_safe=True)
def copy_asset_files(app, exc):
    """Copy this extension's static assets into the build's ``_static``.

    Connected to ``build-finished``; does nothing if the build raised.
    """
    if exc is not None:  # build failed: leave the output tree alone
        return
    for fname in ('bootstrap_divs.css', 'bootstrap_divs.js'):
        copy_asset(op.join(this_dir, fname),
                   op.join(app.outdir, '_static'))
| {
"repo_name": "cjayb/mne-python",
"path": "doc/sphinxext/sphinx_bootstrap_divs/__init__.py",
"copies": "6",
"size": "6079",
"license": "bsd-3-clause",
"hash": 7310866247350506000,
"line_mean": 30.0153061224,
"line_max": 79,
"alpha_frac": 0.5704885672,
"autogenerated": false,
"ratio": 3.9320827943078913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7502571361507892,
"avg_score": null,
"num_lines": null
} |
"""A .. collapse:: directive for sphinx-bootstrap-theme."""
import os.path as op
from docutils import nodes
from docutils.parsers.rst.directives import flag
from sphinx.locale import _
from sphinx.util.docutils import SphinxDirective
from sphinx.util.fileutil import copy_asset
# Directory holding this extension's static assets (css/js) copied at build end.
this_dir = op.dirname(__file__)
__version__ = '0.1.0.dev0'
###############################################################################
# Super classes
class DivNode(nodes.General, nodes.Element):
    """Base class for the custom div-like docutils nodes."""

    def __init__(self, **options):
        # Subclasses declare OPTION_KEYS; demand an exact match so that a
        # wrong or missing option fails loudly.
        mismatch = set(options) ^ set(self.OPTION_KEYS)
        assert not mismatch, (mismatch, self.__class__.__name__)
        self.options = options
        super().__init__()

    # These are registered via app.add_node(), which calls them with the
    # translator as the first argument — hence staticmethods named ``self``.
    @staticmethod
    def visit_node(self, node):
        """Append the node's opening markup to the translator body."""
        self.body.append(node.HEADER.format(**node.options))

    @staticmethod
    def depart_node(self, node):
        """Append the node's closing markup to the translator body."""
        self.body.append(node.FOOTER)
###############################################################################
# .. collapse::
class CollapseNode(DivNode):
    """Class for .. collapse:: directive."""

    # Options accepted (and required) by DivNode.__init__.
    OPTION_KEYS = ('title', 'id_', 'extra', 'class_')
    # Raw HTML emitted by DivNode.visit_node, formatted with the options.
    HEADER = """
    <div class="panel panel-{class_}">
    <div class="panel-heading"><h4 class="panel-title">
    <a data-toggle="collapse" href="#collapse_{id_}">{title}</a>
    </h4></div>
    <div id="collapse_{id_}" class="panel-collapse collapse{extra}">
    <div class="panel-body">
    """
    FOOTER = "</div></div></div>"
    # Bootstrap contextual panel styles accepted by the ``:class:`` option.
    KNOWN_CLASSES = (
        'default', 'primary', 'success', 'info', 'warning', 'danger')

    @staticmethod
    def _check_class(class_):
        # Option validator: used as the option_spec entry for ``:class:``.
        if class_ not in CollapseNode.KNOWN_CLASSES:
            raise ValueError(':class: option %r must be one of %s'
                             % (class_, CollapseNode.KNOWN_CLASSES))
        return class_
class CollapseDirective(SphinxDirective):
    """Collapse directive."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'open': flag,
                   'class': CollapseNode._check_class}
    has_content = True

    def run(self):
        """Parse the directive into a CollapseNode."""
        env = self.state.document.settings.env
        self.assert_has_content()
        # ``:open:`` makes the panel start expanded ("collapse in").
        node = CollapseNode(
            title=_(self.arguments[0]),
            id_=env.new_serialno('Collapse'),
            extra=_(' in' if 'open' in self.options else ''),
            class_=self.options.get('class', 'default'))
        self.add_name(node)
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
###############################################################################
# .. details::
class DetailsNode(DivNode):
    """Class for .. details:: directive."""

    # Options accepted (and required) by DivNode.__init__.
    OPTION_KEYS = ('title', 'class_')
    # Raw HTML emitted by DivNode.visit_node, formatted with the options.
    HEADER = """
    <details class="{class_}"><summary>{title}</summary>"""
    FOOTER = "</details>"
class DetailsDirective(SphinxDirective):
    """Details directive."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'class': str}
    has_content = True

    def run(self):
        """Parse the directive into a DetailsNode."""
        self.assert_has_content()
        node = DetailsNode(title=_(self.arguments[0]),
                           class_=self.options.get('class', ''))
        self.add_name(node)
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
###############################################################################
# Generic setup
def setup(app):
    """Set up for Sphinx app."""
    for name, directive in (('collapse', CollapseDirective),
                            ('details', DetailsDirective)):
        app.add_directive(name, directive)
    # Newer Sphinx uses add_css_file/add_js_file; fall back to the old
    # add_stylesheet/add_javascript names on older releases.
    try:
        app.add_css_file('bootstrap_divs.css')
    except AttributeError:
        app.add_stylesheet('bootstrap_divs.css')
    try:
        app.add_js_file('bootstrap_divs.js')
    except AttributeError:
        app.add_javascript('bootstrap_divs.js')
    app.connect('build-finished', copy_asset_files)
    for node_class in (CollapseNode, DetailsNode):
        visitors = (node_class.visit_node, node_class.depart_node)
        app.add_node(node_class, html=visitors, latex=visitors, text=visitors)
    return dict(version='0.1', parallel_read_safe=True,
                parallel_write_safe=True)
def copy_asset_files(app, exc):
    """Copy static assets into ``_static`` after a successful build."""
    # ``exc`` is the exception the build raised, or None on success.
    if exc is not None:
        return
    for asset in ('bootstrap_divs.css', 'bootstrap_divs.js'):
        copy_asset(op.join(this_dir, asset), op.join(app.outdir, '_static'))
| {
"repo_name": "adykstra/mne-python",
"path": "doc/sphinxext/sphinx_bootstrap_divs/__init__.py",
"copies": "1",
"size": "5024",
"license": "bsd-3-clause",
"hash": 2431707970534065000,
"line_mean": 30.4,
"line_max": 79,
"alpha_frac": 0.5577229299,
"autogenerated": false,
"ratio": 4.009577015163607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 160
} |
"""A collection base class"""
class Collection():
    """A collection base class that introduces nested getting and setting"""

    # The default value returned by get_in when a key is absent; normally None.
    DFLT = None

    def get(self, index, dflt=None):
        """This method needs to be implemented to subclass Collection"""
        raise NotImplementedError(
            "You need to implement get to use the Collection base class")

    def __setitem__(self, *args, **kwargs):
        # NOTE: the message is built with implicit concatenation; the old
        # backslash continuation embedded the next line's indentation into
        # the runtime string.
        raise NotImplementedError(
            "You need to implement __setitem__ to use the Collection "
            "base class")

    def get_in(self, *keys, **kwargs):
        """
        A getter for deeply nested values.

        Complexity: the complexity differs depending on the collection's
        lookup/get complexity

        params:
            *keys: the keys that should be worked through
            **kwargs: this only accepts dflt (the default element, normally
                DFLT), all others will be ignored
        returns:
            the element that was found or the default element
        """
        key = keys[0]
        cont = len(keys) > 1
        elem = self.get(key)
        if elem is None:
            return kwargs.get("dflt", self.DFLT)
        # Stop descending when keys are exhausted or the intermediate value
        # is not itself a nested Collection (it is returned as-is).
        if not cont or not hasattr(elem, "get_in"):
            return elem
        return elem.get_in(*keys[1:], **kwargs)

    def update_in(self, *keys, **kwargs):
        """
        A setter for deeply nested values.

        Complexity: the complexity differs depending on the collection's
        lookup/set complexity

        params:
            *keys: the keys that should be worked through
            **kwargs: this only accepts to (a needed argument that specifies
                what the element should be set to), all others will be ignored
        returns: self
        throws: Whatever a collection raises when a key does not exist
            (mostly Index- or KeyError)
        """
        key = keys[0]
        cont = len(keys) > 1
        elem = self.get(key)
        if cont:
            elem.update_in(*keys[1:], **kwargs)
        else:
            self[key] = kwargs["to"]
        return self
| {
"repo_name": "hellerve/hawkweed",
"path": "hawkweed/classes/collection.py",
"copies": "1",
"size": "2140",
"license": "mit",
"hash": 6436789742818975000,
"line_mean": 33.5161290323,
"line_max": 99,
"alpha_frac": 0.5869158879,
"autogenerated": false,
"ratio": 4.602150537634409,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5689066425534409,
"avg_score": null,
"num_lines": null
} |
"""A collection of agents written in Python"""
from __future__ import unicode_literals, print_function
from cyclus.agents import Region, Institution, Facility
from cyclus import typesystem as ts
class NullRegion(Region):
    """A simple do-nothing region.

    All behavior is inherited unchanged from the base Region class.
    """
class NullInst(Institution):
    """An institution that owns facilities in the simulation but exhibits
    null behavior. No parameters are given when using the null institution.
    """
class Sink(Facility):
    """This sink facility accepts specified amount of commodity."""

    in_commods = ts.VectorString(
        doc="commodities that the sink facility accepts.",
        tooltip="input commodities for the sink",
        uilabel="List of Input Commodities",
        uitype=["oneormore", "incommodity"],
    )
    recipe = ts.String(
        tooltip="input/request recipe name",
        doc="Name of recipe to request. If empty, sink requests material no "
            "particular composition.",
        default="",
        uilabel="Input Recipe",
        uitype="recipe",
    )
    max_inv_size = ts.Double(
        default=1e299,
        doc="total maximum inventory size of sink facility",
        uilabel="Maximum Inventory",
        tooltip="sink maximum inventory size",
    )
    capacity = ts.Double(
        doc="capacity the sink facility can accept at each time step",
        uilabel="Maximum Throughput",
        tooltip="sink capacity",
        default=100.0,
    )
    inventory = ts.ResourceBuffInv(capacity='max_inv_size')

    def get_material_requests(self):
        """Request up to ``capacity`` of each input commodity."""
        if self.recipe:
            comp = self.context.get_recipe(self.recipe)
        else:
            comp = {}  # no recipe: no particular composition
        mat = ts.Material.create_untracked(self.capacity, comp)
        # The same untracked material is offered as the target for every
        # input commodity.
        return {"commodities": {c: mat for c in self.in_commods},
                "constraints": self.capacity}

    def get_product_requests(self):
        """Request up to ``capacity`` of each input commodity as a product."""
        prod = ts.Product.create_untracked(self.capacity, "")
        return {"commodities": {c: prod for c in self.in_commods},
                "constraints": self.capacity}

    def accept_material_trades(self, responses):
        """Stash every received material in the inventory buffer."""
        for resource in responses.values():
            self.inventory.push(resource)

    def accept_product_trades(self, responses):
        """Stash every received product in the inventory buffer."""
        for resource in responses.values():
            self.inventory.push(resource)
class Source(Facility):
    """A minimum implementation source facility that provides a commodity with
    a given capacity.
    """

    commod = ts.String(
        doc="commodity that the source facility supplies",
        tooltip="source commodity",
        schematype="token",
        uilabel="Commodity",
        uitype="outcommodity",
    )
    recipe_name = ts.String(
        doc="Recipe name for source facility's commodity. "
            "If empty, source supplies material with requested compositions.",
        tooltip="commodity recipe name",
        schematype="token",
        default="",
        uilabel="Recipe",
        uitype="recipe",
    )
    capacity = ts.Double(
        doc="amount of commodity that can be supplied at each time step",
        uilabel="Maximum Throughput",
        tooltip="source capacity",
    )

    def build(self, parent):
        """On build, schedule decommissioning when a finite lifetime is set."""
        super(Source, self).build(parent)
        if self.lifetime >= 0:
            self.context.schedule_decom(self, self.exit_time)

    def get_material_bids(self, requests):
        """Bid on each request for our commodity, capped at ``capacity``."""
        reqs = requests.get(self.commod, None)
        if not reqs:
            return
        if self.recipe_name:
            recipe_comp = self.context.get_recipe(self.recipe_name)
            bids = []
            for req in reqs:
                qty = min(req.target.quantity, self.capacity)
                offer = ts.Material.create_untracked(qty, recipe_comp)
                bids.append({'request': req, 'offer': offer})
        else:
            # No recipe: offer exactly what was requested.
            bids = [req for req in reqs]
        return {'bids': bids, 'constraints': self.capacity}

    def get_material_trades(self, trades):
        """Create the material promised in each accepted trade."""
        responses = {}
        if self.recipe_name:
            recipe_comp = self.context.get_recipe(self.recipe_name)
            for trade in trades:
                responses[trade] = ts.Material.create(
                    self, trade.amt, recipe_comp)
        else:
            for trade in trades:
                responses[trade] = ts.Material.create(
                    self, trade.amt, trade.request.target.comp())
        return responses
| {
"repo_name": "Baaaaam/cyclus",
"path": "cyclus/pyagents.py",
"copies": "6",
"size": "4499",
"license": "bsd-3-clause",
"hash": 6950369749500685000,
"line_mean": 33.8759689922,
"line_max": 86,
"alpha_frac": 0.6019115359,
"autogenerated": false,
"ratio": 4.0568079350766455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000591252564409193,
"num_lines": 129
} |
"""A collection of analytical results which return the exact solution to a
trial problem to benchmark particle movers
Includes:
* Motion in a constant electric field
* Motion in a constant magnetic field
* Crossed motion in perpendicular electric and magnetic fields
"""
__author__ = 'swebb'
import numpy as np
import scipy.linalg as linalg
import scipy.constants as consts
class PtclTests:
    """Analytic solutions for charged-particle motion in a constant magnetic
    field, used to benchmark numerical particle movers.
    """

    def __init__(self):
        """Stateless; the propagation methods are pure functions."""

    @staticmethod
    def _propagate(x0, v0, tau, t):
        """Propagate (x0, v0) for time ``t`` under rotation generator ``tau``.

        ``tau = q*B/(m*gamma)`` is the gyration frequency vector.

        :param x0: initial position (3-vector)
        :param v0: initial velocity (3-vector)
        :param tau: normalized magnetic field vector
        :param t: time to propagate
        :return: (x, v) after propagating for time t
        """
        v0 = np.asarray(v0, dtype=float)
        bMatrix = np.array([[0., tau[2], -1. * tau[1]],
                            [-1. * tau[2], 0., tau[0]],
                            [tau[1], -1. * tau[0], 0.]])
        # Solve for the velocity using matrix exponentiation.
        greenFunc = linalg.expm(bMatrix * t)
        vout = greenFunc.dot(v0)
        # bMatrix is antisymmetric and therefore singular (its kernel is the
        # field direction), so a plain inverse raises LinAlgError.  Use the
        # pseudo-inverse for the gyrating (perpendicular) part of the
        # position integral...
        gyration = np.linalg.pinv(bMatrix).dot(
            (greenFunc - np.identity(3)).dot(v0))
        # ...and add the free drift of the velocity component parallel to
        # the field, on which expm(bMatrix * t) acts as the identity.
        tau_sq = np.dot(tau, tau)
        if tau_sq > 0.:
            drift = (np.dot(tau, v0) / tau_sq) * np.asarray(tau, dtype=float)
        else:
            drift = v0  # zero field: straight-line motion
        xout = np.asarray(x0, dtype=float) + gyration + drift * t
        return xout, vout

    def constnonrelmagfield(self, x0, v0, q, m, B, t):
        """
        Compute the non-relativistic motion of a charged particle with
        initial x0 and v0 through a constant magnetic field.

        :param x0: initial position
        :param v0: initial velocity
        :param q: charge
        :param m: mass
        :param B: magnetic field
        :param t: time to propagate
        :return: x, v after propagating for time t
        """
        # Normalize the magnetic field.
        tau = q * np.asarray(B, dtype=float) / m
        return self._propagate(x0, v0, tau, t)

    def constrelmagfield(self, x0, u0, q, m, B, t):
        """
        Compute the relativistic motion of a charged particle with initial
        x0 and u0 through a constant magnetic field.

        :param x0: initial position
        :param u0: initial beta*gamma velocity
        :param q: charge
        :param m: mass
        :param B: magnetic field
        :param t: time to propagate
        :return: x, v after propagating for time t
        """
        # Properly normalize the vectors.
        gamma = np.sqrt(np.dot(u0, u0) / consts.c ** 2 + 1)
        v0 = np.asarray(u0, dtype=float) / gamma
        tau = q * np.asarray(B, dtype=float) / (m * gamma)
        return self._propagate(x0, v0, tau, t)
"repo_name": "radiasoft/radtrack",
"path": "experimental/ode/PtclTests.py",
"copies": "1",
"size": "3124",
"license": "apache-2.0",
"hash": -8707012088017183000,
"line_mean": 28.2056074766,
"line_max": 77,
"alpha_frac": 0.5457746479,
"autogenerated": false,
"ratio": 3.355531686358754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9377098189462374,
"avg_score": 0.004841628959276018,
"num_lines": 107
} |
"""a collection of Annotation-related models"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from flask_appbuilder import Model
from sqlalchemy import (
Column, DateTime, ForeignKey, Index, Integer, String, Text,
)
from sqlalchemy.orm import relationship
from superset.models.helpers import AuditMixinNullable
class AnnotationLayer(Model, AuditMixinNullable):
    """A logical namespace for a set of annotations"""

    __tablename__ = 'annotation_layer'

    id = Column(Integer, primary_key=True)
    # Human-readable layer name; also used as the repr.
    name = Column(String(250))
    # Free-form description of the layer.
    descr = Column(Text)

    def __repr__(self):
        return self.name
class Annotation(Model, AuditMixinNullable):
    """Time-related annotation"""

    __tablename__ = 'annotation'

    id = Column(Integer, primary_key=True)
    # Time span the annotation covers.
    start_dttm = Column(DateTime)
    end_dttm = Column(DateTime)
    # Owning AnnotationLayer (see relationship below).
    layer_id = Column(Integer, ForeignKey('annotation_layer.id'))
    short_descr = Column(String(500))
    long_descr = Column(Text)
    layer = relationship(
        AnnotationLayer,
        backref='annotation')

    # Composite index to speed up layer + time-range lookups.
    __table_args__ = (
        Index('ti_dag_state', layer_id, start_dttm, end_dttm),
    )

    @property
    def data(self):
        # Plain-dict view of the annotation; layer name falls back to None
        # when no layer is attached.
        return {
            'start_dttm': self.start_dttm,
            'end_dttm': self.end_dttm,
            'short_descr': self.short_descr,
            'long_descr': self.long_descr,
            'layer': self.layer.name if self.layer else None,
        }
| {
"repo_name": "alanmcruickshank/superset-dev",
"path": "superset/models/annotations.py",
"copies": "1",
"size": "1544",
"license": "apache-2.0",
"hash": -7130549895319735000,
"line_mean": 26.5714285714,
"line_max": 65,
"alpha_frac": 0.6528497409,
"autogenerated": false,
"ratio": 3.86,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 56
} |
"""A collection of base classes and utility objects for building an abstract
syntax tree representing a Mathematica program. """
from typing import List
from Types import TypeBase, SymbolType
class ASTNode:
    """
    The base class of all AST node classes.
    """

    def __init__(self, children: List['ASTNode'] = None,
                 parent: 'ASTNode' = None,
                 type_: 'TypeBase' = None,
                 exprs: list = None,
                 ):
        """
        Creates a node of the AST.

        :param children: A list of ASTNode objects that are children of this node.
        :param parent: The parent_scope ASTNode of this node.
        :param exprs: A list of the ANTLR SyntaxTree objects that represent this node.
        :param type_: The type of the value held by this node.
        """
        # NOTE: the original annotated ``children`` as ``list('ASTNode')``,
        # which evaluates to a list of characters; ``List['ASTNode']`` (a
        # string forward reference) is the intended spelling.
        self.children = children
        self.parent = parent
        # A list of the ANTLR SyntaxTree objects that represent this node.
        self.exprs = exprs
        # The type returned or represented by the node.
        self._type = type_
        # Subclasses can register names for their child nodes which can be
        # accessed with __getattr__.
        self._named_children: dict = None

    def _named_index(self, key):
        """Return the registered child index for ``key``, or None.

        Reads ``_named_children`` straight out of ``__dict__`` so that
        ``__getattr__``/``__setattr__`` cannot recurse while the attribute
        has not been assigned yet (e.g. during ``__init__``).
        """
        named = self.__dict__.get('_named_children')
        if named and key in named:
            return named[key]
        return None

    def __getattr__(self, key: str):
        # Implements a mechanism to have child nodes with names as
        # attributes. See _add_named_child().
        index = self._named_index(key)
        if index is None:
            raise AttributeError(key)
        # Check that this node has children.
        children = self.__dict__.get('children')
        if not children:
            return None
        # Try to retrieve the child; an out-of-range index means the named
        # child simply has not been assigned yet.
        try:
            return children[index]
        except IndexError:
            return None

    def __setattr__(self, key, value):
        # Implements a mechanism to have child nodes with names as
        # attributes. See _add_named_child().
        index = self._named_index(key)
        if index is None:
            # Not a named child: ordinary attribute assignment.  (The
            # original fell through here without returning and crashed on
            # the named-child lookup below.)
            super().__setattr__(key, value)
            return
        # Check that this node has children.
        if not self.children:
            self.children = []
        # Check that key's index exists; pad with None placeholders if not.
        if len(self.children) < index + 1:
            amount = index + 1 - len(self.children)
            self.children.extend(amount * [None])
        # Set the value.
        self.children[index] = value

    def _add_named_child(self, name: str, index: int):
        """
        Subclasses can register names for children at given indices in
        self.children so that a child with a name can be accessed by
        ASTNode.name.

        :param name: String, the name of the child.
        :param index: Integer, the index in self.children at which the child
            lives.
        :return:
        """
        # Lazy instantiation.
        if not self._named_children:
            self._named_children = dict()
        self._named_children[name] = index

    def _add_named_children(self, names: List[str]):
        """
        Adds named children, automatically indexing starting at 0.

        :param names: List[str], the names of the children in order.
        :return:
        """
        for n, name in enumerate(names):
            self._add_named_child(name, n)

    def specify_type(self, type_: 'TypeBase'):
        """
        The word *specify* is intentional, as type_ must be a subtype of this
        node's type unless this node's type is SymbolType.

        :param type_:
        :return:
        """
        # TODO: Implement ASTNode.specify_type()
        pass

    @property
    def type_(self):
        return self._type
| {
"repo_name": "rljacobson/FoxySheep",
"path": "python_target/FoxySheep/AST/ASTNode.py",
"copies": "1",
"size": "4179",
"license": "bsd-2-clause",
"hash": -5567086188784332000,
"line_mean": 30.9007633588,
"line_max": 86,
"alpha_frac": 0.5881789902,
"autogenerated": false,
"ratio": 4.441020191285866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007813301111791203,
"num_lines": 131
} |
"""A collection of bits that can be set, cleared, and toggled by number.
Based on code from https://wiki.python.org/moin/BitArrays and converted to
a class-based version for ease of use.
"""
__author__ = 'Brian Landers <brian@packetslave.com>'
import array
class BitField(object):
    """A collection of bits that can be set, cleared, and toggled by number."""

    __slots__ = ['__max_bit', '__bytes']

    def __init__(self, bits):
        """Create a field of ``bits`` bits, all initially off."""
        if bits < 1:
            raise ValueError(bits)
        self.__max_bit = bits - 1
        # Round up to the number of whole 32-bit words needed.
        words, leftover = divmod(bits, 32)
        if leftover:
            words += 1
        self.__bytes = array.array('I', (0,) * words)  # unsigned 32-bit ints

    def _validate(self, bit):
        """Raise ValueError unless ``bit`` is a legal index."""
        if bit < 0 or bit > self.__max_bit:
            raise ValueError(bit)

    def set(self, bit):
        """Set a given bit in the field to on."""
        self._validate(bit)
        self.__bytes[bit >> 5] |= 1 << (bit & 31)

    def clear(self, bit):
        """Clear a given bit in the field."""
        self._validate(bit)
        self.__bytes[bit >> 5] &= ~(1 << (bit & 31))

    def toggle(self, bit):
        """Toggle a given bit in the field from off to on, or vice versa."""
        self._validate(bit)
        self.__bytes[bit >> 5] ^= 1 << (bit & 31)

    def test(self, bit):
        """Returns True if a given bit in the field is on."""
        self._validate(bit)
        return bool(self.__bytes[bit >> 5] & (1 << (bit & 31)))
| {
"repo_name": "Packetslave/bitfield",
"path": "bitfield.py",
"copies": "1",
"size": "1627",
"license": "apache-2.0",
"hash": 522122701644228540,
"line_mean": 28.0535714286,
"line_max": 79,
"alpha_frac": 0.5427166564,
"autogenerated": false,
"ratio": 3.5916114790286975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9634328135428698,
"avg_score": 0,
"num_lines": 56
} |
"""A collection of cards."""
import random
from csrv.model import cards
from csrv.model.cards import card_info
# This import is just to pull in all the card definitions
import csrv.model.cards.corp
import csrv.model.cards.runner
class Deck(object):
    """A collection of cards owned by one player, with validity checks.

    Each ``_verify_*`` helper returns an error message string on failure and
    None on success.  (Rewritten to be Python 2/3 compatible: the original
    relied on py2-only ``len(filter(...))`` and the ``reduce`` builtin.)
    """

    def __init__(self, identity_name, card_names):
        self.identity = cards.Registry.get(identity_name)
        self.cards = []
        self.is_valid = True
        # Unknown card names are silently skipped.
        for name in card_names:
            card = cards.Registry.get(name)
            if card:
                self.cards.append(card)

    def _verify_less_than_three_copies(self):
        """Make sure we have no more than 3 copies of a single card."""
        counts = {}
        for card in self.cards:
            counts[card.NAME] = counts.get(card.NAME, 0) + 1
        # Sorted for a deterministic message order.
        invalid = sorted(name for name in counts if counts[name] > 3)
        if invalid:
            return "Deck contains more than 3 copies of the following cards: {}".format(
                ', '.join(invalid))

    def _verify_min_deck_size(self):
        """Make sure deck meets minimum deck size limit."""
        if len(self.cards) < self.identity.MIN_DECK_SIZE:
            self.is_valid = False
            return "Deck does not meet minimum deck size requirement"

    def _verify_influence_points(self):
        """Make sure deck doesn't exceed maximum influence points."""
        influence_spent = sum(
            card.influence_cost(self.identity.FACTION) for card in self.cards)
        if influence_spent > self.identity.MAX_INFLUENCE:
            return "Deck contains {} influence but only {} allowed".format(
                influence_spent, self.identity.MAX_INFLUENCE)

    def _verify_side_only(self, side):
        """Make sure we only have cards belonging to the correct side."""
        if any(card.SIDE != side for card in self.cards):
            return "Deck contains cards from the other side (corp/runner)"
class CorpDeck(Deck):
    """A deck for a corp."""

    def validate(self):
        """Return a list of errors with the deck.

        Returns a real list (the original returned a py2 ``filter`` result,
        which is a lazy object on py3).
        """
        return [err for err in (
            self._verify_min_deck_size(),
            self._verify_influence_points(),
            self._verify_less_than_three_copies(),
            self._verify_in_faction_agendas(),
            self._verify_agenda_points(),
            self._verify_side_only(card_info.CORP),
        ) if err]

    def _verify_agenda_points(self):
        """Make sure deck has required agenda points based on deck size."""
        agenda_points = sum(card.AGENDA_POINTS for card in self.cards)
        deck_size = len(self.cards)
        # Require at least 2 agenda points per 5 cards.
        if agenda_points / float(deck_size) < 2.0 / 5.0:
            self.is_valid = False
            return "Only {} Agenda Points in deck of {} cards".format(
                agenda_points, deck_size)

    def _verify_in_faction_agendas(self):
        """Make sure deck only contains in-faction (or neutral) agendas."""
        agendas = [card for card in self.cards if card.TYPE == card_info.AGENDA]
        if any(agenda.FACTION not in (card_info.NEUTRAL, self.identity.FACTION)
               for agenda in agendas):
            return "Deck contains out-of-faction Agendas"
class RunnerDeck(Deck):
    """A deck for a runner."""

    def validate(self):
        """Return a list of errors with the deck.

        Returns a real list (the original returned a py2 ``filter`` result,
        which is a lazy object on py3).
        """
        return [err for err in (
            self._verify_min_deck_size(),
            self._verify_influence_points(),
            self._verify_less_than_three_copies(),
            self._verify_side_only(card_info.RUNNER),
        ) if err]
| {
"repo_name": "mrroach/CentralServer",
"path": "csrv/model/deck.py",
"copies": "1",
"size": "3195",
"license": "apache-2.0",
"hash": 3447125318417795600,
"line_mean": 34.5,
"line_max": 114,
"alpha_frac": 0.66885759,
"autogenerated": false,
"ratio": 3.2938144329896906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44626720229896905,
"avg_score": null,
"num_lines": null
} |
""" A collection of Chaco tools that respond to a multi-pointer interface
"""
from numpy import asarray, dot, sqrt
# Enthought library imports
from traits.api import Delegate, Dict, Enum, Instance, Int, Property, Trait, Tuple, CArray
# Chaco imports
from chaco.api import BaseTool
from chaco.tools.api import PanTool, DragZoom, LegendTool, RangeSelection
# Sentinel blob id meaning "no blob currently captured by the tool".
BOGUS_BLOB_ID = -1
def l2norm(v):
    """Return the Euclidean (L2) norm of the vector ``v``."""
    squared = dot(v, v)
    return sqrt(squared)
class MPPanTool(PanTool):
    """Pan tool driven by blob (multi-pointer) events instead of the mouse."""

    # Id of the blob currently panning, or BOGUS_BLOB_ID when idle.
    cur_bid = Int(BOGUS_BLOB_ID)

    def normal_blob_down(self, event):
        """Claim the first blob that lands and start panning with it."""
        if self.cur_bid != BOGUS_BLOB_ID:
            return
        self.cur_bid = event.bid
        self._start_pan(event, capture_mouse=False)
        event.window.capture_blob(self, event.bid, event.net_transform())

    def panning_blob_up(self, event):
        """End the pan when our captured blob lifts."""
        if event.bid != self.cur_bid:
            return
        self.cur_bid = BOGUS_BLOB_ID
        self._end_pan(event)

    def panning_blob_move(self, event):
        """Forward moves of our blob to the stateful mouse-move handler."""
        if event.bid == self.cur_bid:
            self._dispatch_stateful_event(event, "mouse_move")

    def panning_mouse_leave(self, event):
        """ Handles the mouse leaving the plot when the tool is in the 'panning'
        state.

        Don't end panning.
        """
        return

    def _end_pan(self, event):
        """Release the captured blob (if any), then finish the base pan."""
        if hasattr(event, "bid"):
            event.window.release_blob(event.bid)
        PanTool._end_pan(self, event)
class MPDragZoom(DragZoom):
    """Drag-zoom tool driven by two blobs (multi-pointer contacts).

    The zoom factor is the ratio between the blobs' original separation
    (captured in ``drag_start``) and their current separation; the view is
    also translated by the movement of the blobs' midpoint.
    """

    # Multiplier applied to the diagonal ratio when computing the zoom.
    speed = 1.0

    # The original dataspace points where blobs 1 and 2 went down
    _orig_low = CArray #Trait(None, None, Tuple)
    _orig_high = CArray #Trait(None, None, Tuple)

    # Dataspace center of the zoom action
    _center_pt = Trait(None, None, Tuple)

    # Maps blob ID numbers to the (x,y) coordinates that came in.
    _blobs = Dict()

    # Maps blob ID numbers to the (x0,y0) coordinates from blob_move events.
    _moves = Dict()

    # Properties to convert the dictionaries to map from blob ID numbers to
    # a single coordinate appropriate for the axis the range selects on.
    _axis_blobs = Property(Dict)
    _axis_moves = Property(Dict)

    def _convert_to_axis(self, d):
        """ Convert a mapping of ID to (x,y) tuple to a mapping of ID to just
        the coordinate appropriate for the selected axis.
        """
        # NOTE(review): `axis` / `axis_index` are assumed to be supplied by
        # the tool hierarchy -- confirm they exist on DragZoom instances.
        if self.axis == 'index':
            idx = self.axis_index
        else:
            idx = 1-self.axis_index
        d2 = {}
        for id, coords in d.items():
            d2[id] = coords[idx]
        return d2

    def _get__axis_blobs(self):
        # Trait property getter for _axis_blobs.
        return self._convert_to_axis(self._blobs)

    def _get__axis_moves(self):
        # Trait property getter for _axis_moves.
        return self._convert_to_axis(self._moves)

    def drag_start(self, event, capture_mouse=False):
        """Record the gesture's starting geometry once both blobs are down."""
        bid1, bid2 = sorted(self._moves)
        xy01, xy02 = self._moves[bid1], self._moves[bid2]
        # Bounding box of the two starting points, mapped to data space.
        self._orig_low, self._orig_high = map(asarray,
            self._map_coordinate_box(xy01, xy02))
        self.orig_center = (self._orig_high + self._orig_low) / 2.0
        self.orig_diag = l2norm(self._orig_high - self._orig_low)
        #DragZoom.drag_start(self, event, capture_mouse)
        # The following replicates the relevant parts of DragZoom.drag_start
        # using the second blob's coordinates in place of the mouse position.
        self._original_xy = xy02
        c = self.component
        self._orig_screen_bounds = ((c.x,c.y), (c.x2,c.y2))
        self._original_data = (c.x_mapper.map_data(xy02[0]), c.y_mapper.map_data(xy02[1]))
        self._prev_y = xy02[1]
        if capture_mouse:
            event.window.set_pointer(self.drag_pointer)

    def normal_blob_down(self, event):
        # Track at most two blobs; capture each so we keep receiving events.
        if len(self._blobs) < 2:
            self._blobs[event.bid] = (event.x, event.y)
            event.window.capture_blob(self, event.bid,
                                      transform=event.net_transform())
            event.handled = True

    def normal_blob_up(self, event):
        self._handle_blob_leave(event)

    def normal_blob_move(self, event):
        self._handle_blob_move(event)

    def normal_blob_frame_end(self, event):
        # Once both blobs have reported a move, the zoom gesture begins.
        if len(self._moves) == 2:
            self.event_state = "dragging"
            self.drag_start(event, capture_mouse=False)

    def dragging_blob_move(self, event):
        self._handle_blob_move(event)

    def dragging_blob_frame_end(self, event):
        """Recompute and apply the zoom/translation at the end of a frame."""
        # Get dataspace coordinates of the previous and new coordinates
        bid1, bid2 = sorted(self._moves)
        p1, p2 = self._blobs[bid1], self._blobs[bid2]
        low, high = map(asarray, self._map_coordinate_box(p1, p2))
        # Compute the amount of translation
        center = (high + low) / 2.0
        translation = center - self.orig_center
        # Computing the zoom factor. We have the coordinates of the original
        # blob_down events, and we have a new box as well. For now, just use
        # the relative sizes of the diagonals.
        diag = l2norm(high - low)
        zoom = self.speed * self.orig_diag / diag
        # The original screen bounds are used to test if we've reached max_zoom
        orig_screen_low, orig_screen_high = \
            map(asarray, self._map_coordinate_box(*self._orig_screen_bounds))
        new_low = center - zoom * (center - orig_screen_low) - translation
        new_high = center + zoom * (orig_screen_high - center) - translation
        # Abort the whole update if either axis would exceed the zoom limit.
        for ndx in (0,1):
            if self._zoom_limit_reached(orig_screen_low[ndx],
                    orig_screen_high[ndx], new_low[ndx], new_high[ndx]):
                return
        c = self.component
        c.x_mapper.range.set_bounds(new_low[0], new_high[0])
        c.y_mapper.range.set_bounds(new_low[1], new_high[1])
        self.component.request_redraw()

    def dragging_blob_up(self, event):
        self._handle_blob_leave(event)

    def _handle_blob_move(self, event):
        """Record the current and frame-start coordinates of a tracked blob."""
        if event.bid not in self._blobs:
            return
        self._blobs[event.bid] = event.x, event.y
        self._moves[event.bid] = event.x0, event.y0
        event.handled = True

    def _handle_blob_leave(self, event):
        """Forget a departed blob and drop back to 'normal' when < 2 remain."""
        if event.bid in self._blobs:
            del self._blobs[event.bid]
            self._moves.pop(event.bid, None)
            event.window.release_blob(event.bid)
        if len(self._blobs) < 2:
            self.event_state = "normal"
class MPPanZoom(BaseTool):
    """ This tool wraps a pan and a zoom tool, and automatically switches
    behavior back and forth depending on how many blobs are tracked on
    screen.
    """

    # The wrapped single-blob pan tool.
    pan = Instance(MPPanTool)

    # The wrapped two-blob drag-zoom tool.
    zoom = Instance(MPDragZoom)

    event_state = Enum("normal", "pan", "zoom")

    # Blob bookkeeping lives on the zoom tool; delegate so this wrapper can
    # inspect the same dictionaries.
    _blobs = Delegate('zoom')
    _moves = Delegate('zoom')

    def _dispatch_stateful_event(self, event, suffix):
        """Forward *event* to both wrapped tools, then derive our own state
        from the number of blobs currently tracked."""
        self.zoom.dispatch(event, suffix)
        # Reset handled so the pan tool also gets a chance at the event.
        event.handled = False
        self.pan.dispatch(event, suffix)
        if len(self._blobs) == 2:
            self.event_state = 'zoom'
        elif len(self._blobs) == 1:
            self.event_state = 'pan'
        elif len(self._blobs) == 0:
            self.event_state = 'normal'
        else:
            # More than two blobs should be impossible by construction.
            assert len(self._blobs) <= 2
        if suffix == 'blob_up':
            event.window.release_blob(event.bid)
        elif suffix == 'blob_down':
            # Re-capture the blob for this wrapper after the wrapped tools
            # have processed the down event.
            event.window.release_blob(event.bid)
            event.window.capture_blob(self, event.bid, event.net_transform())
        event.handled = True

    def _component_changed(self, old, new):
        # Keep both wrapped tools pointed at the same component.
        self.pan.component = new
        self.zoom.component = new

    def _pan_default(self):
        # Default trait initializer for `pan`.
        return MPPanTool(self.component)

    def _zoom_default(self):
        # Default trait initializer for `zoom`.
        return MPDragZoom(self.component)
class MPLegendTool(LegendTool):
    """Legend drag tool driven by blob (multi-pointer) events."""

    event_state = Enum("normal", "dragging")

    # ID of the blob currently dragging the legend; -1 when idle.
    cur_bid = Int(-1)

    def normal_blob_down(self, event):
        """Begin a drag when a blob lands on a draggable part of the legend."""
        if self.cur_bid != -1:
            return
        if not self.is_draggable(event.x, event.y):
            return
        self.cur_bid = event.bid
        self.drag_start(event)

    def dragging_blob_up(self, event):
        """Finish the drag when the tracked blob lifts."""
        if event.bid != self.cur_bid:
            return
        self.cur_bid = -1
        self.drag_end(event)

    def dragging_blob_move(self, event):
        """Update the drag as the tracked blob moves."""
        if event.bid == self.cur_bid:
            self.dragging(event)

    def drag_start(self, event):
        """Capture the blob (or the mouse) and enter the 'dragging' state."""
        if not self.component:
            return
        self.original_padding = self.component.padding
        # Blob events carry a `bid`; plain mouse events do not.
        if hasattr(event, "bid"):
            event.window.capture_blob(self, event.bid,
                                      event.net_transform())
        else:
            event.window.set_mouse_owner(self, event.net_transform())
        self.mouse_down_position = (event.x, event.y)
        self.event_state = "dragging"
        event.handled = True

    def drag_end(self, event):
        """Release the blob capture, then defer to the base implementation."""
        if hasattr(event, "bid"):
            event.window.release_blob(event.bid)
        self.event_state = "normal"
        LegendTool.drag_end(self, event)
class MPRangeSelection(RangeSelection):
    """Range-selection tool driven by blob (multi-pointer) events.

    Two blobs define the selection interval along the selected axis; a
    single moving blob drags an existing selection.
    """

    # Maps blob ID numbers to the (x,y) coordinates that came in.
    _blobs = Dict()

    # Maps blob ID numbers to the (x0,y0) coordinates from blob_move events.
    _moves = Dict()

    # Properties to convert the dictionaries to map from blob ID numbers to
    # a single coordinate appropriate for the axis the range selects on.
    _axis_blobs = Property(Dict)
    _axis_moves = Property(Dict)

    def _convert_to_axis(self, d):
        """ Convert a mapping of ID to (x,y) tuple to a mapping of ID to just
        the coordinate appropriate for the selected axis.
        """
        if self.axis == 'index':
            idx = self.axis_index
        else:
            idx = 1-self.axis_index
        d2 = {}
        for id, coords in d.items():
            d2[id] = coords[idx]
        return d2

    def _get__axis_blobs(self):
        # Trait property getter for _axis_blobs.
        return self._convert_to_axis(self._blobs)

    def _get__axis_moves(self):
        # Trait property getter for _axis_moves.
        return self._convert_to_axis(self._moves)

    def normal_blob_down(self, event):
        # Track at most two blobs; capture each so we keep receiving events.
        if len(self._blobs) < 2:
            self._blobs[event.bid] = (event.x, event.y)
            event.window.capture_blob(self, event.bid,
                                      transform=event.net_transform())
            event.handled = True

    def normal_blob_up(self, event):
        self._handle_blob_leave(event)

    def normal_blob_frame_end(self, event):
        # Once two blobs are down, their axis coordinates become the
        # initial selection.
        if len(self._blobs) == 2:
            self.event_state = "selecting"
            #self.drag_start(event, capture_mouse=False)
            #self.selecting_mouse_move(event)
            self._set_sizing_cursor(event)
            self.selection = sorted(self._axis_blobs.values())

    def selecting_blob_move(self, event):
        # Record both the current and the frame-start coordinates.
        if event.bid in self._blobs:
            self._blobs[event.bid] = event.x, event.y
            self._moves[event.bid] = event.x0, event.y0

    def selecting_blob_up(self, event):
        self._handle_blob_leave(event)

    def selecting_blob_frame_end(self, event):
        """Update the selection from the blobs' end-of-frame positions."""
        if self.selection is None:
            return
        elif len(self._blobs) == 2:
            # Two blobs: the selection spans their mapped data coordinates.
            # NOTE(review): `low`/`high` below are computed but never used.
            axis_index = self.axis_index
            low = self.plot.position[axis_index]
            high = low + self.plot.bounds[axis_index] - 1
            p1, p2 = self._axis_blobs.values()
            # XXX: what if p1 or p2 is out of bounds?
            m1 = self.mapper.map_data(p1)
            m2 = self.mapper.map_data(p2)
            low_val = min(m1, m2)
            high_val = max(m1, m2)
            self.selection = (low_val, high_val)
            self.component.request_redraw()
        elif len(self._moves) == 1:
            # One moving blob inside the selection drags the whole interval.
            # NOTE(review): `dict.items()[0]` assumes Python 2 (items() is a
            # list); Python 3 would need list(self._axis_moves.items())[0].
            id, p0 = self._axis_moves.items()[0]
            m0 = self.mapper.map_data(p0)
            low, high = self.selection
            if low <= m0 <= high:
                m1 = self.mapper.map_data(self._axis_blobs[id])
                dm = m1 - m0
                self.selection = (low+dm, high+dm)

    def selected_blob_down(self, event):
        # Same bookkeeping as normal_blob_down, from the 'selected' state.
        if len(self._blobs) < 2:
            self._blobs[event.bid] = (event.x, event.y)
            event.window.capture_blob(self, event.bid,
                                      transform=event.net_transform())
            event.handled = True

    def selected_blob_move(self, event):
        if event.bid in self._blobs:
            self._blobs[event.bid] = event.x, event.y
            self._moves[event.bid] = event.x0, event.y0

    def selected_blob_frame_end(self, event):
        self.selecting_blob_frame_end(event)

    def selected_blob_up(self, event):
        self._handle_blob_leave(event)

    def _handle_blob_leave(self, event):
        """Forget a departed blob and finalize the selection."""
        self._moves.pop(event.bid, None)
        if event.bid in self._blobs:
            del self._blobs[event.bid]
            event.window.release_blob(event.bid)
        # Treat the blob leave as a selecting_mouse_up event
        self.selecting_right_up(event)
        if len(self._blobs) < 2:
            self.event_state = "selected"
| {
"repo_name": "burnpanck/chaco",
"path": "examples/demo/canvas/mptools.py",
"copies": "3",
"size": "12674",
"license": "bsd-3-clause",
"hash": 3051875753965798000,
"line_mean": 32.5291005291,
"line_max": 90,
"alpha_frac": 0.5863184472,
"autogenerated": false,
"ratio": 3.5343000557724484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0040984357569393915,
"num_lines": 378
} |
''' A collection of classes and functions for retrieving entries from an Atom
feed.
Atom standard: https://en.wikipedia.org/wiki/Atom_(standard)
'''
from helpers.content import Content
from helpers.xmlnode import get_feed
# Atom elements are namespaced; NS_FORMAT expands a bare tag name into the
# fully-qualified '{namespace}tag' form that xml.etree uses for element tags.
NS_FORMAT = '{{http://www.w3.org/2005/Atom}}{tag}'
# Pre-expanded tag names for the Atom elements this module reads.
ENTRY_TAG = NS_FORMAT.format(tag='entry')
ID_TAG = NS_FORMAT.format(tag='id')
PUBLISHED_TAG = NS_FORMAT.format(tag='published')
TITLE_TAG = NS_FORMAT.format(tag='title')
LINK_TAG = NS_FORMAT.format(tag='link')
CONTENT_TAG = NS_FORMAT.format(tag='content')
CATEGORY_TAG = NS_FORMAT.format(tag='category')
def atom_to_content(node):
    ''' Convert an XMLNode representing an Atom entry into Content.
    - node : An XMLNode
    - return : Content
    '''
    children = node.children
    return Content(
        id=children[ID_TAG][0].text,
        date=children[PUBLISHED_TAG][0].text,
        title=children[TITLE_TAG][0].text,
        link=children[LINK_TAG][0].attributes['href'],
        content=children[CONTENT_TAG][0].text,
        tags=[category.text for category in children[CATEGORY_TAG]]
    )
def get_atom_entries_from_node(root):
    ''' Get an iterable of Atom entries from an XMLNode.
    - root : The root XMLNode representing an Atom feed.
    - return : An iterable of XMLNodes
    '''
    entries = root.children[ENTRY_TAG]
    return entries
def get_atom_feed(url):
    ''' Get an Atom feed from a URL.
    - url : The URL of an Atom feed
    - return : the feed as an iterable of Content
    '''
    yield from get_feed(url, get_atom_entries_from_node, atom_to_content)
def main():
    '''Print every entry of the example GitHub Atom feed as JSON.'''
    for item in get_atom_feed('https://github.com/foldr.atom'):
        print(item.to_json_dict())
if __name__ == '__main__':
    main()
| {
"repo_name": "foldr/collector-atom",
"path": "atom.py",
"copies": "1",
"size": "1830",
"license": "mit",
"hash": -8650174065483614000,
"line_mean": 24.9117647059,
"line_max": 77,
"alpha_frac": 0.6327868852,
"autogenerated": false,
"ratio": 3.3951762523191094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45279631375191093,
"avg_score": null,
"num_lines": null
} |
''' A collection of classes for generating XML documents lazily from an
iterable stream of data.
'''
from functools import partial
from itertools import islice
from urllib.request import urlopen
from xml.etree.ElementTree import XMLPullParser
def nth(iterable, n):
    ''' Return the nth item of an iterable.
    - iterable : an iterable
    - n : the index of the item to retrieve
    - return : the nth element; raises if the iterable has too few elements
    '''
    remainder = islice(iterable, n, None)
    try:
        return next(remainder)
    except StopIteration:
        raise Exception('Iterable out of range: {}'.format(n))
class BufferedIter:
    ''' Buffers an iterable so it can be iterated any number of times.

    Consumed items are kept in an internal buffer and replayed on later
    (or concurrently running) iterations; the source is consumed lazily
    and exactly once.
    '''

    def __init__(self, iter):
        ''' Create a new BufferedIter from the supplied iterable.
        - iter : an iterable
        '''
        # Normalize to a single-consumption iterator so that every consumer
        # must go through the shared buffer below.
        self.iter = (element for element in iter)
        self.buffer = []

    def __iter__(self):
        ''' Yield buffered items, pulling from the source only on demand.

        BUG FIX: the original implementation let two interleaved iterators
        both consume the source directly, so items could be skipped or
        duplicated. Each iterator now tracks its own index into the shared
        buffer, and the source is only consumed to extend that buffer.
        - return : an iterable of items
        '''
        index = 0
        while True:
            # Extend the shared buffer until it covers this position.
            while index >= len(self.buffer):
                try:
                    self.buffer.append(next(self.iter))
                except StopIteration:
                    return
            yield self.buffer[index]
            index += 1

    def __getitem__(self, index):
        ''' Get the index'th item in the buffered iterable.
        - index : the index of the item to retrieve
        - return : the item at the index'th position
        '''
        return nth(self, index)
class XMLNodeCollection:
    ''' A filterable collection of XML nodes.

    Indexing with an integer returns the node at that position; indexing
    with any other key treats it as a tag name and returns a filtered
    sub-collection.
    '''

    def __init__(self, nodes):
        ''' Wrap the supplied nodes in a replayable buffer.
        - nodes: an iterable of nodes
        '''
        self.nodes = BufferedIter(nodes)

    def __iter__(self):
        ''' Iterate over every node in the collection.
        - return : an iterable of nodes
        '''
        return iter(self.nodes)

    def __getitem__(self, key):
        ''' Select by position (int key) or filter by tag (any other key).
        - key : an integer index or a tag filter
        - return : a node, or a subset of nodes
        '''
        if isinstance(key, int):
            return self.nodes[key]
        matches = (node for node in self if node.tag == key)
        return XMLNodeCollection(matches)
class XMLNode:
    ''' Class representing an XML node. This class lazily generates its child
    nodes by parsing only as much as required.
    '''
    @staticmethod
    def parse(iter):
        ''' Create a new XML node. This takes an iterable which feeds text
        lazily to the XML parser. The iterable is consumed as required to
        generate child nodes as requested.
        - iter : an iterable that generates bytes for the XML parser
        - return : an XMLNode
        '''
        parser = XMLPullParser(['start', 'end'])
        events = XMLNode.stream_events(parser, iter)
        return XMLNode(events)

    @staticmethod
    def stream_events(parser, iter):
        ''' Feed the parser as required to lazily generate an iterable of
        parse events. Not intended for use by user code.
        - parser : an XMLPullParser
        - iter : an iterable which feeds bytes to the parser
        - return : an iterable of parse events.
        '''
        for chunk in iter:
            parser.feed(chunk)
            yield from parser.read_events()

    def __init__(self, events, element=None):
        ''' Create a new XML node. Not intended for use by user code, please
        use XMLNode.parse instead.
        - events : an iterable of parse events
        - element : the current element read from the parser
        '''
        self.events = events
        self.__element = element
        self.__children = None
        self.__text = None

    def __force(self):
        ''' Force evaluation of this node and all its child nodes, to advance
        the parser to the end of this node.
        '''
        self.element
        for _ in self.children:
            pass

    @property
    def element(self):
        ''' Lazily get the Element representing this node.
        - return : the Element
        '''
        # BUG FIX: identity comparison with `is None` instead of `== None`
        # (equality could invoke arbitrary __eq__ semantics).
        if self.__element is None:
            (event, element) = next(self.events)
            self.__element = element
        return self.__element

    @property
    def tag(self):
        ''' Lazily get the tag of this element.
        - return : the tag as a string
        '''
        return self.element.tag

    @property
    def attributes(self):
        ''' Lazily get the attributes of this element.
        - return : the attributes as a dictionary
        '''
        return self.element.attrib

    @property
    def children(self):
        ''' Lazily get an iterable of child nodes.
        - return : the children as an iterable of XMLNode
        '''
        if self.__children is None:
            def children():
                events = self.events
                self.element  # ensure element has already been read
                for (event, element) in events:
                    if event == 'start':
                        # A nested 'start' opens a child; force it so the
                        # stream is positioned after the child on return.
                        child = XMLNode(events, element)
                        yield child
                        child.__force()
                    elif event == 'end':
                        # Our own 'end' event: text is now available.
                        self.__text = element.text
                        return
                    else:
                        # BUG FIX: raise with a message rather than bare.
                        raise Exception('Unexpected parse event: {}'.format(event))
            self.__children = XMLNodeCollection(children())
        return self.__children

    @property
    def text(self):
        ''' Lazily get the text of this element.
        - return : the text as a string
        '''
        self.__force()
        return self.__text

    def __eq__(self, other):
        # Structural equality: same tag, same text, and pairwise-equal children.
        return (
            self.tag == other.tag
            and self.text == other.text
            and all(left == right for (left, right) in zip(self.children, other.children))
        )
def get_feed(url, selector, converter):
    ''' Get a feed from a URL.
    - url : The URL of a feed.
    - selector : A function to retrieve the iterable of content elements.
    - converter : A function to convert an xml element.
    - return : An iterable of Content
    '''
    with urlopen(url) as response:
        # Stream the response in 256-byte chunks until EOF (empty bytes).
        chunks = iter(partial(response.read, 256), b'')
        root = XMLNode.parse(chunks)
        yield from map(converter, selector(root))
"repo_name": "foldr/helpers-py",
"path": "xmlnode.py",
"copies": "1",
"size": "6902",
"license": "mit",
"hash": -5187537150939887000,
"line_mean": 26.2950819672,
"line_max": 90,
"alpha_frac": 0.5523036801,
"autogenerated": false,
"ratio": 4.816468946266574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5868772626366574,
"avg_score": null,
"num_lines": null
} |
"""A collection of classes that are part of the standard runtime environment."""
class RuntimeException(Exception):
    """Base class for errors raised by the runtime environment."""
    def __init__(self, message):
        text = "RuntimeException: " + message
        super().__init__(text)
class NamespaceException(RuntimeException):
    """Raised when a lookup fails in every reachable namespace."""
    def __init__(self, item):
        message = "%s does not exist in the search space." % item
        super().__init__(message)
class Namespace:
    """A lexically nested lookup table for identifiers, operators and types."""

    def __init__(self, parent):
        """Create a namespace chained to *parent* (None for the root)."""
        # parent namespace, consulted when a local lookup misses
        self.parent = parent
        self.search_spaces = {
            "id": {},  # identifier search space
            "op": {},  # operator search space
            "ty": {},  # type search space
        }

    def find(self, space, key):
        """Look *key* up in *space*, here and in all ancestor namespaces."""
        local = self.search_spaces[space]
        if key in local:
            return local[key]
        if self.parent is not None:
            return self.parent.find(space, key)
        raise NamespaceException(key)

    def store(self, item):
        """File *item* into the search space matching its exact type."""
        # Exact-type dispatch (subclasses are deliberately not accepted).
        space_by_type = {Value: "id", Function: "id",
                         Operator: "op", Datatype: "ty"}
        space = space_by_type.get(type(item))
        if space is None:
            raise RuntimeException("The item cannot be stored in namespace")
        key = item.symbol if space == "op" else item.name
        self.search_spaces[space][key] = item

    def store_all(self, items):
        """Store every element of *items*."""
        for element in items:
            self.store(element)

    def child(self):
        """Return a new namespace nested inside this one."""
        return Namespace(self)

    def __str__(self):
        return "<Namespace>"
class Context:
    """Holds the mutable evaluation state: current and global namespace."""

    def __init__(self, namespace):
        self.namespace = namespace         # namespace used for lookups
        self.global_namespace = namespace  # the root namespace at creation
        self.behaviour = "default"
        self.flags = []

    def store(self, item):
        """Forwards to Namespace.store"""
        return self.namespace.store(item)

    def load(self, library):
        """Loads a library's EXPORTS into the current namespace."""
        self.namespace.store_all(library.EXPORTS)

    def find(self, space, key):
        """Forwards to Namespace.find"""
        return self.namespace.find(space, key)

    def substitute(self):
        """Swap in a child namespace; return the namespace it replaced."""
        previous = self.namespace
        self.namespace = previous.child()
        return previous

    def __str__(self):
        return "<Context>"
class Value(object):
    """A typed runtime value, optionally bound to a name."""

    def __init__(self, datatype, data=None, name=None):
        self.datatype = datatype  # the Datatype describing this value
        self.data = data          # the raw payload
        self.name = name          # identifier the value is bound to, if any

    def __eq__(self, other):
        # Equality ignores the bound name: only type and payload compare.
        if not isinstance(other, self.__class__):
            return False
        return self.datatype == other.datatype and self.data == other.data

    def __ne__(self, other):
        return not self.__eq__(other)

    def format(self):
        """Render the payload using the datatype's formatter."""
        return self.datatype.format(self.data)

    def __str__(self):
        return "<Value ? %s *(%s)>" % (self.datatype, self.name)
class ArgumentException(Exception):
    """Raised when the number of arguments does not match the signature."""
    def __init__(self, expected, got):
        # BUG FIX: the original produced "Too manyarguments" /
        # "Too lessarguments" (missing space, wrong comparative).
        message = "Too many arguments" if expected < got else "Too few arguments"
        super().__init__("%s: expected %d, got %d" % (message, expected, got))
class ArgumentCastException(Exception):
    """Raised when argument types cannot be matched against the signature."""
    def __init__(self, expected, got):
        message = "No match found for %s to %s" % (expected, got)
        super().__init__(message)
class Signature(object):
    """A parameter list that can be matched against a concrete call.

    ``expected`` is a sequence of parameter descriptors, each exposing
    ``.name``, ``.datatype`` and an optional default value in ``.data``;
    ``function`` is the callable target returned on a successful match.
    """

    def __init__(self, expected, function):
        self.expected = expected
        self.function = function

    def match(self, args):
        """Check whether *args* satisfy this signature.

        Returns (matched_args, function) on success.
        Raises ArgumentException when the count is wrong and
        ArgumentCastException when a type does not fit.
        """
        number_of_expected, number_of_args = len(self.expected), len(args)
        # Too many arguments
        if number_of_expected < number_of_args:
            raise ArgumentException(number_of_expected, number_of_args)
        matched_args = []
        # Iterate until max arguments reached
        for index in range(number_of_expected):
            expected = self.expected[index]
            expected_type = expected.datatype
            if number_of_args > index:
                # A caller-supplied argument exists: type-check and cast it.
                arg = args[index]
                arg_type = arg.datatype
                if not arg_type.kind_of(expected_type):
                    raise ArgumentCastException(
                        expected_type, arg_type)
                var = arg_type.cast(arg)
            elif expected.data is not None:
                # Not enough arguments given, fall back to the default value.
                var = expected_type.cast(expected)
            else:
                # Not enough arguments given, no default value either.
                raise ArgumentException(number_of_expected, number_of_args)
            # Bind the matched value to the parameter's name.
            var.name = expected.name
            matched_args.append(var)
        return matched_args, self.function

    def __str__(self):
        # BUG FIX: the original joined `self.expected.name` -- an
        # AttributeError, since `expected` is a list. Join the individual
        # parameter names instead.
        names = ",".join(parameter.name for parameter in self.expected)
        return "<Signature (%s)>" % names
class FunctionException(Exception):
    """Raised when a call cannot be resolved against any signature."""
    def __init__(self, function, message="No signature found"):
        text = "%s in %s" % (message, function)
        super().__init__(text)
class Function(object):
    """A callable runtime object backed by one or more signatures."""

    def __init__(self, signatures, name=None, source_ns=None):
        self.signatures = signatures  # candidate Signature objects
        self.name = name
        self.source_ns = source_ns    # namespace of the definition site

    def format(self):
        return self.__str__()

    def eval(self, args, context):
        """Evaluate the first signature that accepts *args*.

        Raises FunctionException when no signature matches."""
        for signature in self.signatures:
            try:
                values, target = signature.match(args)
                # Run the body in a fresh namespace rooted at the
                # definition site, with the matched args bound into it.
                saved, context.namespace = (context.namespace,
                                            Namespace(self.source_ns))
                context.namespace.store_all(values)
                result = target.eval(context)
                context.namespace = saved
                return result
            except (ArgumentException, ArgumentCastException):
                continue
        raise FunctionException(self)

    def __str__(self):
        return "<Function *(%s)>" % self.name
class FunctionBinding(object):
    """Adapts a plain Python callable to the runtime's eval protocol."""

    def __init__(self, fnc):
        # the wrapped Python callable; receives the evaluation context
        self.fnc = fnc

    def eval(self, context):
        """Invoke the wrapped callable with *context*."""
        return self.fnc(context)
class OperatorException(RuntimeException):
    """Raised when an operator matches none of its registered functions."""
    def __init__(self, op):
        message = "Operator %s is not applicable." % op
        super().__init__(message)
class Operator(object):
    """An operator symbol dispatching over a list of candidate functions."""

    def __init__(self, base_function, symbol):
        self.functions = [base_function]  # candidates, tried in order
        self.symbol = symbol

    def add_function(self, fnc):
        """Register an additional overload for this operator."""
        self.functions.append(fnc)

    def eval(self, args, context):
        """Evaluate the first overload that accepts *args*.

        Raises OperatorException when every overload rejects the call."""
        for candidate in self.functions:
            try:
                return candidate.eval(args, context)
            except FunctionException:
                continue
        raise OperatorException(self.symbol)

    def __str__(self):
        return "<Operator (%s)>" % self.symbol
class Datatype(object):
    """A named runtime type with an optional parent and cast/format hooks."""

    def __init__(self, name, cast=None, parent=None, format=None):
        self.name = name
        self.cast = cast      # callable casting a value to this type
        self.parent = parent  # supertype, or None for a root type
        # BUG FIX: the original also defined a `format` *method* that
        # delegated to `self.format` -- dead code that was always shadowed
        # by this instance attribute (and would have recursed if reachable).
        # The formatter callable is stored directly instead.
        self.format = format

    def kind_of(self, itemtype):
        """Return True if this type is *itemtype* or a subtype of it."""
        if self is itemtype:
            return True
        if self.parent is not None:
            return self.parent.kind_of(itemtype)
        return False

    def __str__(self):
        return "<T %s>" % self.name
class AssignmentException(RuntimeException):
    """Raised when a value's datatype differs from the assignment target's."""
    def __init__(self, typeA, typeB):
        message = "%s can not be assigned to %s" % (typeA, typeB)
        super().__init__(message)
class CastException(RuntimeException):
    """Raised when a value cannot be cast to another type."""
    def __init__(self, value, datatype):
        message = "%s not parseable to %s" % (value, datatype)
        super().__init__(message)
def empty_context():
    """Return a fresh Context backed by a single root Namespace."""
    root = Namespace(None)
    return Context(root)
# Types that belong to the REnv, not to the RLib
# ANY is the universal root type; it has no cast hook and no formatter.
ANY = Datatype("*any", None)
# NULL formats as the literal "null"; its cast returns a fresh NULL Value.
NULL = Datatype("null", lambda x: Value(NULL), ANY, lambda x: "null")
| {
"repo_name": "lnsp/tea",
"path": "runtime/env.py",
"copies": "1",
"size": "9484",
"license": "mit",
"hash": -7800075810921580000,
"line_mean": 31.0405405405,
"line_max": 101,
"alpha_frac": 0.5947912273,
"autogenerated": false,
"ratio": 4.328617069831127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5423408297131127,
"avg_score": null,
"num_lines": null
} |
"""A collection of classes to make working with energy and reserve offers
from the New Zealand Electricity Market a bit more painless.
Contains a master class, Offer, which publically exposed subclasses
inherit from.
This master class contains the code necessary for the exposed subclasses
to function.
"""
# Standard Library
import sys
import os
import datetime as dt
from dateutil.parser import parse
from collections import defaultdict
from datetime import datetime, timedelta
import collections
import functools
import itertools
# Non C Dependencies
import simplejson as json
# C Dependencies
import pandas as pd
import numpy as np
sys.path.append(os.path.join(os.path.expanduser("~"),
'python', 'pdtools'))
try:
import pdtools
except:
print "Failed to import pdtools"
try:
CONFIG = json.load(open(os.path.join(
os.path.expanduser('~/python/nzem/nzem/_static/'), 'config.json')))
except:
print "CONFIG File does not exist"
class Offer(object):
"""The Master Offer Class"""
    def __init__(self, offers, run_operations=True):
        """ Create the offer class by passing an offer DataFrame, optionally
        run a number of modifications
        Parameters
        ----------
        offers : type Pandas DataFrame
            A Pandas DataFrame containing offer data
        run_operations : type bool
            Run the operations on the contained offers
        Returns
        -------
        Offer : type Offer
            A container around a Pandas DataFrame containing additional
            functionality
        """
        super(Offer, self).__init__()
        # Work on a copy so the caller's DataFrame is never mutated.
        self.offers = offers.copy()
        self.offer_stack = None
        if run_operations:
            # Normalisation pipeline: tidy column names, parse dates, attach
            # region/island metadata, build trading datetimes, re-order rows
            # (via _sort_offers, defined elsewhere in the class), then reset
            # the index to a clean 0..n-1 range.
            self._retitle_columns()
            self._convert_dates()
            self._map_location()
            self._apply_datetime()
            self._sort_offers()
            self.offers.index = np.arange(len(self.offers))
def stack_columns(self):
""" Stack a horizontal dataframe into a vertical configuration to
improve functionality
Exists as an exposed wrapper around the _stacker() class which
is a python generator yielding stacked DataFrames.
Returns
-------
self.offer_stack : type Pandas DataFrame
A DataFrame containing offer data with identifiers that has
been stacked vertically
"""
self.offer_stack = pd.concat(self._stacker(), ignore_index=True)
def filter_dates(self, begin_date=None, end_date=None,
horizontal=False, inplace=True, return_df=False):
""" Filter either the Offer DataFrame or Stacked Offer frame
between two dates.
Parameters
----------
begin_date: str, datetime, default None
The first date to take, inclusive
end_date: str, datetime, default None
The last date to take, inclusive
horizontal: bool, default False
Whether to apply to the stacked offer frame or the horizontal offer frame
inplace: bool, default True
Overwrite the current offer with the new one
return_df: bool, default False
Return the filtered result to the user.
Returns
-------
offer: DataFrame
A subset of the initial offers for the date range specified.
"""
if horizontal:
offers = self.offers
else:
offers = self.offer_stack
offers = offers.ge_mask("Trading Date", begin_date).le_mask("Trading Date", end_date)
if inplace:
if horizontal:
self.offers = offers
else:
self.offer_stack = offers
if return_df:
return offers
def filter_stack(self, date=None, period=None, product_type=None,
reserve_type=None, island=None, company=None,
region=None, station=None, non_zero=False,
return_df=False):
"""Filter a vertically stacked offer dataframe to obtain a
subset of the data within it.
Parameters
----------
self.offer_stack : Pandas DataFrame
Stacked data, if it does not exist it will be created
date : str, bool default None
The trading date to filter by
period : str, bool default None
The Trading Period to filter by
product_type : str, bool default None
Which product, IL, PLSR, TWDSR, Energy to filter by
reserve_type : str, bool default None
FIR, SIR or Energy, which reserve type to use
island : str, bool default None
Whether to filter by a specific Island e.g. (North Island)
company : str, bool default None
Filter a specific company (e.g. MRPL)
region : str, bool, default None
Filter a specific region (e.g. Auckland)
station : str, bool, default None
Which Station to filter, Generators optionally
non_zero : bool, default False
Return only non zero offers
return_df : bool, default False
Return the filtered DataFrame as well as saving to latest query
Returns
-------
fstack : Pandas DataFrame
The filtered DataFrame
self.fstack : Pandas DataFrame
The filtered DataFrame applied to a class method
"""
if not isinstance(self.offer_stack, pd.DataFrame):
self.stack_columns()
fstack = self.offer_stack.copy()
if date:
fstack = fstack.eq_mask("Trading Date", date)
if period:
fstack = fstack.eq_mask("Trading Period", period)
if product_type:
fstack = fstack.eq_mask("Product Type", product_type)
if reserve_type:
fstack = fstack.eq_mask("Reserve Type", reserve_type)
if island:
fstack = fstack.eq_mask("Island", island)
if company:
fstack = fstack.eq_mask("Company", company)
if region:
fstack = fstack.eq_mask("Region", region)
if station:
fstack = fstack.eq_mask("Station", station)
if non_zero:
fstack = fstack.gt_mask("Max", 0)
self.fstack = fstack
if return_df:
return fstack
def clear_offer(self, requirement, fstack=None, return_df=True):
""" Clear the offer stack against a requirement
Parameters
----------
self.fstack : pandas DataFrame
The filter query, must be for a single period and date
requirement : float
The requirement for energy or reserve, must be a positive number
fstack : pandas DataFrame, bool default None
Optional argument to not use the current query
return_df : bool, default True
Return the DataFrame to the user, or keep as query
Returns
-------
cleared_stack : DataFrame
A DataFrame which has been cleared against the requirement
uncleared_stack: DataFrame
A DataFrame containing the offers which were not cleared
"""
if not isinstance(fstack, pd.DataFrame):
fstack = self.fstack.copy()
if len(fstack) == 0:
raise ValueError("fstack must be a DataFrame, \
current size is zero")
if requirement <= 0:
return (None, fstack)
# Drop all non-zero offers
fstack = fstack[fstack["Max"] > 0]
# Sort by price
fstack = fstack.sort(columns=["Price"])
# Cumulative Offer
fstack["Cumulative Offer"] = fstack["Max"].cumsum()
# Reindex
fstack.index = np.arange(len(fstack))
# Get marginal unit index
try:
marginal_unit = fstack[fstack["Cumulative Offer"] >= requirement].index[0]
except:
marginal_unit = fstack.iloc[-1].name
# Get the stacks
cleared_stack = fstack.iloc[:marginal_unit+1].copy()
uncleared_stack = fstack.iloc[marginal_unit:].copy()
# Change the marginal unit to reflect the true params
remain = uncleared_stack["Cumulative Offer"][marginal_unit] - requirement
uncleared_stack["Max"][marginal_unit] = remain
cleared_stack["Max"][marginal_unit] = cleared_stack["Max"][marginal_unit] - remain
return cleared_stack, uncleared_stack
    def _stacker(self):
        """ General Stacker designed to handle all forms of
        offer dataframe, energy, plsr, and IL

        Yields one DataFrame per (product type, reserve type, band number)
        group, with identifier columns attached and band-specific columns
        renamed back to their bare parameter names.
        """
        # Columns without "Band" apply to every band (company, node, date...).
        general_columns = [x for x in self.offers.columns if "Band" not in x]
        band_columns = [x for x in self.offers.columns if x not in general_columns]
        filterdict = self._assign_band(band_columns)
        for key in filterdict:
            # NOTE(review): list + dict.values() concatenation is Python 2
            # only; Python 3 would need list(filterdict[key].values()).
            all_cols = general_columns + filterdict[key].values()
            single = self.offers[all_cols].copy()
            # Assign identifiers
            single["Product Type"] = key[0]
            single["Reserve Type"] = key[1]
            single["Band Number"] = key[2]
            # Rename e.g. "Band1 Plsr 6S Max" back to the bare parameter "Max".
            single.rename(columns={v: k for k, v in filterdict[key].items()},
                inplace=True)
            yield single
def _assign_band(self, band_columns):
    """Group band column names by their product/reserve/band identity.

    Returns a defaultdict mapping (product type, reserve type, band
    number) keys to {parameter name: original column name} dicts.
    """
    grouped = defaultdict(dict)
    for column in band_columns:
        tokens = column.split()
        # Column names look like "BandN ... Param"; strip the "Band" prefix
        # to recover the band number.
        number = int(tokens[0][4:])
        key = (self._product_type(column), self._reserve_type(column), number)
        grouped[key][tokens[-1]] = column
    return grouped
def _reserve_type(self, band):
return "FIR" if "6S" in band else "SIR" if "60S" in band else "Energy"
def _product_type(self, band):
return "PLSR" if (
"Plsr" in band) else "TWDSR" if (
"Twdsr" in band) else"IL" if (
"6S" in band or "60S" in band) else "Energy"
def _retitle_columns(self):
self.offers.rename(columns={x: x.replace('_', ' ').title().strip()
for x in self.offers.columns}, inplace=True)
def _map_location(self, user_map=None,
        left_on="Grid Exit Point", right_on="Grid Exit Point"):
    """Merge location metadata (region, island, generation type) onto offers.

    Useful when looking at regional instances.

    Parameters
    ----------
    user_map : DataFrame, optional
        Custom node-to-location mapping.  When omitted, the mapping is
        loaded from the CONFIG['map-location'] CSV file.
    left_on, right_on : str
        Merge keys; ``left_on`` is overridden by whichever grid-point
        column actually exists in the offers frame.
    """
    # Bug fix: the original tested `if not user_map`, which raises
    # ValueError ("truth value of a DataFrame is ambiguous") whenever a
    # caller actually supplies a DataFrame.  Test identity against None.
    if user_map is None:
        user_map = pd.read_csv(CONFIG['map-location'])
        user_map = user_map[["Node", "Load Area", "Island Name",
                             "Generation Type"]]
        user_map.rename(columns={"Node": "Grid Exit Point",
            "Load Area": "Region", "Island Name": "Island"}, inplace=True)
    # Pick whichever grid-point column this particular offer frame uses.
    if "Grid Exit Point" in self.offers.columns:
        left_on = "Grid Exit Point"
    elif "Grid Injection Point" in self.offers.columns:
        left_on = "Grid Injection Point"
    elif "Grid Point" in self.offers.columns:
        left_on = "Grid Point"
    # Strip stray whitespace so the merge keys line up.
    self._column_stripper(left_on)
    self.offers = self.offers.merge(user_map, left_on=left_on, right_on=right_on)
def _convert_dates(self, date_col="Trading Date"):
self.offers[date_col] = pd.to_datetime(self.offers[date_col])
def _apply_datetime(self, date_col="Trading Date",
        period_col="Trading Period", datetime_col="Trading Datetime"):
    """Derive a full datetime column from trading date plus trading period."""
    # Compute each distinct period's time offset once, not per row.
    offsets = {p: self._period_minutes(p) for p in set(self.offers[period_col])}
    self.offers[datetime_col] = (self.offers[date_col] +
                                 self.offers[period_col].map(offsets))
def _period_minutes(self, period):
return timedelta(minutes=int(period)*30 -15)
def _sort_offers(self, datetime_col="Trading Datetime"):
    """Sort the offers frame chronologically, in place.

    NOTE(review): DataFrame.sort(columns=...) is the legacy (pandas < 0.17)
    API; modern pandas requires sort_values -- confirm the pinned version.
    """
    self.offers.sort(columns=[datetime_col], inplace=True)
def _column_stripper(self, column):
self.offers[column] = self.offers[column].apply(lambda x: x.strip())
class ILOffer(Offer):
    """Wrapper around an IL offer DataFrame.

    Provides helpers for assessing interruptible-load offers.  Construct
    with a pandas DataFrame in the standard WITS template; modifications
    are made from there.
    """

    def __init__(self, offers):
        super(ILOffer, self).__init__(offers)

    def merge_stacked_offers(self, plsr_offer):
        """Combine this IL stack with a PLSR stack into one ReserveOffer."""
        # Lazily build either stack that has not been created yet.
        for offer in (self, plsr_offer):
            if not isinstance(offer.offer_stack, pd.DataFrame):
                offer.stack_columns()
        combined = pd.concat([self.offer_stack, plsr_offer.offer_stack],
                             ignore_index=True)
        return ReserveOffer(combined)
class PLSROffer(Offer):
    """ Wrapper around a PLSR Offer dataframe which provides a number
    of useful functions in assessing reserve offers.
    Is created by passing a pandas DataFrame in the standard WITS
    template and then modifications are made from there
    """
    def __init__(self, offers, run_operations=False):
        super(PLSROffer, self).__init__(offers, run_operations=run_operations)
    def fan_offers(self, **classifiers):
        # NOTE(review): the filtered stack is computed but never returned
        # or stored -- this method looks unfinished; confirm intent.
        stack = self.filter_stack(return_df=True, **classifiers)
    def unique_classifier(self, series):
        """Unique station key: grid point plus concatenated station+unit."""
        return " ".join([series["Grid Point"], "".join([series["Station"], str(series["Unit"])])])
    def _bathtub(self, stack, maximum_capacity):
        """Build the per-MW marginal reserve profile for one station.

        For each 1 MW step of energy output, sums each row's proportional
        reserve (capped at the row's Max), then caps total reserve by the
        remaining capacity headroom, and returns the marginal reserve per
        cumulative MW of station offer.
        """
        energy_stack = np.arange(0, maximum_capacity, 1)
        reserve_stack = np.zeros(len(energy_stack))
        for index, row in stack.iterrows():
            # Proportional reserve, saturating at the row's offered Max.
            reserve = energy_stack * row["Percent"] / 100
            rmap = np.where(reserve <= row["Max"], reserve, row["Max"])
            reserve_stack = reserve_stack + rmap
        # Total reserve can never exceed remaining capacity (the rim of
        # the "bathtub": capacity minus energy output).
        reserve_stack = np.where(reserve_stack <= energy_stack[::-1], reserve_stack, energy_stack[::-1])
        marginal_energy = energy_stack[1:] - energy_stack[:-1]  # NOTE(review): unused
        marginal_reserve = reserve_stack[1:] - reserve_stack[:-1]
        df = pd.DataFrame(np.array([energy_stack[1:], marginal_reserve]).T, columns=["Cumulative Station Offer", "Marginal Reserve"])
        df["Market Node Id"] = stack["Market Node Id"].unique()[0]
        return df
    def _reserve_stack(self, stack, capacity_dict):
        """Yield one bathtub frame per market node present in ``stack``."""
        for classifier in stack["Market Node Id"].unique():
            # eq_mask is a project-added DataFrame filtering helper.
            mini_stack = stack.eq_mask("Market Node Id", classifier)
            max_capacity = capacity_dict[classifier]
            yield self._bathtub(mini_stack, max_capacity)
    def marginal_reserve_stack(self, stack, capacity_dict):
        """Concatenate per-station bathtub frames into a single DataFrame."""
        return pd.concat(self._reserve_stack(stack, capacity_dict), ignore_index=True)
    def merge_stacked_offers(self, il_offer):
        """Combine this PLSR stack with an IL stack into one ReserveOffer."""
        if not isinstance(self.offer_stack, pd.DataFrame):
            self.stack_columns()
        if not isinstance(il_offer.offer_stack, pd.DataFrame):
            il_offer.stack_columns()
        return ReserveOffer(pd.concat([self.offer_stack,
            il_offer.offer_stack], ignore_index=True))
class ReserveOffer(Offer):
    """ Container for mixed PLSR, IL and TWDSR Offers.
    Created by using the merge_stacked_offers method of either the
    ILOffer or PLSROffer classes.
    """
    def __init__(self, offers):
        super(ReserveOffer, self).__init__(offers, run_operations=False)
        # Note, a raw Offer frame isn't passed, therefore manually add it
        # to the offer stack
        self.offer_stack = offers

    def NRM_Clear(self, fstack=None, max_req=0, ni_min=0, si_min=0):
        """ Clear the reserve offers as though the National Reserve Market
        was in effect: the market is cleared three times (nationally, then
        each island) with a reduced offer DataFrame each time.
        Note fstack must have been filtered already.

        Parameters
        ----------
        fstack : DataFrame, default None
            If desired don't use the "saved" filtered stack
        max_req : int, float, default 0
            The maximum requirement for reserves across the whole country.
        ni_min : int, float, default 0
            The North Island minimum requirement for reserve
        si_min : int, float, default 0
            The South Island minimum requirement for reserve

        Returns
        -------
        all_clear : DataFrame
            Only the units which were cleared in the dispatch, along with
            the respective North and South Island prices.

        Usage
        -----
        Different requirement combinations run different market designs.
        The mixed market approach is intended for assessing a national
        reserve market in the presence of an HVDC link.

        Example One: Pure National Reserve Market
        >>> NRM_Clear(max_req=requirement, ni_min=0, si_min=0)

        Example Two: Stand alone Reserve Market
        >>> NRM_Clear(max_req=0, ni_min=ni_risk, si_min=si_risk)

        Example Three: Mixed Market (exporting island reserve cannot
        secure importing island risk)
        >>> NRM_Clear(max_req=max_risk, ni_min=ni_hvdc, si_min=si_hvdc)
        """
        if not isinstance(fstack, pd.DataFrame):
            fstack = self.fstack.copy()
        # National clearing covers whatever remains of the maximum
        # requirement after both island minima are accounted for.
        national_requirement = max(max_req - ni_min - si_min, 0.)
        (nat_clear, nat_remain) = self.clear_offer(
            requirement=national_requirement, fstack=fstack)
        # I don't think this section is right, hence commenting out for now
        # # Calculate how much has been cleared from each island
        # if isinstance(nat_clear, pd.DataFrame):
        #     ni_cleared = nat_clear.eq_mask("Island", "North Island")["Max"].sum()
        #     si_cleared = nat_clear.eq_mask("Island", "South Island")["Max"].sum()
        # else:
        #     ni_cleared = 0
        #     si_cleared = 0
        # # Adjust the minimum requirements appropriately
        # ni_min = max(ni_min - ni_cleared, 0)
        # si_min = max(si_min - si_cleared, 0)
        # Calculate the new stacks from what was not cleared nationally.
        ni_stack = nat_remain.eq_mask("Island", "North Island")
        si_stack = nat_remain.eq_mask("Island", "South Island")
        # Clear each island individually
        (ni_clear, ni_remain) = self.clear_offer(requirement=ni_min,
                                                 fstack=ni_stack)
        (si_clear, si_remain) = self.clear_offer(requirement=si_min,
                                                 fstack=si_stack)
        # Island price is its own marginal price floored at the national
        # marginal price (the national clear binds both islands).
        nat_price = 0
        if isinstance(nat_clear, pd.DataFrame):
            nat_price = max(nat_clear.iloc[-1]["Price"], 0)
        if isinstance(ni_clear, pd.DataFrame):
            ni_price = max(ni_clear.iloc[-1]["Price"], nat_price)
        else:
            ni_price = nat_price
        if isinstance(si_clear, pd.DataFrame):
            si_price = max(si_clear.iloc[-1]["Price"], nat_price)
        else:
            si_price = nat_price
        all_clear = pd.concat((nat_clear, ni_clear, si_clear),
                              ignore_index=True)
        all_clear["NI Price"] = ni_price
        all_clear["SI Price"] = si_price
        return all_clear

    def clear_all_NRM(self, clearing_requirements):
        """ Clear the NRM for every (date, period, reserve type) combo.

        Parameters
        ----------
        clearing_requirements : DataFrame
            Multi-indexed DataFrame containing the requirements for
            reserve both in total and for each island.

        Returns
        -------
        DataFrame
            Information for all units cleared in the operation of a NRM.
        """
        combinations = list(itertools.product(
            self.offer_stack["Trading Date"].unique(),
            self.offer_stack["Trading Period"].unique(),
            self.offer_stack["Reserve Type"].unique()
        ))
        return pd.concat(self._yield_NRM_result(clearing_requirements,
                                                combinations),
                         ignore_index=True)

    def _yield_NRM_result(self, clearing_requirements, combinations):
        """ Lazily compute the solved NRM solution for each combination.

        Parameters
        ----------
        clearing_requirements : DataFrame
            Multi-indexed by (date, period, reserve_type); indexed to
            determine the (max_req, ni_min, si_min) requirements.
        combinations : list
            List of all (date, period, reserve_type) tuples to assess.

        Yields
        ------
        DataFrame or None
            The solution to a single NRM clearing, or None when no
            requirements exist for the combination.
        """
        for (date, period, reserve_type) in combinations:
            # Get the requirements
            try:
                max_req, ni_min, si_min = clearing_requirements.ix[
                    date].ix[period].ix[reserve_type].values
            except Exception:
                # Bug fix: the original fell through after `yield None`,
                # so the solver then ran with stale (or, on the first
                # iteration, undefined) requirements.  Skip combinations
                # that have no matching requirements row.
                yield None
                continue
            # Filter the data
            fstack = self.filter_stack(date=date, period=period,
                                       reserve_type=reserve_type)
            # Run the NRM Solver
            nrm_solution = self.NRM_Clear(fstack=fstack, max_req=max_req,
                                          ni_min=ni_min, si_min=si_min)
            yield nrm_solution
class EnergyOffer(Offer):
    """ Wrapper around an Energy Offer dataframe which provides a number
    of useful functions in assessing the Energy Offers.
    Is created by passing a pandas DataFrame in the standard WITS
    template and then modifications are made from there
    """
    def __init__(self, offers, **kargs):
        super(EnergyOffer, self).__init__(offers, **kargs)

    def unique_classifier(self, series):
        """Unique station key: grid injection point plus station+unit."""
        return " ".join([series["Grid Injection Point"],
                         "".join([series["Station"], str(series["Unit"])])])

    def composite_bathtub(self, plsr_offer, period=None, company=None,
                          island=None, reserve_type=None, product_type=None):
        """ Create a composite bathtub constraint joining the energy and
        reserve stacks on (market node, cumulative station offer).
        """
        # Filter the energy and reserve stacks with the same classifiers.
        energy_stack = self.filter_stack(period=period, company=company,
                                         island=island, return_df=True)
        reserve_stack = plsr_offer.filter_stack(
            period=period, company=company, island=island,
            reserve_type=reserve_type, product_type=product_type,
            return_df=True)
        energy_stack["Market Node Id"] = energy_stack.apply(
            self.unique_classifier, axis=1)
        reserve_stack["Market Node Id"] = reserve_stack.apply(
            plsr_offer.unique_classifier, axis=1)
        # Keep only non-zero offers (gt_mask is a project DataFrame helper).
        energy_stack = energy_stack.gt_mask("Power", 0)
        reserve_stack = reserve_stack.gt_mask("Max", 0)
        capacity_dict = energy_stack.groupby("Market Node Id")["Power"].sum()
        energy = self.marginal_stack(energy_stack)
        reserve = plsr_offer.marginal_reserve_stack(reserve_stack,
                                                    capacity_dict)
        join_names = ["Market Node Id", "Cumulative Station Offer"]
        full_stack = energy.merge(reserve, left_on=join_names,
                                  right_on=join_names, how='outer')
        full_stack.fillna(0, inplace=True)
        full_stack["Cumulative Reserve"] = \
            full_stack["Marginal Reserve"].cumsum()
        return full_stack

    def marginal_stack(self, stack):
        """Price-sorted 1 MW marginal energy stack with cumulative offer."""
        marg_stack = pd.concat(self._gen_gen(stack), ignore_index=True)
        # Legacy pandas sort API (pre-0.17), kept for compatibility.
        marg_stack.sort(columns=("Price"), inplace=True)
        marg_stack["Cumulative Offer"] = marg_stack["Power"].cumsum()
        return marg_stack

    def _gen_gen(self, stack):
        """Yield per-station 1 MW slices with a cumulative station offer."""
        for classifier in stack["Market Node Id"].unique():
            new_stack = stack.eq_mask("Market Node Id", classifier)
            single_station = pd.concat(
                self._marginal_generator(new_stack), axis=1).T
            single_station["Cumulative Station Offer"] = \
                single_station["Power"].cumsum()
            yield single_station.copy()

    def _marginal_generator(self, stack):
        """ Yield 1 MW slices of each offer row.

        Assumes a sorted energy stack with appropriate classifiers applied.
        NOTE(review): a fractional final megawatt is still emitted as a
        full 1 MW slice -- confirm that is intended.
        """
        # Unused locals from the original (new_frame_data, price, mni)
        # have been removed; only Power drives the slicing.
        for index, series in stack.iterrows():
            power = series["Power"]
            while power > 0:
                ser2 = series.copy()
                ser2["Power"] = 1
                yield ser2
                power -= 1
if __name__ == '__main__':
    # Library-only module: no command-line entry point.
    pass
| {
"repo_name": "NigelCleland/Offers",
"path": "Offers/offer_frames.py",
"copies": "1",
"size": "25104",
"license": "bsd-3-clause",
"hash": -6462120860785511000,
"line_mean": 32.6514745308,
"line_max": 164,
"alpha_frac": 0.602692798,
"autogenerated": false,
"ratio": 4.123521681997372,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5226214479997372,
"avg_score": null,
"num_lines": null
} |
"""A collection of classes, which can check table rows for conditions."""
class Condition:
"""Base class for all condition checks."""
pass
class OneOf(Condition):
"""Class for checking, if a certain field in part of a list of items."""
def __init__(self, field, lst):
"""Store field name and list of items in object."""
self._field = field
self._lst = lst
def __call__(self, compare):
"""Check, if the item compare holds the object condition."""
return getattr(compare, self._field, None) in self._lst
class Equal(OneOf):
"""Class for checking, if a certain field is equal to a value."""
def __init__(self, field, equal):
super().__init__(field, [equal])
class DNF(Condition):
"""Class for checking, if a certain field is / is not a DNF."""
def __init__(self, field, dnf=True):
"""Store field name and list of items in object."""
self._field = field
self._dnf = dnf
def __call__(self, compare):
"""Check, if the item compare holds the object condition."""
return (getattr(compare, self._field, None) == -1) == self._dnf
class TimeBetterThan(Condition):
"""Class for checking, if a certain time is better than a threshold."""
def __init__(self, field, threshold):
"""Store field name and threshold."""
self._field = field
self._threshold = threshold
def __call__(self, compare):
"""Check, if the item compare holds the object condition."""
item = getattr(compare, self._field, float('inf'))
if isinstance(item, int):
return item < self._threshold
else:
return False
| {
"repo_name": "jakobkogler/wca_api",
"path": "wca_api/conditions.py",
"copies": "1",
"size": "1702",
"license": "mit",
"hash": -7803114936333763000,
"line_mean": 29.3928571429,
"line_max": 76,
"alpha_frac": 0.6010575793,
"autogenerated": false,
"ratio": 4.265664160401003,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5366721739701003,
"avg_score": null,
"num_lines": null
} |
"""A collection of code for exporting reports in Markdown
This rendering is helpful for manual inspection of results either by command
line or in GitHub. Useful for dev work. Useful too for dumping a quick snapshot
in GitHub for others to use. It may make sense to move this code to a dedicated
package at some point. It is included in report_metrics as a convenience.
Usage is simple.
1. Make a Report instance.
2. Invoke `save_report` to save the report to a file as markdown. Another
option is to use `render_report` to obtain an in-memory str.
"""
from collections import defaultdict
from report_metrics.metrics import ALL
def save_html(report, filename):
    """Render ``report`` as an HTML document and write it to ``filename``."""
    with open(filename, 'w') as out:
        render_report(report, out)
def render_report(report, out):
    """Write the full HTML report (header, TOC, metric tables, footer) to ``out``."""
    # Static document head: Bootstrap CSS plus minimal padding overrides.
    header = """\
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Counsyl Lab Metrics</title>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap.min.css">
<!-- Optional theme -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap-theme.min.css">
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
<style>
body {
padding-left:30px;
padding-right:20px;
}
td, th {
padding-left:5px;
padding-right:5px;
}
</style>
</head>
<body>
"""
    out.write(header)
    out.write(_render_header('h1', report.name, report.description))
    # Write out the TOC.
    # out.write('\n- Alerts')
    # alert_types = [('New Alerts', 'unresolved'), ('Resolved Alerts', 'resolved')]
    # for title, alert_type in alert_types:
    #     out.write(_render_toc_item(title))
    out.write('<ul>')
    for group in report.groups:
        out.write(_render_toc_item(group.name))
        # Nested list: one entry per metric within the group.
        out.write('<ul>')
        for metric in group.metrics:
            out.write(_render_toc_item(metric.name))
        out.write('</ul>')
    out.write('</ul>')
    # Write out the content.
    # if report.alerts:
    #     out.write('\n\nAlerts\n---')
    #     for title, alert_type in alert_types:
    #         out.write('\n\n#### %s\n' % title)
    #         alerts_to_show = [(metric, alert) for metric, alert in report.alerts if alert['status'] == alert_type]
    #         alert_text_summary, no_comments = render_alerts_summary(alerts_to_show)
    #         out.write(alert_text_summary)
    #         for metric, alert in no_comments:
    #             out.write(render_alert(metric, alert))
    #         if not alerts_to_show:
    #             out.write('- No alerts.')
    for group in report.groups:
        out.write(_render_header('h2', group.name, group.description))
        for metric in group.metrics:
            out.write(_render_metric(metric))
    # Static document tail: JS for sortable tables and CSV export buttons.
    footer = """\
<!-- jQuery (necessary for Bootstrap's JavaScript plugins) -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js"></script>
<!-- Latest compiled and minified JavaScript -->
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/js/bootstrap.min.js"></script>
<!-- <script src="http://tablesorter.com/addons/pager/jquery.tablesorter.pager.js"></script> -->
<script src="js/sortable-table.js"></script>
<!-- Acumen Scripts -->
<script src="https://sdk.amazonaws.com/js/aws-sdk-2.1.15.min.js"></script>
<script src="js/acumen-config.js"></script>
<script src="js/acumen.js"></script>
<script type="text/javascript" src="http://www.kunalbabre.com/projects/table2CSV.js" > </script>
<script type="text/javascript">
$(document).ready(function() {
$('table').each(function() {
var $table = $(this);
var $button = $("<button type='button'>");
$button.text("Export to spreadsheet");
$button.insertAfter($table);
$button.click(function() {
var csv = $table.table2CSV({delivery:'value'});
window.location.href = 'data:text/csv;charset=UTF-8,'
+ encodeURIComponent(csv);
});
});
})
</script>
</body>
</html>
"""
    out.write(footer)
def _render_header(element, name, description):
    """Build an anchored heading element followed by its description text."""
    anchor = slugify(name)
    title = name if name else 'Unknown Title'
    desc = description if description else ''
    return '<a name="%s"></a><%s>%s</%s>%s' % (
        anchor, element, title, element, desc)
def slugify(text):
    """Lower-case ``text`` and turn spaces into hyphens for URL anchors."""
    hyphenated = "-".join(text.split(" "))
    return hyphenated.lower()
def _render_toc(items):
    """Render a flat table-of-contents list for ``items``."""
    parts = ['<ul>']
    parts.extend(_render_toc_item(item) for item in items)
    parts.append('</ul>')
    return ''.join(parts)
def _render_toc_item(name):
    """Render one TOC entry linking to the matching in-page anchor."""
    slug = slugify(name)
    return '<li><a href="#%s">%s</a></li>' % (slug, name)
def _render_metric(metric, indexes=range(13)):
    """Render one metric as a heading plus a sortable HTML table.

    ``indexes`` selects which rows to show for periodic metrics; metrics
    with frequency ALL show every row, sorted descending by the second
    column instead.
    """
    table = _render_header('h3', metric.name, metric.description)
    vals = metric.values()
    if metric.frequency != ALL and indexes:
        # Keep only the requested row positions that actually exist.
        vals = [vals[index] for index in indexes if abs(index) < len(vals)]
    if metric.frequency == ALL:
        vals.sort(key=lambda x: x[1], reverse=True)
    table += '<table class="table table-hover sortable" style="width:auto;">'
    table += '<thead><tr>'
    for col in metric.cols:
        table += '<th>' + col + '</th>'
    table += '</tr></thead>'
    table += '<tbody>'
    for row in vals:
        table += '<tr>'
        for item in row:
            table += '<td>' + str(item) + '</td>'
        table += '</tr>'
    table += '</tbody>'
    table += '</table>'
    return table
def render_alert(metric, alert):
    """Render a single alert as a Markdown-style bullet line.

    Falls back to an unformatted rendering when the counts cannot be
    formatted as numbers, and appends the alert's comment when present.
    """
    count1 = alert['count1']
    count2 = alert['count2']
    try:
        text = '- %s changed %d%% from %d to %d between %s and %s\n' % (
            metric.name,
            (count2 - count1) * 100 / max([count1, 1]),
            count1,
            count2,
            alert['ts1'],
            alert['ts2'])
    except (TypeError, ValueError):
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Only number-formatting errors
        # should trigger the plain fallback rendering.
        text = '- %s changed from %s to %s between %s and %s\n' % (
            metric.name,
            count1,
            count2,
            alert['ts1'],
            alert['ts2'])
    if 'comment' in alert:
        text += ' - %s\n' % alert['comment']
    return text
def render_alerts_summary(alerts):
    """Group commented alerts by comment text.

    Returns (text, no_comments): ``text`` is a bullet list with one line
    per distinct comment, ordered by each group's earliest 'ts1', and
    ``no_comments`` is the list of (metric, alert) pairs without comments,
    left for the caller to render individually.
    """
    no_comments = []
    comment2alerts = defaultdict(list)
    for metric, alert in alerts:
        comment = alert.get('comment', None)
        if comment:
            comment2alerts[comment].append((metric, alert))
        else:
            no_comments.append((metric, alert))
    sorted_keys = []
    # NOTE(review): iteritems() is Python 2 only -- confirm target version.
    for comment, alerts in comment2alerts.iteritems():
        # Order each group by timestamp; the earliest stamps the group.
        alerts.sort(key=lambda x: x[1]['ts1'])
        sorted_keys.append((alerts[0][1]['ts1'], comment))
    text = '\n'
    for _, comment in sorted(sorted_keys):
        alerts = comment2alerts[comment]  # NOTE(review): unused rebind
        text += '- %s\n' % comment
    return text, no_comments
| {
"repo_name": "jfalkner/report_metrics",
"path": "report_metrics/utils/html.py",
"copies": "1",
"size": "7202",
"license": "mit",
"hash": -3564947311819123000,
"line_mean": 31.1517857143,
"line_max": 119,
"alpha_frac": 0.599555679,
"autogenerated": false,
"ratio": 3.4360687022900764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45356243812900765,
"avg_score": null,
"num_lines": null
} |
''' A collection of code to facilitate NPC generation using FATE-style aspects, personality traits, and other information rather than system-dependent stats. '''
# Date: Dec 26 2014
# Author: Alex Safatli
# Email: safatli@cs.dal.ca
class character(object):
    """A system-agnostic character described by a name and a trait list."""

    def __init__(self, name, traits=None):
        self.name = name
        # Default to a fresh empty list so instances never share traits.
        self.traits = [] if traits is None else traits

    def getName(self):
        """Return the character's name."""
        return self.name

    def getTraits(self):
        """Return the character's trait list."""
        return self.traits

    def hasTraits(self, trait):
        """Return True when ``trait`` is present in the trait list."""
        return trait in self.traits
class npc(character):
    """A non-player character: a character plus profession/location info."""

    def __init__(self, name, traits=None, profession='', home=None, work=None):
        # Bug fix: was `super(self, npc)` -- the arguments were reversed,
        # which raises TypeError on every instantiation.  super() takes
        # the class first, then the instance.
        super(npc, self).__init__(name, traits)
        self.profession = profession
        self.home = home
        self.quests = []
        self.work = work

    def getProfession(self):
        """Return the NPC's profession string."""
        return self.profession

    def getHome(self):
        """Return where the NPC lives."""
        return self.home

    def getQuests(self):
        """Return the (initially empty) quest list."""
        return self.quests

    def getWork(self):
        """Return where the NPC works."""
        return self.work
| {
"repo_name": "AlexSafatli/KingdomManager",
"path": "Pathfinder Kingdom Manager/Pathfinder Kingdom ManagerTests/npc.py",
"copies": "1",
"size": "1048",
"license": "mit",
"hash": -119219298964657490,
"line_mean": 29.8529411765,
"line_max": 161,
"alpha_frac": 0.6269083969,
"autogenerated": false,
"ratio": 3.7971014492753623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49240098461753623,
"avg_score": null,
"num_lines": null
} |
""" A collection of command-line tools for building encoded update.xml
files.
"""
import warnings
warnings.warn("Module is deprecated.", DeprecationWarning)
from .info_file import InfoFile
import os
def files2xml(filenames):
""" Given a list of filenames, extracts the app version and log
information from accompanying files produces an output xml string.
There are no constraints or restrictions on the names or extensions
of the input files. They just need to be accompanied by a sidecar
file named similarly, but with a ".info" extension, that can be
loaded by the InfoFile class.
If there is no .info file for a filename or an error occurs while constructing it
a warning message is printed.
"""
_xmlheader = """<?xml version="1.0" encoding="ISO-8859-1"?>
<!-- DO NOT EDIT MANUALLY -->
<!-- Automatically generated file using traits.util.updates -->
"""
xmlparts = [_xmlheader]
for file in filenames:
#info_file_name = "{0}.info".format(file)
info_file_name = "%s.info" % (file,)
if not os.path.exists(info_file_name):
#print "Warning: {0} was not found.".format(info_file_name)
print "Warning: %s was not found." % (info_file_name,)
continue
try:
info = InfoFile.from_info_file(info_file_name)
xml_list = info.get_xml()
except:
#print "Warning: Failure in creating XML for {0}".format(info_file_name)
print "Warning: Failure in creating XML for %s" % (info_file_name,)
continue
xmlparts.append('<update_file>')
xmlparts.extend(xml_list)
xmlparts.append('</update_file>')
return "\n".join(xmlparts)
| {
"repo_name": "enthought/etsproxy",
"path": "enthought/util/updates/tools.py",
"copies": "1",
"size": "1741",
"license": "bsd-3-clause",
"hash": 6823409647689241000,
"line_mean": 31.8490566038,
"line_max": 85,
"alpha_frac": 0.6392877657,
"autogenerated": false,
"ratio": 4.002298850574713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023826343540195076,
"num_lines": 53
} |
#A Collection of commonly used fabric scripts.
import re
from fabric.api import env, local, settings
from fabric.context_managers import hide
from fabric.operations import put, get
import cuisine
from cuisine import run, sudo
from fabric.api import parallel
from random import choice
import string
import socket
import paramiko
# FIXME: select package after detecting appropriate system
cuisine.select_package('apt')
USER_NAME = 'dhilipsiva'
def hello_world():
    """Smoke-test task: echo a greeting on the remote host."""
    run('echo "hello world"')
# Vagrant machine helpers
def vagrant():
    """Point fabric's env at the local Vagrant machine.

    Parses `vagrant ssh-config` output for the hostname, user, port and
    identity file, then sets env.hosts/env.user/env.key_filename.
    """
    host = '127.0.0.1'
    port = '2222'
    for line in local('vagrant ssh-config', capture=True).split('\n'):
        match = re.search(r'Hostname\s+(\S+)', line)
        if match:
            host = match.group(1)
            continue
        match = re.search(r'User\s+(\S+)', line)
        if match:
            env.user = match.group(1)
            continue
        match = re.search(r'Port\s+(\S+)', line)
        if match:
            port = match.group(1)
            continue
        match = re.search(r'IdentityFile\s(.+)', line)
        if match:
            # Bug fix: was `.reaplce(...)`, a typo that raised
            # AttributeError whenever an IdentityFile line matched.
            # Strip quotes around the key path.
            env.key_filename = match.group(1).replace('"', '')
            continue
    env.hosts = ['{0}:{1}'.format(host, port)]
def _prepare():
    """Export env vars that make apt fully non-interactive."""
    sudo('export DEBCONF_TERSE=yes'
         ' DEBIAN_PRIORITY=critical'
         ' DEBIAN_FRONTEND=noninteractive')

def apt(pkg):
    """Quietly install ``pkg`` via apt-get."""
    _prepare()
    sudo("apt-get -qqyu install %s" % pkg)

def sync_time():
    """One-off clock sync against pool.ntp.org (ntpd must be stopped first)."""
    with settings(warn_only=True):
        sudo("/etc/init.d/ntp stop")
        sudo("ntpdate pool.ntp.org")
        sudo("/etc/init.d/ntp start")

def setup_time_calibration():
    """Install ntp and an hourly ntpdate cron job.

    NOTE(review): depends on env.NEWSBLUR_PATH and a local
    config/ntpdate.cron file -- confirm both exist for this project.
    """
    sudo('apt-get -y install ntp')
    put('config/ntpdate.cron', '%s/' % env.NEWSBLUR_PATH)
    sudo('chmod 755 %s/ntpdate.cron' % env.NEWSBLUR_PATH)
    sudo('mv %s/ntpdate.cron /etc/cron.hourly/ntpdate' % env.NEWSBLUR_PATH)
    with settings(warn_only=True):
        sudo('/etc/cron.hourly/ntpdate')

def add_machine_to_ssh():
    """Append the local DSA public key to the remote authorized_keys."""
    put("~/.ssh/id_dsa.pub", "local_keys")
    run("echo `cat local_keys` >> .ssh/authorized_keys")
    run("rm local_keys")

def setup_supervisor():
    """Install the supervisor process manager."""
    sudo('apt-get -y install supervisor')

def setup_sudoers():
    """Grant the current fabric user passwordless sudo."""
    sudo('su - root -c "echo \\\\"%s ALL=(ALL) NOPASSWD: ALL\\\\" >>'
         ' /etc/sudoers"' % env.user)
@parallel
def install(package):
    """Install a package"""
    with settings(linewise=True, warn_only=True):
        sudo("apt-get update")
        # Retry once: transient apt/network failures are common; log any
        # failure to ~/fail.log on the controlling machine.
        for retry in range(2):
            if sudo("apt-get -y install %s" % package).failed:
                local("echo INSTALLATION FAILED FOR %s: was installing %s"
                      " $(date) >> ~/fail.log" % (env.host, package))
            else:
                break

@parallel
def install_auto(package):
    """Install a package answering yes to all questions"""
    with settings(linewise=True, warn_only=True):
        sudo("apt-get update")
        sudo('DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get install -o'
             ' Dpkg::Options::="--force-confold" --force-yes -y %s'
             % package)

def install_apache():
    """Install Apache server with userdir enabled"""
    with settings(linewise=True, warn_only=True):
        sudo("apt-get update")
        sudo("apt-get -y install apache2")
        # Enable mod_userdir (and restart) only if not already enabled.
        if run("ls /etc/apache2/mods-enabled/userdir.conf").failed:
            sudo("a2enmod userdir")
            sudo("/etc/init.d/apache2 restart")

@parallel
def uninstall(package):
    """Uninstall a package"""
    with settings(linewise=True, warn_only=True):
        sudo("apt-get -y remove %s" % package)

@parallel
def update():
    """Update package list"""
    with settings(linewise=True, warn_only=True):
        sudo('apt-get -yqq update')

@parallel
def upgrade():
    """Upgrade packages"""
    with settings(linewise=True, warn_only=True):
        update()
        sudo('aptitude -yvq safe-upgrade')

@parallel
def upgrade_auto():
    """Update apt-get and Upgrade apt-get answering yes to all questions"""
    with settings(linewise=True, warn_only=True):
        sudo("apt-get update")
        sudo('apt-get upgrade -o Dpkg::Options::="--force-confold"'
             ' --force-yes -y')
def user_add(new_user, passwd=False):
"""Add new user"""
with settings(hide('running', 'stdout', 'stderr'), warn_only=True):
if not passwd:
passwd = generate_passwd()
if not sudo("echo -e '%s\n%s\n' | adduser %s" % (passwd, passwd,
new_user)).failed:
if env.host == 'host1.local':
sudo("mkdir /home/%s/public_html" % new_user)
sudo("chown %s:%s /home/%s/public_html/" % new_user)
@parallel
def user_passwd(user, passwd=False):
    """Change password for user"""
    with settings(hide('running', 'stdout', 'stderr'), warn_only=True):
        # Generate a random password when none is supplied.
        if not passwd:
            passwd = generate_passwd()
        sudo("echo -e '%s\n%s' | passwd %s" % (
            passwd, passwd, user))

@parallel
def user_delete(user):
    """Delete user"""
    with settings(linewise=True, warn_only=True):
        sudo("deluser %s" % user)

def status():
    """Display host status"""
    with settings(linewise=True, warn_only=True):
        run("uptime")
        run("uname -a")

@parallel
def shut_down():
    """Shut down a host"""
    sudo("shutdown -P 0")

@parallel
def reboot():
    """Reboot a host"""
    sudo("shutdown -r 0")

def file_put(localpath, remotepath):
    """Put file from local path to remote path"""
    with settings(linewise=True, warn_only=True):
        put(localpath, remotepath)

def file_get(remotepath, localpath):
    """Get file from remote path to local path"""
    with settings(linewise=True, warn_only=True):
        # Suffix the host name so parallel gets don't clobber each other.
        get(remotepath, localpath + '.' + env.host)

def file_remove(remotepath):
    """Remove file at remote path"""
    with settings(linewise=True, warn_only=True):
        sudo("rm -r %s" % remotepath)
def generate_passwd(length=10):
    """Return a random alphanumeric password of the given length.

    NOTE(review): uses the non-cryptographic `random` module -- on a
    Python 3 codebase `secrets.choice` would be preferable for passwords.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(choice(alphabet) for _ in range(length))
def ssh_disable_passwd():
    """Disable SSH password authentication"""
    with settings(hide('running', 'user'), warn_only=True):
        sudo('echo PasswordAuthentication no >> /etc/ssh/sshd_config')
        sudo('service ssh restart')

# Copy an archived git repo to the remote host and unpack it.
def copy_source():
    """Archive the current git branch and deploy it to ~/app_name remotely."""
    local('git archive $(git symbolic-ref HEAD 2>/dev/null) '
          '| bzip2 > /tmp/app_name.tar.bz2')
    remote_filename = '/tmp/app_name.tar.bz2'
    code_dir = '~/app_name'
    # Remove any previous deployment and stale archive first.
    sudo('rm -rf %s' % code_dir)
    if cuisine.file_exists(remote_filename):
        sudo('rm %s' % remote_filename)
    cuisine.file_upload(remote_filename, '/tmp/app_name.tar.bz2')
    with cuisine.mode_sudo():
        run('mkdir -p %s' % code_dir)
        cuisine.file_attribs(remote_filename)
        run('tar jxf %s -C %s' % (remote_filename, code_dir))
        run('rm %s' % (remote_filename,))

def target():
    """Point fabric's env at a fixed deployment target.

    NOTE(review): host and key file are placeholder strings -- fill in
    real values before use.
    """
    host = 'A Target IP or name'
    port = 22
    env.hosts = ['{0}:{1}'.format(host, port)]
    env.user = 'ubuntu'
    env.key_filename = 'key file name'
def create_user():
    """Ensure the deploy user exists and belongs to www-data."""
    cuisine.user_ensure(USER_NAME,
                        home='/home/%s' % USER_NAME, shell='/bin/bash')
    cuisine.group_user_ensure('www-data', USER_NAME)

def create_virtualenv():
    """Create the deploy user's virtualenv if it does not already exist."""
    if not cuisine.dir_exists('/home/%s/ENV' % USER_NAME):
        sudo('virtualenv -q --distribute '
             '/home/%s/ENV' % (
                 USER_NAME), user=USER_NAME)
def run_in_virtualenv(cmd):
    """Run ``cmd`` on the remote host inside the user's virtualenv.

    Bug fix: the original did `with run(...)`, but fabric's run() returns
    a result string, not a context manager, so every call raised
    TypeError.  Source the activate script and run the command in a
    single shell invocation instead.
    """
    run('. /home/%s/ENV/bin/activate && %s' % (USER_NAME, cmd))
def is_host_up(host, counter=0):
    """Return True when an SSH transport can be opened to ``host`` (port 22)."""
    print('%d : Attempting connection to host: %s' %
          (counter, host))
    original_timeout = socket.getdefaulttimeout()
    # Short timeout for the probe; always restored in the finally block.
    socket.setdefaulttimeout(1)
    host_up = True
    try:
        paramiko.Transport((host, 22))
    except Exception, e:
        host_up = False
        print('%s down, %s' % (host, e))
    finally:
        socket.setdefaulttimeout(original_timeout)
    return host_up

def try_to_connect():
    """Block until env.host answers on SSH, counting the attempts."""
    counter = 0
    while not is_host_up(env.host, counter):
        counter += 1
def ssh():
    """Open an interactive SSH session to the AWS host (best effort)."""
    try:
        if env.host == AWS_HOST:
            local('ssh -i rewire.pem ubuntu@' + AWS_HOST)
    except Exception:
        # Was a bare `except:` -- that also swallowed KeyboardInterrupt and
        # SystemExit.  Failures are still deliberately ignored.
        pass
| {
"repo_name": "dhilipsiva/snippets",
"path": "fabfile.py",
"copies": "1",
"size": "8256",
"license": "mit",
"hash": -1632338757591171000,
"line_mean": 25.8925081433,
"line_max": 75,
"alpha_frac": 0.597504845,
"autogenerated": false,
"ratio": 3.3780687397708675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4475573584770868,
"avg_score": null,
"num_lines": null
} |
'''A collection of common threadsafe data structures, adapted to the dnutils thread model'''
'''A multi-producer, multi-consumer queue.'''
from dnutils import threads
from collections import deque
from heapq import heappush, heappop
from time import monotonic as time
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
    """Raised by Queue.get(block=0)/get_nowait() when no item is available."""
class Full(Exception):
    """Raised by Queue.put(block=0)/put_nowait() when no free slot exists."""
class Queue:
    '''Create a queue object with a given maximum size.
    If maxsize is <= 0, the queue size is infinite.
    '''
    def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating. All methods
        # that acquire mutex must release it before returning. mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threads.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threads.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threads.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = threads.Condition(self.mutex)
        self.unfinished_tasks = 0
    def task_done(self):
        '''Indicate that a formerly enqueued task is complete.
        Used by Queue consumer threads. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.
        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).
        Raises a ValueError if called more times than there were items
        placed in the queue.
        '''
        # The condition shares self.mutex, so the counter update below is
        # serialized against put()/get() as well.
        with self.all_tasks_done:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished
    def join(self):
        '''Blocks until all items in the Queue have been gotten and processed.
        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        '''
        with self.all_tasks_done:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
    def qsize(self):
        '''Return the approximate size of the queue (not reliable!).'''
        with self.mutex:
            return self._qsize()
    # NOTE(review): __len__ is an extension over the stdlib queue API; the
    # result is immediately stale in the presence of other threads.
    def __len__(self):
        return self.qsize()
    def empty(self):
        '''Return True if the queue is empty, False otherwise (not reliable!).
        This method is likely to be removed at some point. Use qsize() == 0
        as a direct substitute, but be aware that either approach risks a race
        condition where a queue can grow before the result of empty() or
        qsize() can be used.
        To create code that needs to wait for all queued tasks to be
        completed, the preferred technique is to use the join() method.
        '''
        with self.mutex:
            return not self._qsize()
    def full(self):
        '''Return True if the queue is full, False otherwise (not reliable!).
        This method is likely to be removed at some point. Use qsize() >= n
        as a direct substitute, but be aware that either approach risks a race
        condition where a queue can shrink before the result of full() or
        qsize() can be used.
        '''
        with self.mutex:
            return 0 < self.maxsize <= self._qsize()
    def put(self, item, block=True, timeout=None):
        '''Put an item into the queue.
        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        '''
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() >= self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    # Absolute deadline so spurious wakeups don't extend the wait.
                    endtime = time() + timeout
                    while self._qsize() >= self.maxsize:
                        remaining = endtime - time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
    def get(self, block=True, timeout=None):
        '''Remove and return an item from the queue.
        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        '''
        with self.not_empty:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                # Absolute deadline so spurious wakeups don't extend the wait.
                endtime = time() + timeout
                while not self._qsize():
                    remaining = endtime - time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
    def put_nowait(self, item):
        '''Put an item into the queue without blocking.
        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        '''
        return self.put(item, block=False)
    def get_nowait(self):
        '''Remove and return an item from the queue without blocking.
        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        '''
        return self.get(block=False)
    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held
    # Initialize the queue representation
    def _init(self, maxsize):
        self.queue = deque()
    def _qsize(self):
        return len(self.queue)
    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)
    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
class PriorityQueue(Queue):
    """Queue variant returning the open entry with the lowest priority first.

    Entries are typically (priority_number, data) tuples.
    """

    def _init(self, maxsize):
        # Backing store: a binary heap kept in a plain list.
        self.queue = []

    def _qsize(self):
        return len(self.queue)

    def _put(self, item):
        heappush(self.queue, item)

    def _get(self):
        return heappop(self.queue)
class LifoQueue(Queue):
    """Queue variant returning the most recently added entry first (a stack)."""

    def _init(self, maxsize):
        # A plain list used as a stack: append and pop at the right end.
        self.queue = []

    def _qsize(self):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.pop()
"repo_name": "danielnyga/dnutils",
"path": "src/dnutils/threadsafe.py",
"copies": "1",
"size": "8845",
"license": "mit",
"hash": -3840604452834169000,
"line_mean": 36.8034188034,
"line_max": 92,
"alpha_frac": 0.5997738836,
"autogenerated": false,
"ratio": 4.476214574898785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5575988458498785,
"avg_score": null,
"num_lines": null
} |
"""A collection of converters, and converter creators
For the purposes of this module, a converter is any function that takes a single value and returns
another value.
"""
from decimal import Decimal
from uuid import UUID, uuid4
from bidon.util import date
def identity(val):
  """Returns the value it is given."""
  return val


def to_int(val):
  """Convert val to an int (no-op when it already is one)."""
  return val if isinstance(val, int) else int(val)


def to_float(val):
  """Convert val to a float (no-op when it already is one)."""
  return val if isinstance(val, float) else float(val)


def to_decimal(val):
  """Convert val to a Decimal (no-op when it already is one)."""
  return val if isinstance(val, Decimal) else Decimal(val)


def to_bit(val):
  """Convert val to a bit: 1 when val is truthy, 0 otherwise."""
  return 1 if val else 0


def to_bool(val):
  """Convert val to a boolean value."""
  return bool(val)


def to_uuid(val):
  """Convert val to a UUID (no-op when it already is one)."""
  return val if isinstance(val, UUID) else UUID(val)
def to_compressed_string(val, max_length=0):
  """Collapse internal whitespace in val to single spaces and trim the ends.

  Returns None when val is None, empty, or all whitespace.  When max_length
  is nonzero, the compressed result is truncated to max_length characters.
  """
  if val is None or len(val) == 0:
    return None
  compressed = " ".join(val.split())
  if len(compressed) == 0:
    return None
  return compressed[:max_length] if max_length != 0 else compressed
def to_now(_):
  """Returns the current timestamp, with a utc timezone."""
  return date.utc_now()


def to_none(_):
  """Returns None, ignoring its argument."""
  return None


def to_true(_):
  """Returns True, ignoring its argument."""
  return True


def to_false(_):
  """Returns False, ignoring its argument."""
  return False


def to_empty_string(_):
  """Returns the empty string, ignoring its argument."""
  return ""


def to_new_uuid(_):
  """Returns a new randomly generated UUID."""
  return uuid4()


def to_date(val, fmt=None):
  """Parse val as a date, optionally using the format fmt."""
  return date.parse_date(val, fmt)


def to_time(val, fmt=None):
  """Parse val as a time, optionally using the format fmt."""
  return date.parse_time(val, fmt)


def to_datetime(val, fmt=None):
  """Parse val as a datetime, optionally using the format fmt."""
  return date.parse_datetime(val, fmt)


def to_formatted_datetime(fmt):
  """Returns a datetime converter bound to fmt."""
  def convert(val):
    return to_datetime(val, fmt)
  return convert
def incrementor(start=0, step=1):
  """Returns a function that yields start on its first call and the
  previous value + step on each subsequent call.
  """
  state = [start]
  def fxn(_):
    """Advance the sequence [start::step) and return the pre-advance value."""
    value = state[0]
    state[0] += step
    return value
  return fxn
def string_trimmer(max_length=0):
  """The same as partial(to_compressed_string, max_length=max_length)."""
  def trim(val):
    return to_compressed_string(val, max_length)
  return trim


def static_value(val):
  """Returns a function that always returns val."""
  def constant(_):
    return val
  return constant


def rounded_decimal(places):
  """Returns a converter that parses a decimal and rounds it to places."""
  def convert(val):
    return round(to_decimal(val), places)
  return convert


def index_resolver(index, strict=False):
  """Returns a function mapping a value to index[value].

  When strict is False, missing keys resolve to None instead of raising.
  """
  return (lambda id_: index[id_]) if strict else index.get
def accept_none_wrapper(fxn):
  """Wraps fxn so that a None argument passes through untouched."""
  def wrapper(val):
    """Return None for None, otherwise fxn(val)."""
    return None if val is None else fxn(val)
  return wrapper
def try_wrapper(fxn):
  """Wraps fxn, returning (True, fxn(val)) on success, (False, val) on error."""
  def wrapper(val):
    """Call fxn(val), reporting success as a (flag, value) pair."""
    try:
      result = fxn(val)
    except Exception:
      return (False, val)
    return (True, result)
  return wrapper
| {
"repo_name": "treycucco/bidon",
"path": "bidon/util/convert.py",
"copies": "1",
"size": "3963",
"license": "mit",
"hash": -5189435964128649000,
"line_mean": 20.8950276243,
"line_max": 99,
"alpha_frac": 0.6684330053,
"autogenerated": false,
"ratio": 3.615875912408759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47843089177087594,
"avg_score": null,
"num_lines": null
} |
"""A collection of converters, and converter creators
For the purposes of this module, a converter is any function that takes a single value and returns
another value.
"""
import arrow
from decimal import Decimal
from uuid import UUID, uuid4
from datetime import datetime, timezone, date
from numbers import Number
def identity(v):
  """Return v unchanged."""
  return v


def to_int(v):
  """Coerce v to an int (no-op when it already is one)."""
  return v if isinstance(v, int) else int(v)


def to_float(v):
  """Coerce v to a float (no-op when it already is one)."""
  return v if isinstance(v, float) else float(v)


def to_decimal(v):
  """Coerce v to a Decimal (no-op when it already is one)."""
  return v if isinstance(v, Decimal) else Decimal(v)


def to_bit(v):
  """Return 1 when v is truthy, 0 otherwise."""
  return 1 if v else 0


def to_bool(v):
  """Coerce v to a bool."""
  return bool(v)


def to_uuid(v):
  """Coerce v to a UUID (no-op when it already is one)."""
  return v if isinstance(v, UUID) else UUID(v)
def to_compressed_string(s, max_length=0):
  """Collapse internal whitespace in s to single spaces and trim the ends.

  Returns None for None, empty, or all-whitespace input; when max_length is
  nonzero the result is truncated to max_length characters.
  """
  if s is None or len(s) == 0:
    return None
  compressed = " ".join(s.split())
  if len(compressed) == 0:
    return None
  return compressed[:max_length] if max_length != 0 else compressed
def to_date(v, fmt=None):
  """Parse v and return it as a datetime.

  NOTE(review): this is currently identical to to_datetime(); it probably
  meant to return ``.date()`` -- confirm callers before changing.
  """
  return _to_datetime(v, fmt).datetime


def to_datetime(v, fmt=None):
  """Parse v (string, timestamp, or datetime) into a datetime."""
  return _to_datetime(v, fmt).datetime


def to_time(v, fmt="HH:mm:ss"):
  """Parse v and return only its time component."""
  return _to_datetime(v, fmt).time()


def _to_datetime(v, fmt=None):
  """Return an arrow object for v; fmt is only applied to string input.

  Raises when v is None rather than silently producing "now".
  """
  if v is None:
    raise Exception("Invalid date subject")
  if fmt is None or not isinstance(v, str):
    return arrow.get(v)
  else:
    return arrow.get(v, fmt)
def to_now(_):
  """Return the current time as a timezone-aware UTC datetime."""
  return datetime.now(timezone.utc)


def to_none(_):
  """Always return None."""
  return None


def to_true(_):
  """Always return True."""
  return True


def to_false(_):
  """Always return False."""
  return False


def to_empty_string(_):
  """Always return the empty string."""
  return ""


def to_new_uuid(_):
  """Return a freshly generated random UUID."""
  return uuid4()
def incrementor(start=0, step=1):
  """Return a callable yielding start, start+step, ... one value per call."""
  state = [start]
  def fx(_):
    value = state[0]
    state[0] += step
    return value
  return fx
def string_trimmer(max_length=0):
  """Return a converter that compresses strings, truncating to max_length."""
  def trim(s):
    return to_compressed_string(s, max_length)
  return trim


def static_value(v):
  """Return a function that always returns v."""
  def constant(_):
    return v
  return constant


def formatted_datetime(fmt):
  """Return a datetime parser bound to the format fmt."""
  def convert(v):
    return _to_datetime(v, fmt).datetime
  return convert


def rounded_decimal(places):
  """Return a converter parsing a decimal and rounding it to places."""
  def convert(v):
    return round(to_decimal(v), places)
  return convert


def index_resolver(index, strict=False):
  """Return a lookup into index; non-strict lookups yield None when missing."""
  if strict:
    return lambda id_: index[id_]
  return lambda id_: index.get(id_)
def accept_none_wrapper(fx):
  """Wrap fx so that a None argument passes through unchanged."""
  def wrapper(v):
    return None if v is None else fx(v)
  return wrapper


def try_wrapper(fx):
  """Wrap fx to report success as a (flag, value) pair."""
  def wrapper(v):
    try:
      result = fx(v)
    except Exception:
      return (False, v)
    return (True, result)
  return wrapper
| {
"repo_name": "treycucco/py-utils",
"path": "idb/util/convert.py",
"copies": "1",
"size": "2814",
"license": "bsd-3-clause",
"hash": 5513156658791072000,
"line_mean": 15.650887574,
"line_max": 98,
"alpha_frac": 0.6417910448,
"autogenerated": false,
"ratio": 3.1796610169491526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43214520617491525,
"avg_score": null,
"num_lines": null
} |
"""A collection of different tools Samantha might use.
- Updater, to monitor it's sources on GitHub and automatically update to newer
versions, if available and if a certain series of tests passes
"""
###############################################################################
# pylint: disable=global-statement
#
# TODO: [ ] Updater
# TODO: [ ] Monitor Sources for the modules
# TODO: [ ] Test new versions
# TODO: [ ] replace them on-the-go if tests are passed
# TODO: [ ] keep the old version as backup for a certain time (maybe check
# every 24h and discard old versions 24h later if nothing's gone
# wrong?)
#
###############################################################################
# standard library imports
import logging
# related third party imports
# application specific imports
from . import eventbuilder
from . import server
__version__ = "1.3.8"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
# Set constants
INITIALIZED = False  # guards against double initialization (see initialize())
INPUT = None  # incoming event queue, bound by _init()
OUTPUT = None  # outgoing event queue, bound by _init()
LOGGER.debug("I was imported.")
def _init(queue_in, queue_out):
    """Initialize the module."""
    global INPUT, OUTPUT
    LOGGER.info("Initializing...")
    INPUT, OUTPUT = queue_in, queue_out
    # initialize all tools
    for tool in (eventbuilder, server):
        tool.initialize(queue_in, queue_out)
    LOGGER.info("Initialisation complete.")
    return True
def stop():
    """Stop the module."""
    global INITIALIZED
    LOGGER.info("Exiting...")
    INITIALIZED = False
    # Stop all tools
    for tool in (eventbuilder, server):
        tool.stop()
    LOGGER.info("Exited.")
    return True
def initialize(queue_in, queue_out):
    """Initialize the module when not yet initialized."""
    global INITIALIZED
    if INITIALIZED:
        LOGGER.info("Already initialized!")
    else:
        INITIALIZED = _init(queue_in, queue_out)
| {
"repo_name": "Sirs0ri/PersonalAssistant",
"path": "samantha/tools/__init__.py",
"copies": "1",
"size": "1914",
"license": "mit",
"hash": 4407502659395351000,
"line_mean": 22.0602409639,
"line_max": 79,
"alpha_frac": 0.6128526646,
"autogenerated": false,
"ratio": 4.253333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 83
} |
"""A collection of dpa-specific nuke nodes."""
from collections import defaultdict
import os
import re
import nuke
from dpa.ptask import PTask
from dpa.ptask.area import PTaskArea, PTaskAreaError
from dpa.ptask.spec import PTaskSpec
# -----------------------------------------------------------------------------
# subscribed product representation cache
SUBD_REPR_CACHE = []
PRODUCT_REPR_STR_TO_PATH = {}
DEFAULT_REPR_STR = 'Please select a subscription...'
# -----------------------------------------------------------------------------
def get_default_product_name():
    """Derive a default product name from the current ptask area."""
    ptask_area = PTaskArea.current()
    if not ptask_area:
        return "Comp"
    return PTaskSpec.name(ptask_area.spec) + "Comp"
# -----------------------------------------------------------------------------
def create_write_product_node():
    """Create a Write node configured as a dpa product writer."""
    node = nuke.createNode('Write', inpanel=True)
    # Find the first unused WriteProduct<N> node name.
    base_name = 'WriteProduct'
    suffix = 1
    while nuke.exists(base_name + str(suffix)):
        suffix += 1
    node.knob('name').setValue(base_name + str(suffix))
    node.knob('beforeRender').setValue(
        'dpa.nuke.utils.create_product_before_render()')
    node.knob('afterFrameRender').setValue(
        'dpa.nuke.utils.set_permissions_after_frame()')
    # Product metadata lives on its own tab.
    node.addKnob(nuke.Tab_Knob("Product"))
    node.addKnob(nuke.EvalString_Knob('product_desc', 'description', ""))
    node.addKnob(nuke.EvalString_Knob('product_name', 'name',
        get_default_product_name()))
    node.addKnob(nuke.EvalString_Knob('product_ver_note', 'description', ""))
    # hide the file knob
    node.knob('file_type').setValue('exr')
    node.knob('product_ver_note').setVisible(False)
# -----------------------------------------------------------------------------
def get_import_dir(product_repr, product=None, area=None, relative_to=None):
    """Return the import directory for a product representation.

    Falls back to the current ptask area and the representation's own
    product when those are not supplied; the result is made relative to
    *relative_to* when given.
    """
    if not area:
        area = PTaskArea.current()
    if not product:
        product = product_repr.product_version.product
    try:
        base_dir = area.dir(dir_name='import', path=True)
    except PTaskAreaError:
        raise Exception("Could not find import directory!")
    import_dir = os.path.join(
        base_dir, 'global', product.name, product.category,
        product_repr.type, product_repr.resolution
    )
    if relative_to:
        return os.path.relpath(import_dir, relative_to)
    return import_dir
# -----------------------------------------------------------------------------
def populate_sub_cache(ptask_version=None, refresh=False):
    """Fill the module-level subscription caches with imgseq representations.

    When ptask_version is None it is resolved from the current ptask area.
    The caches are (re)built only when refresh is True or they are empty.
    """
    # Resolve the area unconditionally: it is also needed below as the
    # `area` argument to get_import_dir().  (Previously it was only bound
    # inside the `if not ptask_version` branch, so calling with an explicit
    # ptask_version raised NameError.)
    ptask_area = PTaskArea.current()
    if not ptask_version:
        ptask = PTask.get(ptask_area.spec)
        if ptask_area.version:
            ptask_version = ptask.version(ptask_area.version)
        else:
            ptask_version = ptask.latest_version
    nuke_file = nuke.root().name()
    nuke_dir = os.path.dirname(nuke_file)
    if refresh or not SUBD_REPR_CACHE:
        for sub in ptask_version.subscriptions:
            for product_repr in sub.product_version.representations:
                product = product_repr.product_version.product
                # only image sequences can be read in nuke
                if product.category != 'imgseq':
                    continue
                product_repr_str = product.name_spec + ' @ ' + \
                    product_repr.type
                if product_repr.resolution != 'none':
                    product_repr_str += PTaskSpec.SEPARATOR + \
                        product_repr.resolution
                sub_import_dir = get_import_dir(product_repr,
                    product=product, area=ptask_area, relative_to=nuke_dir)
                # populate cache lookups
                SUBD_REPR_CACHE.append(product_repr)
                PRODUCT_REPR_STR_TO_PATH[product_repr_str] = \
                    sub_import_dir
# -----------------------------------------------------------------------------
def read_sub_knob_changed(node=None, knob=None):
    """Knob-changed callback for ReadSub nodes.

    Keeps the file knob and the sequence enumeration in sync with the
    selected subscription.
    """
    if not node:
        node = nuke.thisNode()
    if not knob:
        knob = nuke.thisKnob()
    if knob.name() == 'product_repr_select':
        product_repr_str = node['product_repr_select'].value()
        if (product_repr_str == DEFAULT_REPR_STR or
            product_repr_str not in PRODUCT_REPR_STR_TO_PATH):
            node['product_seq_select'].setValues([])
            node['file'].setValue('')
            return
        repr_dir = PRODUCT_REPR_STR_TO_PATH[product_repr_str]
        # Collect the distinct <base>.####.<ext> specs present on disk.
        # NOTE: the original pattern used a non-raw string with an unescaped
        # '.' before the frame number, which matched any character.
        frame_regex = re.compile(r'([,\w]+)\.(\d{4})\.(\w+)')
        file_specs = set()
        for file_name in os.listdir(repr_dir):
            matches = frame_regex.search(file_name)
            if matches:
                (file_base, frame_num, file_ext) = matches.groups()
                file_specs.add(file_base + '.####.' + file_ext)
        file_specs = sorted(file_specs)
        node['product_seq_select'].setValues(file_specs)
        if file_specs:
            node['file'].setValue(os.path.join(repr_dir, file_specs[0]))
        else:
            # No frames on disk yet: leave the file knob empty instead of
            # raising IndexError as the original did.
            node['file'].setValue('')
    if knob.name() == 'product_seq_select':
        repr_dir = os.path.dirname(node['file'].value())
        file_spec = node['product_seq_select'].value()
        node['file'].setValue(os.path.join(repr_dir, file_spec))
# -----------------------------------------------------------------------------
def create_read_sub_node():
    """Create a Read node wired to choose among subscribed image sequences."""
    node = nuke.createNode('Read', inpanel=True)
    # Find the first unused ReadSub<N> node name.
    node_name = 'ReadSub'
    node_inst = 1
    while nuke.exists(node_name + str(node_inst)):
        node_inst += 1
    node_name += str(node_inst)
    node.knob('name').setValue(node_name)
    sub_tab = nuke.Tab_Knob("Sub")
    # make sure the product reprs are cached
    populate_sub_cache(refresh=False)
    repr_str_list = [DEFAULT_REPR_STR]
    repr_str_list.extend(sorted(PRODUCT_REPR_STR_TO_PATH.keys()))
    product_repr_select = nuke.Enumeration_Knob(
        'product_repr_select',
        'subscription',
        repr_str_list,
    )
    product_seq_select = nuke.Enumeration_Knob(
        'product_seq_select',
        'files',
        [],
    )
    # Register the callback before adding the knobs so changes are tracked.
    nuke.callbacks.addKnobChanged(read_sub_knob_changed,
        nodeClass='Read', node=node)
    node.addKnob(sub_tab)
    node.addKnob(product_repr_select)
    node.addKnob(product_seq_select)
    # make the tab pop to front
    node['Sub'].setFlag(0)
    # Initialize the file/sequence knobs from the default selection.
    read_sub_knob_changed(node=node, knob=node.knob('product_repr_select'))
# -----------------------------------------------------------------------------
def update_all_read_sub_nodes():
    """Refresh the subscription/file enumerations on every ReadSub node."""
    read_sub_nodes = [node for node in nuke.allNodes(
        filter='Read') if node.knob('product_repr_select')]
    repr_str_list = [DEFAULT_REPR_STR]
    repr_str_list.extend(sorted(PRODUCT_REPR_STR_TO_PATH.keys()))
    # Parenthesized print of a single argument behaves identically on
    # python 2 (this file uses py2-era APIs) and is valid python 3.
    print("UPDATING: " + str([n.name() for n in read_sub_nodes]))
    for node in read_sub_nodes:
        product_repr_select = node.knob('product_repr_select')
        product_seq_select = node.knob('product_seq_select')
        cur_repr_value = product_repr_select.value()
        cur_seq_value = product_seq_select.value()
        product_repr_select.setValues(repr_str_list)
        if cur_repr_value in repr_str_list:
            # Re-select the previous subscription and rebuild its file list.
            product_repr_select.setValue(cur_repr_value)
            read_sub_knob_changed(node=node, knob=product_repr_select)
            seq_values = product_seq_select.value()
            # NOTE(review): value() returns the current selection string, so
            # this is a substring test; .values() was probably intended --
            # confirm against the nuke API before changing.
            if cur_seq_value in seq_values:
                product_seq_select.setValue(cur_seq_value)
        else:
            product_repr_select.setValue(DEFAULT_REPR_STR)
        nuke.callbacks.addKnobChanged(read_sub_knob_changed,
            nodeClass='Read', node=node)
# -----------------------------------------------------------------------------
def add_commands():
    """Register the dpa node creation commands on nuke's Nodes menu."""
    nodes_menu = nuke.menu('Nodes')
    nodes_menu.addCommand(
        name='Image/WriteProduct',
        command=create_write_product_node,
        shortcut='w',
    )
    nodes_menu.addCommand(
        name='Image/ReadSub',
        command=create_read_sub_node,
    )
# -----------------------------------------------------------------------------
def on_load():
    """Script-load hook: rebuild the subscription cache, then refresh nodes."""
    populate_sub_cache(refresh=True)
    update_all_read_sub_nodes()
| {
"repo_name": "Clemson-DPA/dpa-pipe",
"path": "dpa/nuke/nodes.py",
"copies": "1",
"size": "8279",
"license": "mit",
"hash": -6019101552678284000,
"line_mean": 29.1054545455,
"line_max": 79,
"alpha_frac": 0.5512743085,
"autogenerated": false,
"ratio": 3.687750556792873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4739024865292873,
"avg_score": null,
"num_lines": null
} |
"""A collection of examples for CARLAEnvironment"""
import pygame
from tensorforce import Agent
from tensorforce.environments import CARLAEnvironment
def training_example(num_episodes: int, max_episode_timesteps: int):
    """Train a default PPO agent on CARLAEnvironment.

    The CARLA simulator must already be running before this is called.
    """
    environment = CARLAEnvironment(debug=True)
    # An example agent; substitute your own configuration as needed.
    agent = Agent.create(agent='ppo',
                         environment=environment,
                         max_episode_timesteps=max_episode_timesteps,
                         batch_size=1)
    # Built-in training loop (a tensorforce Runner would also work).
    # `weights_dir` and `record_dir` are None to disable saving/recording.
    environment.train(agent=agent,
                      num_episodes=num_episodes,
                      max_episode_timesteps=max_episode_timesteps,
                      weights_dir=None, record_dir=None)
    pygame.quit()
def custom_env_example(num_episodes: int, max_episode_timesteps: int):
    """Show how to customize CARLAEnvironment: actions, sensors, reward, agent."""
    # import some libs
    import carla
    import numpy as np
    from tensorforce.environments.carla_environment import CARLAEnvironment, SensorSpecs, env_utils
    # Subclass `CARLAEnvironment` to customize it:
    class MyCARLAEnvironment(CARLAEnvironment):
        # Change actions space: (throttle, steer, brake, reverse)
        ACTIONS_SPEC = dict(type='float', shape=(4,), min_value=-1.0, max_value=1.0)
        DEFAULT_ACTIONS = np.array([0.0, 0.0, 0.0, 0.0])
        # Define your own mapping: actions -> carla.VehicleControl
        def actions_to_control(self, actions):
            # actions are in [-1, 1]; throttle and brake are rescaled to [0, 1]
            self.control.throttle = float((actions[0] + 1) / 2.0)
            self.control.steer = float(actions[1])
            self.control.brake = float((actions[2] + 1) / 2.0)
            self.control.reverse = bool(actions[3] > 0)
            self.control.hand_brake = False
        # Define which sensors to use:
        def default_sensors(self) -> dict:
            sensors = super().default_sensors()
            # Substitute the default rgb camera with a semantic segmentation camera
            sensors['camera'] = SensorSpecs.segmentation_camera(position='front', attachment_type='Rigid',
                                                                image_size_x=self.window_size[0],
                                                                image_size_y=self.window_size[1],
                                                                sensor_tick=self.tick_time)
            # Add a radar sensor
            sensors['radar'] = SensorSpecs.radar(position='radar', sensor_tick=self.tick_time)
            return sensors
        # Define a default agent (only used if env.train(agent=None, ...))
        def default_agent(self, **kwargs) -> Agent:
            return Agent.create(agent='ppo',
                                environment=self,
                                max_episode_timesteps=kwargs.get('max_episode_timesteps'),
                                batch_size=1)
        # Define your own reward function:
        def reward(self, actions, time_cost=-2.0):
            # Penalize exceeding the speed limit; mildly penalize crawling.
            speed = env_utils.speed(self.vehicle)
            speed_limit = self.vehicle.get_speed_limit()
            if speed <= speed_limit:
                speed_penalty = -1.0 if speed < speed_limit / 2 else 0.0
            else:
                speed_penalty = speed_limit - speed
            return time_cost - self.collision_penalty * 2.0 + speed_penalty
        def render(self, sensors_data: dict):
            super().render(sensors_data)
            env_utils.draw_radar_measurement(debug_helper=self.world.debug, data=sensors_data['radar'])
    # Training:
    env = MyCARLAEnvironment(debug=True)
    env.train(agent=None,  # pass None to use the default_agent
              num_episodes=num_episodes, max_episode_timesteps=max_episode_timesteps,
              weights_dir=None, record_dir=None)
    pygame.quit()
| {
"repo_name": "reinforceio/tensorforce",
"path": "examples/carla_examples.py",
"copies": "1",
"size": "3902",
"license": "apache-2.0",
"hash": -4746415820002222000,
"line_mean": 41.4130434783,
"line_max": 106,
"alpha_frac": 0.5914915428,
"autogenerated": false,
"ratio": 4.098739495798319,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021235026481697407,
"num_lines": 92
} |
"""A collection of extended path classes and methods
The classes all inherit from the original path.path
"""
import os
from fnmatch import fnmatch
from pysyte.types.lists import flatten
class PathError(Exception):
    """Something went wrong with a path"""
    prefix = 'Path Error'

    def __init__(self, message):
        super().__init__(message)
class PathAssertions(object):
    """Assertions that can be made about paths"""
    # pylint: disable=no-member

    def assert_exists(self):
        """Raise a PathError if this path does not exist on disk"""
        if self.exists():
            return self
        raise PathError(f'{self} does not exist')

    def assert_isdir(self):
        """Raise a PathError if this path is not a directory on disk"""
        if self.isdir():
            return self
        raise PathError(f'{self} is not a directory')

    def assert_isfile(self):
        """Raise a PathError if this path is not a file on disk"""
        if self.isfile():
            return self
        raise PathError(f'{self} is not a file')
from path import Path as PPath # pylint: disable=wrong-import-position
class DotPath(PPath):
"""Some additions to the classic path class"""
# pylint: disable=abstract-method
def __repr__(self):
string = repr(f'{self}')
return f'<{self.__class__.__name__} {string}>'
# The / operator joins paths.
def __div__(self, child):
"""Join two path components, adding a separator character if needed.
If the result is a file return self.__file_class__(result)
>>> p = DotPath('/home/guido')
>>> p.__div__('fred') == p / 'fred' == p.joinpath('fred')
True
"""
if child:
result = os.path.join(self, child)
else:
result = str(self) # pylint: disable=redefined-variable-type
return self.as_existing_file(result)
__truediv__ = __div__
def __cmp__(self, other):
return cmp(str(self), str(other))
def _next_class(self, string):
return self.as_existing_file(string)
def basename(self):
return str(super().basename())
@property
def name(self):
return str(super().name)
def as_existing_file(self, filepath):
"""Return the file class for existing files only"""
if os.path.isfile(filepath) and hasattr(self, '__file_class__'):
return self.__file_class__(filepath) # pylint: disable=no-member
return self.__class__(filepath)
def parent_directory(self):
if str(self) == '/':
return None
return self.parent
def directory(self):
"""Return a path to the path's directory"""
return self.parent
def dirnames(self):
"""Split the dirname into individual directory names
An absolute path starts with an empty string, a relative path does not
>>> DotPath(u'/path/to/module.py').dirnames() == [u'', u'path', u'to']
True
>>> DotPath(u'path/to/module.py').dirnames() == [u'path', u'to']
True
"""
return self.dirname().split(os.path.sep)
def dirpaths(self):
"""Split the dirname into individual directory names
An absolute path starts with an empty string, a relative path does not
>>> p = DotPath(u'/path/to/x.py')
>>> p.paths == p.dirpaths()
True
"""
parts = self.parts()
result = [DotPath(parts[0] or '/')]
for name in parts[1:]:
result.append(result[-1] / name)
return result
def parts(self):
"""Split the path into parts like Pathlib
>>> expected = ['/', 'path', 'to', 'there']
>>> assert DotPath('/path/to/there').parts() == expected
"""
parts = self.split(os.path.sep)
parts[0] = parts[0] and parts[0] or '/'
return parts
def directories(self):
"""Split the dirname into individual directory names
No empty parts are included
>>> DotPath(u'path/to/module.py').directories() == [u'path', u'to']
True
>>> DotPath(u'/path/to/module.py').directories() == [u'path', u'to']
True
"""
return [d for d in self.dirnames() if d]
parents = property(
dirnames, None, None,
""" This path's parent directories, as a list of strings.
>>> DotPath(u'/path/to/module.py').parents == [u'', u'path', u'to']
True
""")
paths = property(
dirpaths, None, None,
""" This path's parent directories, as a sequence of paths.
>>> paths = DotPath('/usr/bin/vim').paths
>>> paths[-1].isfile() # vim might be a link
True
>>> paths[-2] == paths[-1].parent
True
>>> paths[-3] == paths[-2].parent
True
>>> paths[-4] == paths[-3].parent
True
>>> paths[-4] == paths[0]
True
""")
def short_relative_path_to(self, destination):
"""The shorter of either the absolute path of the destination,
or the relative path to it
>>> print(DotPath('/home/guido/bin').short_relative_path_to(
... '/home/guido/build/python.tar'))
../build/python.tar
>>> print(DotPath('/home/guido/bin').short_relative_path_to(
... '/mnt/guido/build/python.tar'))
/mnt/guido/build/python.tar
"""
relative = self.relpathto(destination)
absolute = self.__class__(destination).abspath()
if len(relative) < len(absolute):
return relative
return absolute
def short_relative_path_to_here(self):
"""A short path relative to current working directory"""
return self.short_relative_path_to(os.getcwd())
def short_relative_path_from_here(self):
"""A short path relative to self to the current working directory"""
return self.__class__(os.getcwd()).short_relative_path_to(self)
def walk_some_dirs(self, levels=-1, pattern=None):
    """Iterate over subdirectories, recursively, down to `levels` deep.

    With the optional 'pattern' argument, this yields only
    directories whose names match the given pattern. For
    example, mydir.walk_some_dirs(pattern='*test') yields only
    directories with names ending in 'test'.

    The default levels=-1 never reaches 0 by decrementing, so it
    means "no depth limit".
    """
    if not levels:
        yield self
        # PEP 479: raising StopIteration inside a generator has been a
        # RuntimeError since Python 3.7 -- a plain return ends the generator.
        return
    levels -= 1
    for child in self.dirs():
        if pattern is None or child.fnmatch(pattern):
            yield child
        if levels:
            for subsubdir in child.walk_some_dirs(levels, pattern):
                yield subsubdir
def fnmatch_basename(self, glob):
    """This path, if its basename matches the glob, else None

    Any leading path separators on the glob are ignored.
    """
    pattern = glob.lstrip(os.path.sep) if glob.startswith(os.path.sep) else glob
    return self if fnmatch(self.basename(), pattern) else None
def fnmatch_directory(self, glob):
if glob.startswith(os.path.sep) or glob.endswith(os.path.sep):
glob = glob.strip(os.path.sep)
if self.isdir():
string = self.basename()
else:
string = self.parent.basename()
if fnmatch(string, glob):
return self
return None
def fnmatch_directories(self, glob):
if glob.startswith(os.path.sep) or glob.endswith(os.path.sep):
glob = glob.strip(os.path.sep)
strings = reversed(self.directory().splitall()[1:])
for string in strings:
if fnmatch(string, glob):
return self
return None
def fnmatch_part(self, glob):
if self.fnmatch(glob):
return self
if self.fnmatch_basename(glob):
return self
if self.fnmatch_directory(glob):
return self
if self.fnmatch_directories(glob):
return self
return None
def __get_owner_not_implemented(self):
pass
def expandall(self):
try:
return self.expand().realpath().abspath()
except AttributeError:
return self.__class__(
os.path.abspath(os.path.realpath(
os.path.expanduser(os.path.expandvars(str(self)))
))
)
def same_path(self, other):
return self.expandall() == other.expandall()
@property
def hidden(self):
"""A 'hidden file' has a name starting with a '.'"""
s = str(self.basename())
return s and s[0] == '.'
def add_ext(self, *args):
"""Join all args as extensions
Strip any leading `.` from args
>>> source = makepath(__file__)
>>> new = source.add_ext('txt', '.new')
>>> assert new.name.endswith('.py.txt.new')
"""
exts = [(a[1:] if a[0] == '.' else a) for a in args]
string = '.'.join([self] + list(exts))
return makepath(string)
def add_missing_ext(self, ext):
"""Add that extension, if it is missing
>>> fred = makepath('fred.py')
>>> assert fred.add_missing_ext('.py') == fred
>>> assert fred.add_missing_ext('.txt').endswith('.py.txt')
"""
copy = self[:]
stem, old = os.path.splitext(copy)
if old == ext:
return makepath(self)
return self.add_ext(ext)
def ext_language(ext, exts=None):
"""Language of the extension in those extensions
If exts is supplied, then restrict recognition to those exts only
If exts is not supplied, then use all known extensions
>>> ext_language('.py') == 'python'
True
"""
languages = {
'.py': 'python',
'.py2': 'python2',
'.py3': 'python3',
'.sh': 'bash',
'.bash': 'bash',
'.pl': 'perl',
'.js': 'javascript',
'.txt': 'english',
}
ext_languages = {_: languages[_] for _ in exts} if exts else languages
return ext_languages.get(ext)
def find_language(script, exts=None):
"""Determine the script's language extension
>>> this_script = __file__.rstrip('c')
>>> find_language(makepath(this_script)) == 'python'
True
If exts are given they restrict which extensions are allowed
>>> find_language(makepath(this_script), ('.sh', '.txt')) is None
True
If there is no extension, but shebang is present, then use that
(Expecting to find "#!" in ~/.bashrc for this test,
but ~/.bashrc might not exist - then there's no language)
>>> bashrc = home() / '.bashrc'
>>> find_language(bashrc) in ('bash', None)
True
"""
if not script.isfile():
return None
if script.ext:
return ext_language(script.ext, exts)
shebang = script.shebang()
return shebang and str(shebang.name) or None
del PPath
class FilePath(DotPath, PathAssertions):
"""A path to a known file"""
def __div__(self, child):
raise PathError('%r has no children' % self)
__truediv__ = __div__
def __iter__(self):
for line in self.stripped_lines():
yield line
def try_remove(self):
"""Try to remove the file"""
self.remove()
def stripped_lines(self):
"""A list of all lines without trailing whitespace
If lines can not be read (e.g. no such file) then an empty list
"""
try:
return [l.rstrip() for l in self.lines(retain=False)]
except (IOError, UnicodeDecodeError):
return []
def stripped_whole_lines(self):
"""A list of all lines without trailing whitespace or blank lines"""
return [l for l in self.stripped_lines() if l]
def non_comment_lines(self):
"""A list of all non-empty, non-comment lines"""
return [l
for l in self.stripped_whole_lines()
if not l.startswith('#')]
def has_line(self, string):
for line in self:
if string == line:
return True
return False
def any_line_has(self, string):
for line in self:
if string in line:
return True
return False
def split_all_ext(self):
copy = self[:]
filename, ext = os.path.splitext(copy)
if ext == '.gz':
filename, ext = os.path.splitext(filename)
ext = f'{ext}.gz'
return self.__class__(filename), ext
split_ext = split_all_ext
def as_python(self):
"""The path to the file with a .py extension
>>> FilePath('/path/to/fred.txt').as_python()
<FilePath '/path/to/fred.py'>
"""
return self.extend_by('.py')
def extend_by(self, extension):
    """The path to the file changed to use the given extension

    >>> FilePath('/path/to/fred').extend_by('.txt')
    <FilePath '/path/to/fred.txt'>
    >>> FilePath('/path/to/fred.txt').extend_by('..tmp')
    <FilePath '/path/to/fred.tmp'>
    >>> FilePath('/path/to/fred.txt').extend_by('fred')
    <FilePath '/path/to/fred.fred'>
    """
    copy = self[:]
    filename, _ = os.path.splitext(copy)
    # Strip any leading dots off the extension, then attach exactly one.
    # (The previous code interpolated a stray "(unknown)" literal instead
    # of `filename`, discarding the path entirely.)
    return self.__class__(f'{filename}.{extension.lstrip(".")}')
def make_read_only(self):
"""chmod the file permissions to -r--r--r--"""
self.chmod(ChmodValues.readonly_file)
def cd(self): # pylint: disable=invalid-name
"""Change program's current directory to self"""
return cd(self.parent)
def dirname(self):
return DirectPath(os.path.dirname(self))
parent = property(dirname)
def shebang(self):
"""The #! entry from the first line of the file
If no shebang is present, return an empty string
"""
try:
try:
first_line = self.lines(retain=False)[0]
except (OSError, UnicodeDecodeError):
return ''
if first_line.startswith('#!'):
rest_of_line = first_line[2:].strip()
parts = rest_of_line.split(' ')
interpreter = parts[0]
return makepath(interpreter)
except IndexError:
pass
return ''
def mv(self, destination): # pylint: disable=invalid-name
return self.move(destination)
def make_file_exist(self):
"""Make sure the parent directory exists, then touch the file"""
self.parent.make_directory_exist()
self.parent.touch_file(self.name)
return self
@property
def language(self):
    """The language of this file, detected lazily and cached."""
    try:
        return self._language
    except AttributeError:
        # NOTE(review): this reads `self.exts`, but choose_language()
        # below stores `self._exts` -- confirm which attribute name
        # is intended before unifying them.
        self._language = find_language(self, getattr(self, 'exts', None))
    return self._language

@language.setter
def language(self, value):
    # Fixed typo: previously assigned `self._langauge`, so setting
    # `.language` was silently ignored by the getter's cache.
    self._language = value

def choose_language(self, exts):
    """Restrict language detection of this file to the given extensions."""
    self._exts = exts
    self._language = ext_language(self.ext, exts)
class DirectPath(DotPath, PathAssertions):
"""A path which knows it might be a directory
And that files are in directories
"""
__file_class__ = FilePath
def __iter__(self):
for a_path in DotPath.listdir(self):
yield a_path
def __contains__(self, thing):
return self.contains(thing)
def contains(self, thing):
try:
_ = thing.contains
return str(thing).startswith(str(self))
except AttributeError:
return thing in str(self)
def directory(self):
"""Return a path to a directory.
Either the path itself (if it is a directory), or its parent)
"""
if self.isdir():
return self
return self.parent
def try_remove(self):
"""Try to remove the path
If it is a directory, try recursive removal of contents too
"""
if self.islink():
self.unlink()
elif self.isfile():
self.remove()
elif self.isdir():
self.empty_directory()
if self.isdir():
self.rmdir()
else:
return False
return True
def empty_directory(self):
"""Remove all contents of a directory
Including any sub-directories and their contents"""
for child in self.walkfiles():
child.remove()
for child in reversed([d for d in self.walkdirs()]):
if child == self or not child.isdir():
continue
child.rmdir()
def cd(self): # pylint: disable=invalid-name
"""Change program's current directory to self"""
return cd(self)
def listdir(self, pattern=None):
return [self.as_existing_file(_)
for _ in DotPath.listdir(self, pattern)]
def list_dirs(self, pattern=None):
return self.list_dirs_files(pattern)[0]
def list_files(self, pattern=None):
return self.list_dirs_files(pattern)[1]
def list_dirsfiles(self, pattern=None):
dirs, others = self.list_dirs_files(pattern)
return dirs + others
def list_dirs_files(self, pattern=None):
items = self.listdir(pattern)
dirs = [_ for _ in items if _.isdir()]
others = [_ for _ in items if not _.isdir()]
return dirs, others
def make_directory_exist(self):
if self.isdir():
return False
if os.path.exists(self):
raise PathError(f'{self} exists but is not a directory')
self.makedirs()
def make_file_exist(self, filename=None):
"""Make the directory exist, then touch the file
If the filename is None, then use self.name as filename
"""
if filename is None:
path_to_file = FilePath(self)
path_to_file.make_file_exist()
return path_to_file
else:
self.make_directory_exist()
path_to_file = self.touch_file(filename)
return FilePath(path_to_file)
def make_read_only(self):
"""chmod the directory permissions to -r-xr-xr-x"""
self.chmod(ChmodValues.readonly_directory)
def touch_file(self, filename):
"""Touch a file in the directory"""
path_to_file = self.__file_class__(os.path.join(self, filename))
path_to_file.touch()
return path_to_file
def existing_sub_paths(self, sub_paths):
"""Those in the given list of sub_paths which do exist"""
paths_to_subs = [self / _ for _ in sub_paths]
return [_ for _ in paths_to_subs if _.exists()]
def clear_directory(self):
"""Make sure the directory exists and is empty"""
self.make_directory_exist()
self.empty_directory()
# pylint: disable=arguments-differ
def walkdirs(self, pattern=None, errors='strict', ignores=None):
ignored = ignore_globs(ignores)
for path_to_dir in super(DirectPath, self).walkdirs(pattern, errors):
if not ignored(path_to_dir.relpath(self)):
yield path_to_dir
# pylint: disable=arguments-differ
def walkfiles(self, pattern=None, errors='strict', ignores=None):
ignored = ignore_globs(ignores)
for path_to_file in super(DirectPath, self).walkfiles(pattern, errors):
if not ignored(path_to_file.relpath(self)):
yield path_to_file
def listfiles(self, pattern=None, ignores=None):
ignored = ignore_globs(ignores)
return [_ for _ in self.listdir(pattern) if _.isfile() and not ignored(_)]
def has_vcs_dir(self):
parts = self.splitall()
for vcs_dir in ('.git', '.svn', '.hg'):
if self.fnmatch_part(vcs_dir):
return True
return False
def ignore_globs(ignores):
def ignored(a_path):
if not ignores:
return False
for ignore in ignores:
if a_path.fnmatch_part(ignore):
return True
return False
return ignored
class ChmodValues(object):
# pylint: disable=too-few-public-methods
readonly_file = 0o444
readonly_directory = 0o555
def makepath(s, as_file=False):
"""Make a path from a string
Expand out any variables, home squiggles, and normalise it
See also http://stackoverflow.com/questions/26403972
"""
if s is None:
return None
if isinstance(s, DotPath):
return s
result = FilePath(s) if (os.path.isfile(s) or as_file) else DirectPath(s)
return result.expandall()
path = makepath # pylint: disable=invalid-name
def cd(path_to): # pylint: disable=invalid-name
"""cd to the given path
If the path is a file, then cd to its parent directory
Remember current directory before the cd
so that we can cd back there with cd('-')
"""
if path_to == '-':
if not cd.previous:
raise PathError('No previous directory to return to')
return cd(cd.previous)
if not hasattr(path_to, 'cd'):
path_to = makepath(path_to)
try:
previous = os.getcwd()
except OSError as e:
if 'No such file or directory' in str(e):
return False
raise
if path_to.isdir():
os.chdir(path_to)
elif path_to.isfile():
os.chdir(path_to.parent)
elif not os.path.exists(path_to):
return False
else:
raise PathError(f'Cannot cd to {path_to}')
cd.previous = previous
return True
try:
cd.previous = makepath(os.getcwd())
except (OSError, AttributeError):
cd.previous = None
def as_path(string_or_path):
"""Return the argument as a DirectPath
If it is already one, return it unchanged
If not, return the makepath()
"""
if isinstance(string_or_path, DirectPath):
return string_or_path
return makepath(string_or_path)
def string_to_paths(string):
for c in ':, ;':
if c in string:
return strings_to_paths(string.split(c))
return [makepath(string)]
def strings_to_paths(strings):
return [makepath(s) for s in strings]
def paths(strings):
return [p for p in strings_to_paths(strings) if p.exists()]
def split_directories(strings):
paths = strings_to_paths(strings)
return [_
for _ in paths
if _.isdir()], [_ for _ in paths if not _.isdir()]
def split_files(strings):
paths = strings_to_paths(strings)
return ([_ for _ in paths if _.isfile()],
[_ for _ in paths if not _.isfile()])
def split_directories_files(strings):
paths = strings_to_paths(strings)
return ([_ for _ in paths if _.isdir()],
[_ for _ in paths if _.isfile()],
[_ for _ in paths if not (_.isfile() or _.isdir())])
def files(strings):
return split_files(strings)[0]
def directories(strings):
return split_directories(strings)[0]
def home(sub_path=None):
    """Path to the user's home directory, or to sub_path under it."""
    home_ = makepath('~')
    # Guard the default: joining a path with None would fail,
    # so a bare home() call must return the home directory itself.
    return home_ / sub_path if sub_path else home_
def pwd():
return makepath(os.getcwd())
here = pwd # pylint: disable=invalid-name
def first_dir(path_string):
    """Get the first directory in that path

    >>> first_dir('usr/local/bin') == 'usr'
    True
    """
    head, _, _ = path_string.partition(os.path.sep)
    return head

def first_dirs(path_strings):
    """Get the roots of those paths

    >>> first_dirs(['usr/bin', 'bin']) == ['usr', 'bin']
    True
    """
    return [first_dir(p) for p in path_strings]

def unique_first_dirs(path_strings):
    """Get the unique roots of those paths

    >>> unique_first_dirs(['usr/local/bin', 'bin']) == set(['usr', 'bin'])
    True
    """
    return set(first_dirs(path_strings))
def _names_in_directory(path_to_directory):
"""Get all items in the given directory
Swallow errors to give an empty list
"""
try:
return os.listdir(path_to_directory)
except OSError:
return []
def fnmatcher(pattern, path_to_directory=None, wanted=lambda x: True):
"""Gives a method to check if an item matches the pattern, and is wanted
If path_to_directory is given then items are checked there
By default, every item is wanted
"""
return lambda x: fnmatch(x, pattern) and wanted(os.path.join(path_to_directory, x) if path_to_directory else x)
def list_items(path_to_directory, pattern, wanted=lambda x: True):
"""All items in the given path which match the given glob and are wanted
By default, every path is wanted
"""
if not path_to_directory:
return set()
needed = fnmatcher(pattern, path_to_directory, wanted)
return [os.path.join(path_to_directory, name)
for name in _names_in_directory(path_to_directory)
if needed(name)]
def list_sub_directories(path_to_directory, pattern):
"""All sub-directories of the given directory matching the given glob"""
return list_items(path_to_directory, pattern, os.path.isdir)
def set_items(path_to_directory, pattern, wanted):
return set(list_items(path_to_directory, pattern, wanted))
def set_files(path_to_directory, pattern):
"""A list of all files in the given directory matching the given glob"""
return set_items(path_to_directory, pattern, os.path.isfile)
def contains_glob(path_to_directory, pattern, wanted=lambda x: True):
"""Whether the given path contains an item matching the given glob"""
if not path_to_directory:
return False
needed = fnmatcher(pattern, path_to_directory, wanted)
for name in _names_in_directory(path_to_directory):
if needed(name):
return True
return False
def contains_directory(path_to_directory, pattern):
"""Whether the given path contains a directory matching the given glob"""
return contains_glob(path_to_directory, pattern, os.path.isdir)
def contains_file(path_to_directory, pattern):
"""Whether the given directory contains a file matching the given glob"""
return contains_glob(path_to_directory, pattern, os.path.isfile)
def environ_paths(key):
return [makepath(_) for _ in os.environ[key].split(':')]
def environ_path(key):
return makepath(os.environ[key])
def default_environ_path(key, default):
return makepath(os.environ.get(key, default))
def add_star(string):
    """Add '.*' to string

    A string that already contains a '.' only gets the '*'.

    >>> assert add_star('fred') == 'fred.*'
    """
    if '.' in string:
        return f'{string}*'
    return f'{string}.*'

def add_stars(strings):
    """Add '.*' to each string

    A single string is treated as a one-element list.

    >>> assert add_stars(['fred', 'Fred.']) == ['fred.*', 'Fred.*']
    """
    if isinstance(strings, str):
        strings = [strings]
    return [add_star(s) for s in strings]
def tab_complete_files(paths, globber):
return tab_complete(paths, globber, os.path.isfile)
def tab_complete_dirs(paths, globber):
return tab_complete(paths, globber, os.path.isdir)
def tab_complete(strings, globber=add_stars, select=os.path.exists):
"""Finish path names "left short" by bash's tab-completion
strings is a string or strings
if any string exists as a path return those as paths
globber is a method which should return a list of globs
default: add_stars(['fred.']) == ['fred.*']
or, e.g: add_python(['fred', 'fred.']) == ['fred.py*']
if any expanded paths exist return those
select is a method to choose wanted paths
Defaults to selecting existing paths
"""
strings_ = [strings] if isinstance(strings, str) else strings
globs = flatten([globber(s) for s in strings_])
here_ = here()
matches = []
for glob_ in globs:
if '/' in glob_:
directory, base = os.path.split(glob_)
dir_ = here_ / directory
else:
dir_ = here_
base = glob_
match = [p for p in dir_.listdir() if p.fnmatch_basename(base)]
matches.extend(match)
result = [p for p in set(matches) if select(p)]
try:
result[0]
return result
except IndexError:
return strings
def pyc_to_py(path_to_file):
    """Change some file extensions to those which are more likely to be text

    >>> pyc_to_py('vim.pyc') == 'vim.py'
    True
    """
    root, extension = os.path.splitext(path_to_file)
    if extension != '.pyc':
        return path_to_file
    return f'{root}.py'
| {
"repo_name": "jalanb/dotsite",
"path": "pysyte/types/paths.py",
"copies": "1",
"size": "28258",
"license": "mit",
"hash": 8857817128804107000,
"line_mean": 28.4354166667,
"line_max": 115,
"alpha_frac": 0.5853917475,
"autogenerated": false,
"ratio": 3.958257459027875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5043649206527875,
"avg_score": null,
"num_lines": null
} |
# A collection of failing integrals from the issues.
from sympy import (
integrate, Integral, exp, oo, pi, sign, sqrt, sin, cos, Piecewise,
tan, S, log, gamma, sinh, sec, acos, atan, sech, csch, DiracDelta, Rational
)
from sympy.utilities.pytest import XFAIL, SKIP, slow, skip, ON_TRAVIS
from sympy.abc import x, k, c, y, b, h, a, m, z, n, t
@SKIP("Too slow for @slow")
@XFAIL
def test_issue_3880():
# integrate_hyperexponential(Poly(t*2*(1 - t0**2)*t0*(x**3 + x**2), t), Poly((1 + t0**2)**2*2*(x**2 + x + 1), t), [Poly(1, x), Poly(1 + t0**2, t0), Poly(t, t)], [x, t0, t], [exp, tan])
assert not integrate(exp(x)*cos(2*x)*sin(2*x) * (x**3 + x**2)/(2*(x**2 + x + 1)), x).has(Integral)
@XFAIL
def test_issue_4212():
assert not integrate(sign(x), x).has(Integral)
@XFAIL
def test_issue_4491():
# Can be solved via variable transformation x = y - 1
assert not integrate(x*sqrt(x**2 + 2*x + 4), x).has(Integral)
@XFAIL
def test_issue_4511():
# This works, but gives a complicated answer. The correct answer is x - cos(x).
# If current answer is simplified, 1 - cos(x) + x is obtained.
# The last one is what Maple gives. It is also quite slow.
assert integrate(cos(x)**2 / (1 - sin(x))) in [x - cos(x), 1 - cos(x) + x,
-2/(tan((S.Half)*x)**2 + 1) + x]
@XFAIL
def test_integrate_DiracDelta_fails():
# issue 6427
assert integrate(integrate(integrate(
DiracDelta(x - y - z), (z, 0, oo)), (y, 0, 1)), (x, 0, 1)) == S.Half
@XFAIL
@slow
def test_issue_4525():
# Warning: takes a long time
assert not integrate((x**m * (1 - x)**n * (a + b*x + c*x**2))/(1 + x**2), (x, 0, 1)).has(Integral)
@XFAIL
@slow
def test_issue_4540():
if ON_TRAVIS:
skip("Too slow for travis.")
# Note, this integral is probably nonelementary
assert not integrate(
(sin(1/x) - x*exp(x)) /
((-sin(1/x) + x*exp(x))*x + x*sin(1/x)), x).has(Integral)
@XFAIL
@slow
def test_issue_4891():
# Requires the hypergeometric function.
assert not integrate(cos(x)**y, x).has(Integral)
@XFAIL
@slow
def test_issue_1796a():
assert not integrate(exp(2*b*x)*exp(-a*x**2), x).has(Integral)
@XFAIL
def test_issue_4895b():
assert not integrate(exp(2*b*x)*exp(-a*x**2), (x, -oo, 0)).has(Integral)
@XFAIL
def test_issue_4895c():
assert not integrate(exp(2*b*x)*exp(-a*x**2), (x, -oo, oo)).has(Integral)
@XFAIL
def test_issue_4895d():
assert not integrate(exp(2*b*x)*exp(-a*x**2), (x, 0, oo)).has(Integral)
@XFAIL
@slow
def test_issue_4941():
if ON_TRAVIS:
skip("Too slow for travis.")
assert not integrate(sqrt(1 + sinh(x/20)**2), (x, -25, 25)).has(Integral)
@XFAIL
def test_issue_4992():
# Nonelementary integral. Requires hypergeometric/Meijer-G handling.
assert not integrate(log(x) * x**(k - 1) * exp(-x) / gamma(k), (x, 0, oo)).has(Integral)
@XFAIL
def test_issue_16396a():
i = integrate(1/(1+sqrt(tan(x))), (x, pi/3, pi/6))
assert not i.has(Integral)
@XFAIL
def test_issue_16396b():
i = integrate(x*sin(x)/(1+cos(x)**2), (x, 0, pi))
assert not i.has(Integral)
@XFAIL
def test_issue_16161():
i = integrate(x*sec(x)**2, x)
assert not i.has(Integral)
# assert i == x*tan(x) + log(cos(x))
@XFAIL
def test_issue_16046():
assert integrate(exp(exp(I*x)), [x, 0, 2*pi]) == 2*pi
@XFAIL
def test_issue_15925a():
assert not integrate(sqrt((1+sin(x))**2+(cos(x))**2), (x, -pi/2, pi/2)).has(Integral)
@XFAIL
@slow
def test_issue_15925b():
if ON_TRAVIS:
skip("Too slow for travis.")
assert not integrate(sqrt((-12*cos(x)**2*sin(x))**2+(12*cos(x)*sin(x)**2)**2),
(x, 0, pi/6)).has(Integral)
@XFAIL
def test_issue_15925b_manual():
assert not integrate(sqrt((-12*cos(x)**2*sin(x))**2+(12*cos(x)*sin(x)**2)**2),
(x, 0, pi/6), manual=True).has(Integral)
@XFAIL
@slow
def test_issue_15227():
if ON_TRAVIS:
skip("Too slow for travis.")
i = integrate(log(1-x)*log((1+x)**2)/x, (x, 0, 1))
assert not i.has(Integral)
# assert i == -5*zeta(3)/4
@XFAIL
@slow
def test_issue_14716():
i = integrate(log(x + 5)*cos(pi*x),(x, S.Half, 1))
assert not i.has(Integral)
# Mathematica can not solve it either, but
# integrate(log(x + 5)*cos(pi*x),(x, S.Half, 1)).transform(x, y - 5).doit()
# works
# assert i == -log(Rational(11, 2))/pi - Si(pi*Rational(11, 2))/pi + Si(6*pi)/pi
@XFAIL
def test_issue_14709a():
i = integrate(x*acos(1 - 2*x/h), (x, 0, h))
assert not i.has(Integral)
# assert i == 5*h**2*pi/16
@slow
@XFAIL
def test_issue_14398():
assert not integrate(exp(x**2)*cos(x), x).has(Integral)
@XFAIL
def test_issue_14074():
i = integrate(log(sin(x)), (x, 0, pi/2))
assert not i.has(Integral)
# assert i == -pi*log(2)/2
@XFAIL
@slow
def test_issue_14078b():
i = integrate((atan(4*x)-atan(2*x))/x, (x, 0, oo))
assert not i.has(Integral)
# assert i == pi*log(2)/2
@XFAIL
def test_issue_13792():
i = integrate(log(1/x) / (1 - x), (x, 0, 1))
assert not i.has(Integral)
# assert i in [polylog(2, -exp_polar(I*pi)), pi**2/6]
@XFAIL
def test_issue_11845a():
assert not integrate(exp(y - x**3), (x, 0, 1)).has(Integral)
@XFAIL
def test_issue_11845b():
assert not integrate(exp(-y - x**3), (x, 0, 1)).has(Integral)
@XFAIL
def test_issue_11813():
assert not integrate((a - x)**Rational(-1, 2)*x, (x, 0, a)).has(Integral)
@XFAIL
def test_issue_11742():
i = integrate(sqrt(-x**2 + 8*x + 48), (x, 4, 12))
assert not i.has(Integral)
# assert i == 16*pi
@XFAIL
def test_issue_11254a():
assert not integrate(sech(x), (x, 0, 1)).has(Integral)
@XFAIL
def test_issue_11254b():
assert not integrate(csch(x), (x, 0, 1)).has(Integral)
@XFAIL
def test_issue_10584():
assert not integrate(sqrt(x**2 + 1/x**2), x).has(Integral)
@XFAIL
def test_issue_9723():
assert not integrate(sqrt(x + sqrt(x))).has(Integral)
@XFAIL
def test_issue_9101():
assert not integrate(log(x + sqrt(x**2 + y**2 + z**2)), z).has(Integral)
@XFAIL
def test_issue_7264():
assert not integrate(exp(x)*sqrt(1 + exp(2*x))).has(Integral)
@XFAIL
def test_issue_7147():
assert not integrate(x/sqrt(a*x**2 + b*x + c)**3, x).has(Integral)
@XFAIL
def test_issue_7109():
assert not integrate(sqrt(a**2/(a**2 - x**2)), x).has(Integral)
@XFAIL
def test_integrate_Piecewise_rational_over_reals():
f = Piecewise(
(0, t - 478.515625*pi < 0),
(13.2075145209219*pi/(0.000871222*t + 0.995)**2, t - 478.515625*pi >= 0))
assert abs((integrate(f, (t, 0, oo)) - 15235.9375*pi).evalf()) <= 1e-7
@XFAIL
def test_issue_4311_slow():
# Not slow when bypassing heurish
assert not integrate(x*abs(9-x**2), x).has(Integral)
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/integrals/tests/test_failing_integrals.py",
"copies": "2",
"size": "6857",
"license": "bsd-3-clause",
"hash": 8921861419278589000,
"line_mean": 23.3156028369,
"line_max": 188,
"alpha_frac": 0.5890331049,
"autogenerated": false,
"ratio": 2.572983114446529,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4162016219346529,
"avg_score": null,
"num_lines": null
} |
"""A collection of file system related utilities."""
import logging
import os
import sys
import time
import subprocess
import string
import tempfile
import stat
import shutil
if not hasattr(__builtins__, 'FileExistsError'):
FileExistsError = OSError
if not hasattr(__builtins__, 'FileNotFoundError'):
FileNotFoundError = OSError
logger = logging.getLogger('dropboxhandler.fstools')
# python2 does not allow open(..., mode='x')
def create_open(path):
    """Open a file for writing and raise if the file exists.

    This should work like `open(path, mode='x')`, which is not
    available in python26.
    """
    if sys.version_info < (3, 3):
        # O_EXCL is required to actually fail when the path exists;
        # without it os.open happily opens the existing file, breaking
        # the documented mode='x' contract.
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_NOFOLLOW | os.O_WRONLY)
        try:
            file = os.fdopen(fd, 'w')
        except OSError:
            os.close(fd)
            raise
        else:
            return file
    else:
        return open(path, mode='x')
if not hasattr(subprocess, 'check_output'):
def check_output(*args, **kwargs):
"""Python26 compatable version of `subprocess.check_output."""
kwargs['stdout'] = subprocess.PIPE
try:
proc = subprocess.Popen(*args, **kwargs)
stdout, stderr = proc.communicate()
except:
proc.kill()
proc.wait()
raise
retcode = proc.poll()
if retcode:
raise subprocess.CalledProcessError(
retcode, list(args[0])
)
return stdout
subprocess.check_output = check_output
def touch(path):
    """Create a new (empty) file, failing if it already exists."""
    handle = create_open(path)
    handle.close()
def is_old(path):
    """Whether path was last modified more than 5 days ago.

    Returns an explicit bool (the old code fell through to an
    implicit None for fresh files).
    """
    modified = os.stat(path).st_mtime
    age_seconds = time.time() - modified
    return age_seconds > 60 * 60 * 24 * 5  # 5 days
def write_checksum(file):
    """Compute checksums of file or of contents if it is a dir.

    Checksums will be written to <inputfile>.sha256sum in the
    format of the sha256sum tool.

    If file is a directory, the checksum file will include the
    checksums of all files in that dir.

    Raises ValueError if there is nothing to checksum or the tool
    output cannot be parsed; OSError if the checksum file cannot
    be written.
    """
    file = os.path.abspath(file)
    basedir = os.path.split(file)[0]
    checksum_file = str(file) + '.sha256sum'
    # Collect regular files NUL-separated (survives odd file names);
    # drop the empty entry that follows the final NUL.
    files = subprocess.check_output(
        [
            'find',
            os.path.basename(file),
            '-type', 'f',
            '-print0'
        ],
        cwd=basedir,
    ).split(b'\0')[:-1]
    if not files:
        raise ValueError("%s has no files to checksum" % file)
    try:
        with open(checksum_file, 'wb') as f:
            for file in files:
                csum_line = subprocess.check_output(
                    ['sha256sum', '-b', '--', file],
                    cwd=basedir,
                )
                csum = csum_line.split()[0]
                # sha256 hex digests are always 64 chars; anything else
                # means sha256sum's output was not what we expected
                if not len(csum) == 64:
                    raise ValueError('Could not parse sha256sum output')
                f.write(csum_line)
    except OSError:
        # Use the module logger, not the root logger (was `logging.exception`)
        logger.exception('Could not write checksum file. Does it exist?')
        raise
def clean_filename(path):
    """Generate a sane (alphanumeric) filename for path.

    Disallowed characters are dropped from the stem; a suffix with
    disallowed characters is an error.
    """
    permitted = string.ascii_letters + string.digits + '_.'
    stem, suffix = os.path.splitext(os.path.basename(path))
    safe_stem = ''.join(ch for ch in stem if ch in permitted)
    if not safe_stem:
        raise ValueError("Invalid file name: %s", stem + suffix)
    if any(ch not in permitted + '.' for ch in suffix):
        raise ValueError("Bad file suffix: " + suffix)
    return safe_stem + suffix
def _check_perms(path, userid=None, groupid=None, dirmode=None, filemode=None):
"""Raise `ValueError` if the permissions of `path` are not as expected.
Owner and group will only be checked for files, not for directories.
"""
if os.path.isdir(path):
if os.stat(path).st_mode % 0o1000 != dirmode:
raise ValueError("mode of dir %s should be %o but is %o" %
(path, dirmode, os.stat(path).st_mode % 0o1000))
elif os.path.islink(path):
raise ValueError("symbolic links are not allowed: %s" % path)
elif os.path.isfile(path):
if os.stat(path).st_mode % 0o1000 != filemode:
raise ValueError("mode of file %s should be %o but is %o" %
(path, filemode, os.stat(path).st_mode % 0o1000))
if userid and os.stat(path).st_uid != userid:
raise ValueError("userid of file %s should be %s but is %s" %
(path, userid, os.stat(path).st_uid))
if groupid and os.stat(path).st_gid != groupid:
raise ValueError("groupid of file %s should be %s but is %s" %
(path, groupid, os.stat(path).st_gid))
else:
raise ValueError("should be a regular file or dir: %s" % path)
def check_permissions(path, userid, groupid, dirmode, filemode):
    """Basic sanity check for permissions of file written by this daemon.

    Raises ValueError, if permissions are not as specified, or for files
    that are not regular files or directories.
    """
    _check_perms(path, userid, groupid, dirmode, filemode)
    # Walk the tree, checking each directory and every file within it
    for dirpath, _dirnames, filenames in os.walk(path):
        _check_perms(dirpath, userid, groupid, dirmode, filemode)
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            _check_perms(full_path, userid, groupid, dirmode, filemode)
def recursive_copy(source, dest, tmpdir=None, perms=None, link=False):
    """Copy a file or directory to destination.

    The copy is staged in a fresh temporary directory and moved into
    place with a final os.rename, so a partially-copied tree is never
    visible at `dest`; on any error the staging area is rolled back.

    Arguments
    ---------
    source : str
        Path to the source file or directory
    dest : str
        Destination file name. This must not exist. Copying a file
        into another directory by specifying the directory as destination
        (as with the command line tool `cp`) is *not* supported. You need
        to specify the whole destination path.
    tmpdir : str
        A temporary directory on the same file system as `dest`.
    perms : dict, optional
        Arguments to `fstools.check_permission`
    link : bool
        Whether files should be copied or hard-linked.
    """
    source = os.path.abspath(source)
    dest = os.path.abspath(dest)
    if os.path.exists(dest):
        raise ValueError("File exists: %s" % dest)
    destbase, destname = os.path.split(dest)
    # Stage under a private temp dir so the final move is a single rename
    tmpdir = tempfile.mkdtemp(dir=tmpdir)
    workdest = os.path.join(tmpdir, destname)
    logger.debug("Linking files in %s to workdir %s", source, workdest)
    command = [
        'cp',
        '--no-dereference',  # symbolic links could point anywhere
        '--recursive',
        '--no-clobber',
        '--',
        str(source),
        str(workdest),
    ]
    if link:
        # insert after 'cp' so '--' still terminates the option list
        command.insert(1, '--link')
    try:
        subprocess.check_call(command, shell=False)
        # remove symlinks from output
        # os.fwalk would be better, but not for py<3.3
        for root, dirs, files in os.walk(workdest):
            for file in dirs + files:
                path = os.path.join(root, file)
                stats = os.lstat(path)
                if stat.S_IFMT(stats.st_mode) == stat.S_IFLNK:
                    raise ValueError(
                        "Symbolic links are not allowed. %s is a link to %s" %
                        (path, os.readlink(path))
                    )
        if perms is not None:
            logger.debug("Checking permissions: %s", perms)
            check_permissions(workdest, **perms)
        logger.debug("Created links in workdir. Moving to destination")
        # Re-check right before the rename: dest may have appeared meanwhile.
        # NOTE(review): this ValueError passes "%s" and dest as two args
        # (never %-formatted) -- presumably meant to interpolate; verify.
        if os.path.exists(dest):
            raise ValueError("Destination exists: %s", dest)
        os.rename(workdest, dest)
    except BaseException:  # even for SystemExit
        logger.error("Got exception before we finished copying files. " +
                     "Rolling back changes")
        if os.path.exists(dest):
            shutil.rmtree(dest)
        raise
    finally:
        # Always discard the staging directory (empty after a successful rename)
        shutil.rmtree(tmpdir)
| {
"repo_name": "qbicsoftware/dropboxhandler",
"path": "dropboxhandler/fstools.py",
"copies": "2",
"size": "8109",
"license": "mit",
"hash": 804843549285486200,
"line_mean": 31.9634146341,
"line_max": 79,
"alpha_frac": 0.5860155383,
"autogenerated": false,
"ratio": 4.014356435643564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5600371973943564,
"avg_score": null,
"num_lines": null
} |
"""A collection of functional primitives"""
from functools import wraps, partial
from functools import reduce as _reduce
from inspect import getargspec
def curry(fun):
    """
    A working but dirty version of a currying function/decorator.

    Partially applying the returned function yields another curried
    function until enough positional arguments have accumulated to cover
    the original function's required (non-default) parameters, at which
    point the underlying function is actually called.
    """
    def _internal_curry(fun, original=None, given=0):
        # "original" keeps a handle on the undecorated function so the
        # required-argument count can be re-derived on every partial step.
        if original is None:
            original = fun
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # getfullargspec/signature is the modern equivalent — confirm the
        # Python versions this project supports.
        spec = getargspec(original)
        # Number of parameters that carry defaults (optional ones).
        opt = len(spec.defaults or [])
        # Positional arguments still required before we may call through.
        needed = len(spec.args) - given - opt
        @wraps(fun)
        def internal(*args, **kwargs):
            """The internal currying function"""
            if len(args) >= needed:
                return fun(*args, **kwargs)
            else:
                # Not enough arguments yet: wrap the partial application in
                # a fresh curried function remembering how many were given.
                return _internal_curry(wraps(fun)(partial(fun, *args, **kwargs)),
                                       original,
                                       given=len(args))
        return internal
    return _internal_curry(fun)
@curry
def map(fun, values):
    """
    Lazily apply a function to every element of an iterable.
    Complexity: O(n*k) where k is the complexity of the given function
    params:
        fun: the function to apply to each element
        values: the iterable whose elements should be transformed
    returns:
        a generator yielding the transformed elements
    """
    return (fun(element) for element in values)
@curry
def filter(fun, values):
    """
    Lazily keep only the elements of an iterable that satisfy a predicate.
    Complexity: O(n*k) where k is the complexity of the given function
    params:
        fun: the predicate function
        values: the iterable to filter
    returns:
        a generator yielding the elements for which the predicate holds
    """
    return (element for element in values if fun(element))
@curry
def reduce(fun, init, values=None):
    """
    Collapse an iterable into a single value using a reducing function.
    Called with two arguments, ``init`` is taken to be the iterable and
    no explicit start value is used; with three arguments, ``init`` is
    the start value and ``values`` is the iterable.
    Complexity: O(n*k) where k is the complexity of the given function
    params:
        fun: the reducing function
        init: the start value (or the iterable in the two-argument form)
        values: the iterable to reduce
    returns:
        the reduced value
    """
    if values is None:
        return _reduce(fun, init)
    return _reduce(fun, values, init)
@curry
def apply(fun, args, kwargs=None):
    """
    Call a function with a list of positional arguments and an optional
    dict of keyword arguments.
    Complexity: O(k) where k is the complexity of the given function
    params:
        fun: the function to call
        args: the positional arguments
        kwargs: the keyword arguments (defaults to an empty dict)
    returns:
        whatever the function returns
    """
    return fun(*args, **(kwargs or {}))
def pipe(*funs):
    """
    Chain functions left to right: the first function receives the
    original arguments, each later one receives its predecessor's result.
    Complexity: depends on the given functions
    params:
        *funs: the functions that should be chained
    returns: the chained function
    """
    def internal(*args, **kwargs):
        """The internal piping function"""
        result = funs[0](*args, **kwargs)
        for fun in funs[1:]:
            result = fun(result)
        return result
    return internal
def compose(*funs):
    """
    Chain functions right to left (the mirror image of pipe).
    Complexity: depends on the given functions
    params:
        *funs: the functions that should be chained
    returns: the chained function
    """
    reordered = reversed(funs)
    return pipe(*reordered)
def starpipe(*funs):
    """
    Chain functions left to right, splatting each intermediate result
    into the next function as star args.
    Complexity: depends on the given functions
    params:
        *funs: the functions that should be chained
    returns: the chained function
    """
    def internal(*args, **kwargs):
        """The internal piping function"""
        result = funs[0](*args, **kwargs)
        for fun in funs[1:]:
            result = fun(*result)
        return result
    return internal
def starcompose(*funs):
    """
    Chain functions right to left (the mirror image of starpipe); like
    starpipe, intermediate results are splatted into the next function.
    Complexity: depends on the given functions
    params:
        *funs: the functions that should be chained
    returns: the chained function
    """
    reordered = reversed(funs)
    return starpipe(*reordered)
def identity(value):
    """
    Hand back the given value untouched.
    Complexity: O(1)
    params:
        value: the value
    returns: the value
    """
    return value
@curry
def tap(fun, value):
    """
    Call a function on a value purely for its side effect, then hand the
    untouched value back.
    Complexity: O(k) where k is the complexity of the given function
    params:
        fun: the function to invoke
        value: the value
    returns: the value
    """
    fun(value)
    return value
def constantly(value):
    """
    An infinite generator that keeps yielding the same value.
    Complexity: O(1)
    params:
        value: the value to yield
    returns: an infinite generator of the value
    """
    while True:
        yield value
def delay(fun, *args, **kwargs):
    """
    A function that takes a function and its arguments and delays its
    execution until it is needed. It also caches the executed return value
    and prevents it from being executed again (always returning the first
    result).
    params:
        fun: the function
        args: the function's args
        kwargs: the function's keyword arguments
    returns: the function result
    """
    # A dict stands in for Python 3's ``nonlocal`` so this also runs on
    # Python 2.  Presence of the "value" key marks "already computed",
    # which (unlike the previous string sentinel "__delay__unset") cannot
    # collide with any value ``fun`` might legitimately return.
    cache = {}
    @wraps(fun)
    def internal():
        """The internal delay function"""
        if "value" not in cache:
            cache["value"] = fun(*args, **kwargs)
        return cache["value"]
    return internal
@curry
def flip(fun, first, second, *args):
    """
    Call a function with its arguments in reverse order.
    params:
        fun: the function
        first: the first argument
        second: the second argument
        args: the remaining args (this weird first, second, args thing
              is there to prevent preemptive passing of arguments)
    returns: the result of the function fun
    """
    everything = args + (first, second)
    return fun(*reversed(everything))
| {
"repo_name": "hellerve/hawkweed",
"path": "hawkweed/functional/primitives.py",
"copies": "1",
"size": "6447",
"license": "mit",
"hash": -2249466787256780800,
"line_mean": 27.78125,
"line_max": 86,
"alpha_frac": 0.6219947262,
"autogenerated": false,
"ratio": 4.289421157684631,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005703430561830625,
"num_lines": 224
} |
"""A collection of functional-programming primitives, loosely modeled after
ramda.js (http://ramdajs.com/).
Basic philosophy for all functions in this module:
- Prefer immutable data structures; particularly, prefer tuples over lists.
- Pure functions (none of the functions in this module have any observable side
effects, but some use in-place mutations for efficiency or practicality).
- Composability: function arguments are ordered to maximize composability, that
is, the most variable argument comes last.
- Total functions: functions should accept *any* value, at least for their
documented parameter types, and should never raise exceptions for arguments
of the correct types. This means that, for example, the head() function will
return None rather than raise an exception when you pass an empty list, and
likewise, the prop() function will return None when you try to get a
nonexistent property from a dict.
Other design principles:
- Strings and bytestrings should behave like scalars, not lists-of-characters /
list-of-bytes, in most situations.
- While our functions are pure, we do not take extra measures to avoid
manipulation of data we handle from outside. If you pass in a mutable data
structure, we will not manipulate it in place, but we may return the original
data structure when no modifications are required, and we may return
references to parts of it rather than copying said parts. For example, the
prop() function will return a reference to the object stored at a particular
key, not a deep copy; and the identity() function will never attempt to copy
its argument.
"""
def fmap(f, d):
    """ A generalized "map()", similar to the "fmap" function in Haskell.
    Applies the function "f" in the functor "d". For the purposes of this
    function, the following things are considered functors:
    - Dictionary-like objects (anything that has an .items() function); here,
      "f" is applied to the values, keeping the keys and key-value relationships
      intact.
    - Iterables; "f" is applied to all the values, ordering is kept intact.
    - Functions; returns a new function that passes its arguments to "d" and
      then applies "f" to the return value, short-circuiting None to None.
    - Anything else is considered an option, "f" is applied if "d" is not None.
    """
    # Order matters: dict-likes are also iterable, so check .items() first.
    if hasattr(d, "items") and callable(d.items):
        return {key: f(value) for key, value in d.items()}
    if hasattr(d, "__iter__"):
        return map(f, d)
    if callable(d):
        def wrapped(*args, **kwargs):
            result = d(*args, **kwargs)
            return None if result is None else f(result)
        return wrapped
    if d is None:
        return None
    return f(d)
def assoc(key, val, obj=None):
    """Add ("associate") "val" as "key" to "obj". "obj" should be a
    dictionary-like object; if it is None, then a new empty dict will be
    created. The return value will always be a dict, casting the original
    argument as needed; the input object is never mutated.
    """
    updated = dict(obj) if obj is not None else {}
    updated[key] = val
    return updated
def assocs(keyvals, obj=None):
    """Associate multiple key/value pairs into "obj". "keyvals" should be a
    list-like collection of pairs (2-tuples or other 2-element list-likes).
    """
    result = obj
    for key, val in keyvals:
        result = assoc(key, val, result)
    return result
def dissoc(key, obj):
    """Remove ("dissociate") "key" from "obj". "obj" should be a
    dictionary-like object. The return value will always be a dict, casting
    the original argument as needed; the input object is never mutated.
    """
    trimmed = dict(obj)
    # pop with a default tolerates a missing key without a prior lookup.
    trimmed.pop(key, None)
    return trimmed
def _tuplize(t):
if type(t) is str or type(t) is bytes:
return (t,)
if t is None:
return ()
try:
return tuple(t)
except TypeError:
return (t,)
def cons(h, t=None):
    """Prepend a single element to a list-like structure; the result is
    always a tuple.
    """
    return (h,) + _tuplize(t)
def snoc(h, t=None):
    """Append a single element to a list-like structure; the result is
    always a tuple.
    """
    return _tuplize(t) + (h,)
def take(n, t):
    """Take the first "n" elements of list-like "t", or all of "t" if it is
    shorter than "n" elements. Effectively, like t[:n], but always return a
    tuple. A "n" of None means "no limit" and takes everything, mirroring
    both t[:None] slice semantics and drop(None, t).
    """
    # None-handling added for consistency with drop(); previously this
    # raised a TypeError, so no caller can have depended on the old result.
    if n is None:
        return _tuplize(t)
    if n <= 0:
        return ()
    return _tuplize(t)[:n]
def drop(n, t):
    """Take all but the first "n" elements of list-like "t", or an empty tuple
    if "t" is shorter than "n" elements. Effectively, like t[n:], but always
    return a tuple. A "n" of None drops nothing.
    """
    if n is None or n <= 0:
        return _tuplize(t)
    return _tuplize(t)[n:]
def drop_end(n, t):
    """Like drop(), but remove elements from the end.
    """
    values = _tuplize(t)
    return values if n <= 0 else values[:-n]
def take_end(n, t):
    """Like take(), but take elements from the end.
    """
    return () if n <= 0 else _tuplize(t)[-n:]
def nth(n, t=None):
    """Get the nth element (0-based) from a list-like, or None if the list has
    "n" or fewer elements.
    """
    if t is None:
        return None
    index = int(n)
    # Negative indices are rejected rather than wrapping around Python-style.
    if index < 0:
        return None
    try:
        return t[index]
    except IndexError:
        return None
def head(t):
    """Get the first element from a list-like, or None if it's empty.
    """
    return nth(0, t)
def last(t):
    """Get the last element from a list-like, or None if it's empty.
    """
    return None if t is None else nth(len(t) - 1, t)
def tail(t):
    """Get all but the first element from a list-like, as a tuple.
    """
    return drop(1, t)
def fold(func, items, initial=None):
    """Left-leaning fold: feed the accumulator and each element of "items"
    through "func", starting from "initial".
    """
    accumulator = initial
    for element in _tuplize(items):
        accumulator = func(accumulator, element)
    return accumulator
def concat(items):
    """List concatenation (actually tuples).
    Takes a list-like of list-likes, turns each element into a tuple, and
    concatenates all these tuples into one. Any element that is not a
    list-like will be converted into a 1-element tuple, unless it is None,
    in which case it is converted into an empty tuple.
    """
    return fold(lambda left, right: _tuplize(left) + _tuplize(right), items, ())
def flatten(items):
    """Recursively flatten a nested data structure into a flat tuple of
    elements. Data structures that are considered flattenable are:
    - List-like collections except bytestrings and strings: lists, sets,
      tuples, and anything else that has an __iter__() method
    - Dictionary-like collections: dicts, and anything else that has a
      values() method.
    - None (which is treated as an empty list)
    Anything else is considered a scalar value and gets added to the
    flattened list unchanged.
    """
    if items is None:
        return ()
    # Strings and bytestrings act as scalars, not sequences of characters.
    if isinstance(items, (str, bytes)):
        return (items,)
    # Dict-likes contribute only their values.
    if hasattr(items, 'values') and callable(items.values):
        return flatten(items.values())
    # Any other iterable is flattened element-wise.
    if hasattr(items, '__iter__'):
        return tuple(concat(map(flatten, items)))
    # Scalar: wrap in a 1-tuple.
    return (items,)
def prop(p, item):
    """Get property "p" from "item", or None if it doesn't exist.
    """
    return prop_lens(p).get(item)
def path(p, item):
    """Follow the list-like path "p" through "item" recursively, treating
    each element as a property like "prop()" does. Any failing lookup along
    the way short-circuits the whole lookup and returns None.
    """
    return path_lens(p).get(item)
def assoc_path(p, value, item):
    """Associate a "value" into "item" along a "path". The semantics for the
    "path" are the same as those for "path()", but instead of reading the
    value at that position, a new data structure is returned with the value
    at that position replaced.
    """
    return path_lens(p).set(value, item)
def identity(x, *args, **kwargs):
    """The identity function: hand the first argument back unchanged and
    ignore every other argument.
    """
    return x
def compose(f1, f2):
    """Function composition, in mathematical order (outer-first).
    compose(f, g)(x) is equivalent to f(g(x)).
    """
    return lambda *args, **kwargs: f1(f2(*args, **kwargs))
def rcompose(f1, f2):
    """Function composition, in pipeline order (inner-first).
    rcompose(f, g)(x) is equivalent to g(f(x)).
    """
    return compose(f2, f1)
def chain(*fns):
    """Function composition generalized into a variadic function.
    The following equivalencies hold:
        chain() === identity
        chain(f) === f
        chain(f, g) === compose(f, g)
        chain(f, g, h) === compose(f, compose(g, h))
    """
    if not fns:
        return identity
    return fold(compose, tail(fns), head(fns))
def dictmap(f, item):
    """Specialized map() / fmap() for dictionaries. Maps over the values of
    a dictionary-like object, retaining keys and key/value relationships.
    For dictionary-like objects, equivalent to fmap(); anything else raises
    a TypeError.
    """
    return {key: f(value) for key, value in item.items()}
def cat_maybes(items):
    """Specialized filter(): drop every None, keep everything else, and
    return the survivors as a tuple.
    """
    return tuple(value for value in items if value is not None)
class Lens(object):
    """A Lens abstracts over a getter/setter pair and represents a "view" on
    a data object.
    Lens Law:
        lens.get(lens.set(value, subject)) == value
    Informally: setting the target of a lens produces an object for which
    getting the target of the same lens will return the value passed to the
    setter. Even more informally: lenses behave like properties, lens.get()
    being the analog of dict.get(), and lens.set() the analog of
    dict.__setitem__().
    Lenses are composable: see the compose_lens() function for a primitive
    lens combinator.
    """
    def __init__(self, getter, setter):
        self.getter = getter
        self.setter = setter
    def get(self, subject):
        """Run the lens' getter against the subject.
        """
        return self.getter(subject)
    def set(self, value, subject):
        """Run the lens' setter against the subject.
        """
        return self.setter(value, subject)
    def over(self, f, subject):
        """Modify the target of the lens by applying the function "f" to it.
        If the target is absent (getter returns None), the subject is
        returned untouched.
        """
        current = self.get(subject)
        if current is None:
            return subject
        return self.set(f(current), subject)
def prop_lens(prop_name):
    """Create a Lens that drills down into a dict-like object's named
    property.
    """
    def getter(subject):
        return None if subject is None else subject.get(prop_name)
    def setter(value, subject):
        # assoc() already treats a None subject as an empty dict.
        return assoc(prop_name, value, subject)
    return Lens(getter, setter)
def path_lens(*path):
    """Create a Lens that follows a path of properties.
    The path can be passed as variable positional args, or as any iterable,
    or any combination thereof, so the following are all equivalent:
        path_lens("foo", "bar", "baz")
        path_lens(["foo", "bar", "baz"])
        path_lens("foo", ["bar", "baz"])
        path_lens(("foo", ("bar", ("baz"))))
    Caveat: some collection types are iterable despite not having a defined
    ordering. Using such types in your path will cause unpredictable
    behavior, because the ordering of path segments cannot be guaranteed.
    For example:
        path_lens(set(("foo", "bar")))
    ...could produce either foo -> bar or bar -> foo, depending on which
    ordering the set happens to produce. So, uhm, don't do that OK?
    For extra convenience, any of the individual path items can be a lens
    instead of a prop name, which allows you to splice existing lenses into
    the path:
        path_lens("foo", some_lens, "bar")
    This also means that path_lens can act as a generic variadic lens
    composer function, and the following are equivalent:
        path_lens(lens1, lens2, lens3)
        compose_lens(lens1, compose_lens(lens2, lens3))
    """
    segments = flatten(path)
    def _as_lens(segment):
        # Lenses are spliced in as-is; anything else is a property name.
        return segment if isinstance(segment, Lens) else prop_lens(segment)
    return fold(
        compose_lens,
        map(_as_lens, segments),
        identity_lens())
def compose_lens(left, right):
    """Compose lenses outer-to-inner.
    The following are equivalent:
        get(compose_lens(left, right), subject) == get(left, get(right, subject))
    """
    def getter(subject):
        return right.get(left.get(subject))
    def setter(value, subject):
        # A missing intermediate object is replaced by an empty dict so the
        # inner set has something to write into.
        current_inner = left.get(subject) or {}
        return left.set(right.set(value, current_inner), subject)
    return Lens(getter, setter)
def identity_lens():
    """Create a no-op lens that operates on the subject itself. Note,
    however, that .set() is still pure: it will not modify the subject but
    simply return the value passed to it, leaving the subject untouched.
    """
    return Lens(lambda subject: subject,
                lambda value, subject: value)
| {
"repo_name": "tracksinspector/papi",
"path": "papi/fp.py",
"copies": "1",
"size": "13612",
"license": "mit",
"hash": -8857678047709237000,
"line_mean": 32.0388349515,
"line_max": 82,
"alpha_frac": 0.6423743756,
"autogenerated": false,
"ratio": 3.7853170189099,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9905357344123746,
"avg_score": 0.004466810077230698,
"num_lines": 412
} |
"""A collection of functions designed to help I/O with ascii files."""
__docformat__ = "restructuredtext en"
import numpy as np
import numpy.core.numeric as nx
from __builtin__ import bool, int, long, float, complex, object, unicode, str
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def _to_filehandle(fname, flag='r', return_opened=False):
    """
    Returns the filehandle corresponding to a string or a file.
    If the string ends in '.gz', the file is automatically unzipped.
    Parameters
    ----------
    fname : string, filehandle
        Name of the file whose filehandle must be returned.
    flag : string, optional
        Flag indicating the status of the file ('r' for read, 'w' for write).
    return_opened : boolean, optional
        Whether to return the opening status of the file.
    """
    if _is_string_like(fname):
        # String input: open the file ourselves, picking the opener from
        # the filename extension.
        if fname.endswith('.gz'):
            import gzip
            fhd = gzip.open(fname, flag)
        elif fname.endswith('.bz2'):
            import bz2
            # NOTE(review): `flag` is not forwarded here, so .bz2 files are
            # always opened in BZ2File's default (read) mode — verify this
            # is intentional.
            fhd = bz2.BZ2File(fname)
        else:
            # NOTE(review): `file()` is the Python 2 builtin (removed in
            # Python 3, where it would be `open()`).
            fhd = file(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # Anything seekable is treated as an already-open file handle.
        fhd = fname
        opened = False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        # Also report whether *we* opened the handle (caller must close it).
        return fhd, opened
    return fhd
def has_nested_fields(ndtype):
    """
    Returns whether one or several fields of a dtype are nested.
    Parameters
    ----------
    ndtype : dtype
        Data-type of a structured array.
    Raises
    ------
    AttributeError : If `ndtype` does not have a `names` attribute.
    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
    >>> np.lib._iotools.has_nested_fields(dt)
    False
    """
    # A field is nested when its own dtype carries field names.
    return any(ndtype[name].names for name in ndtype.names or ())
def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type by collapsing nested fields and/or fields
    with a shape.
    Note that the field names are lost.
    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse
    flatten_base : {False, True}, optional
        Whether to transform a field with a shape into several fields or not.
    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
    ...                ('block', int, (2, 3))])
    >>> np.lib._iotools.flatten_dtype(dt)
    [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')]
    >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
    [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'),
     dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'),
     dtype('int32')]
    """
    if ndtype.names is None:
        # Unstructured dtype: optionally expand a shaped field into one
        # entry per element of its shape.
        if flatten_base:
            return [ndtype.base] * int(np.prod(ndtype.shape))
        return [ndtype.base]
    # Structured dtype: recurse into each field in declaration order.
    types = []
    for name in ndtype.names:
        (field_dtype, _) = ndtype.fields[name]
        types.extend(flatten_dtype(field_dtype, flatten_base))
    return types
class LineSplitter:
    """
    Object to split a string at a given delimiter or at given places.
    Parameters
    ----------
    delimiter : str, int, or sequence of ints, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    comment : str, optional
        Character used to mark the beginning of a comment. Default is '#'.
    autostrip : bool, optional
        Whether to strip each individual field. Default is True.
    """
    def autostrip(self, method):
        """
        Wrapper to strip each member of the output of `method`.
        Parameters
        ----------
        method : function
            Function that takes a single argument and returns a sequence of
            strings.
        Returns
        -------
        wrapped : function
            The result of wrapping `method`. `wrapped` takes a single input
            argument and returns a list of strings that are stripped of
            white-space.
        """
        return lambda input: [_.strip() for _ in method(input)]
    #
    def __init__(self, delimiter=None, comments='#', autostrip=True):
        # The splitter implementation is chosen once here, based on the
        # type of `delimiter`, and stored as self._handyman.
        self.comments = comments
        # Delimiter is a character
        if (delimiter is None) or _is_string_like(delimiter):
            # An empty string collapses to None (split on any whitespace).
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            # Convert widths into absolute [start:stop) slices.
            idx = np.cumsum([0]+list(delimiter))
            delimiter = [slice(i,j) for (i,j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter))
        else:
            # Zero / falsy width: fall back to whitespace splitting.
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
    #
    def _delimited_splitter(self, line):
        # Strip trailing comment, then split on the delimiter character
        # (None means "any run of whitespace").
        line = line.split(self.comments)[0].strip()
        if not line:
            return []
        return line.split(self.delimiter)
    #
    def _fixedwidth_splitter(self, line):
        # Cut the (comment-stripped) line into chunks of constant width.
        line = line.split(self.comments)[0]
        if not line:
            return []
        fixed = self.delimiter
        slices = [slice(i, i+fixed) for i in range(len(line))[::fixed]]
        return [line[s] for s in slices]
    #
    def _variablewidth_splitter(self, line):
        # Cut the (comment-stripped) line at the precomputed slices.
        line = line.split(self.comments)[0]
        if not line:
            return []
        slices = self.delimiter
        return [line[s] for s in slices]
    #
    def __call__(self, line):
        # Delegate to whichever splitter __init__ selected.
        return self._handyman(line)
class NameValidator:
    """
    Object to validate a list of strings to use as field names.
    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by '_'. During instantiation, the user can define a list of
    names to exclude, as well as a list of invalid characters. Names in the
    exclusion list are appended a '_' character.
    Once an instance has been created, it can be called with a list of names,
    and a list of valid names will be created.
    The `__call__` method accepts an optional keyword "default" that sets
    the default name in case of ambiguity. By default this is 'f', so
    that names will default to `f0`, `f1`, etc.
    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return', 'file', 'print']. Excluded names are appended an underscore:
        for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    casesensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.
        The default value is True.
    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its method
    `validate`.
    Examples
    --------
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ['file_', 'field2', 'with_space', 'CaSe']
    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
                                                  deletechars='q',
                                                  case_sensitive='False')
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ['excl_', 'field2', 'no_', 'with_space', 'case']
    """
    #
    # Names that would shadow keywords/builtins get a trailing underscore.
    defaultexcludelist = ['return','file','print']
    # Characters stripped from candidate names by default.
    defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
    #
    def __init__(self, excludelist=None, deletechars=None, case_sensitive=None):
        # Process the exclusion list ..
        if excludelist is None:
            excludelist = []
        # NOTE(review): this extends the caller-supplied list in place —
        # presumably harmless upstream, but verify callers don't reuse it.
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # Process the list of characters to delete
        if deletechars is None:
            delete = self.defaultdeletechars
        else:
            delete = set(deletechars)
        # Double quotes are always stripped, regardless of `deletechars`.
        delete.add('"')
        self.deletechars = delete
        # Process the case option .....
        # Any string containing 'u' maps to upper, one containing 'l' to
        # lower; everything else leaves names untouched.
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or ('u' in case_sensitive):
            self.case_converter = lambda x: x.upper()
        elif 'l' in case_sensitive:
            self.case_converter = lambda x: x.lower()
        else:
            self.case_converter = lambda x: x
    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings to use as field names for a structured array.
        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string reduces its
            length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the initial
            list of names.
        Returns
        -------
        validatednames : tuple of str
            The tuple of validated field names.
        Notes
        -----
        A `NameValidator` instance can be called directly, which is the same as
        calling `validate`. For examples, see `NameValidator`.
        """
        # Initial checks ..............
        if (names is None):
            if (nbfields is None):
                return None
            names = []
        # NOTE(review): `basestring` is Python 2 only (str in Python 3).
        if isinstance(names, basestring):
            names = [names,]
        if nbfields is not None:
            # Pad with empty names or truncate to exactly nbfields entries.
            nbnames = len(names)
            if (nbnames < nbfields):
                names = list(names) + [''] * (nbfields - nbnames)
            elif (nbnames > nbfields):
                names = names[:nbfields]
        # Set some shortcuts ...........
        deletechars = self.deletechars
        excludelist = self.excludelist
        case_converter = self.case_converter
        # Initializes some variables ...
        validatednames = []
        # Tracks how often each cleaned name was produced, to disambiguate
        # duplicates with a numeric suffix.
        seen = dict()
        nbempty = 0
        #
        for item in names:
            item = case_converter(item)
            item = item.strip().replace(' ', '_')
            item = ''.join([c for c in item if c not in deletechars])
            if item == '':
                # Name vanished after cleaning: synthesize one (f0, f1, ...)
                # that doesn't clash with any of the *input* names.
                # NOTE(review): the clash scan checks `names`, not
                # `validatednames` — presumably intentional upstream; verify.
                item = defaultfmt % nbempty
                while item in names:
                    nbempty += 1
                    item = defaultfmt % nbempty
                nbempty += 1
            elif item in excludelist:
                item += '_'
            cnt = seen.get(item, 0)
            if cnt > 0:
                validatednames.append(item + '_%d' % cnt)
            else:
                validatednames.append(item)
            seen[item] = cnt + 1
        return tuple(validatednames)
    #
    def __call__(self, names, defaultfmt="f%i", nbfields=None):
        # Calling the instance is an alias for validate().
        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
def str2bool(value):
    """
    Tries to transform a string supposed to represent a boolean to a boolean.
    Parameters
    ----------
    value : str
        The string that is transformed to a boolean.
    Returns
    -------
    boolval : bool
        The boolean representation of `value`.
    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case independent)
    Examples
    --------
    >>> np.lib._iotools.str2bool('TRUE')
    True
    >>> np.lib._iotools.str2bool('false')
    False
    """
    normalized = value.upper()
    if normalized == 'TRUE':
        return True
    if normalized == 'FALSE':
        return False
    raise ValueError("Invalid boolean")
class ConverterError(Exception):
    """
    Exception raised when an error occurs in a converter for string values.
    Base class for converter-related errors in this module.
    """
    pass
class ConverterLockError(ConverterError):
    """
    Exception raised when an attempt is made to upgrade a locked converter
    (one created with ``locked=True``).
    """
    pass
class ConversionWarning(UserWarning):
    """
    Warning issued when a string converter has a problem.
    Notes
    -----
    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
    is explicitly suppressed with the "invalid_raise" keyword.
    """
    pass
class StringConverter:
"""
Factory class for function transforming a string into another object (int,
float).
After initialization, an instance can be called to transform a string
into another object. If the string is recognized as representing a missing
value, a default value is returned.
Attributes
----------
func : function
Function used for the conversion.
default : any
Default value to return when the input corresponds to a missing value.
type : type
Type of the output.
_status : int
Integer representing the order of the conversion.
_mapper : sequence of tuples
Sequence of tuples (dtype, function, default value) to evaluate in
order.
_locked : bool
Holds `locked` parameter.
Parameters
----------
dtype_or_func : {None, dtype, function}, optional
If a `dtype`, specifies the input data type, used to define a basic
function and a default value for missing data. For example, when
`dtype` is float, the `func` attribute is set to `float` and the
default value to `np.nan`.
If a function, this function is used to convert a string to another
object. In this case, it is recommended to give an associated default
value as input.
default : any, optional
Value to return by default, that is, when the string to be converted
is flagged as missing. If not given, `StringConverter` tries to supply
a reasonable default value.
missing_values : sequence of str, optional
Sequence of strings indicating a missing value.
locked : bool, optional
Whether the StringConverter should be locked to prevent automatic
upgrade or not. Default is False.
"""
#
_mapper = [(nx.bool_, str2bool, False),
(nx.integer, int, -1),
(nx.floating, float, nx.nan),
(complex, complex, nx.nan+0j),
(nx.string_, str, '???')]
(_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
#
@classmethod
def _getsubdtype(cls, val):
"""Returns the type of the dtype of the input variable."""
return np.array(val).dtype.type
#
@classmethod
def upgrade_mapper(cls, func, default=None):
"""
Upgrade the mapper of a StringConverter by adding a new function and its
corresponding default.
The input function (or sequence of functions) and its associated default
value (if any) is inserted in penultimate position of the mapper.
The corresponding type is estimated from the dtype of the default value.
Parameters
----------
func : var
Function, or sequence of functions
Examples
--------
>>> import dateutil.parser
>>> import datetime
>>> dateparser = datetustil.parser.parse
>>> defaultdate = datetime.date(2000, 1, 1)
>>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
"""
# Func is a single functions
if hasattr(func, '__call__'):
cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
return
elif hasattr(func, '__iter__'):
if isinstance(func[0], (tuple, list)):
for _ in func:
cls._mapper.insert(-1, _)
return
if default is None:
default = [None] * len(func)
else:
default = list(default)
default.append([None] * (len(func)-len(default)))
for (fct, dft) in zip(func, default):
cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
#
def __init__(self, dtype_or_func=None, default=None, missing_values=None,
             locked=False):
    """Initialize the converter from a dtype, a conversion function, or nothing."""
    # Defines a lock for upgrade
    self._locked = bool(locked)
    # No input dtype: minimal initialization (boolean converter, status 0)
    if dtype_or_func is None:
        self.func = str2bool
        self._status = 0
        self.default = default or False
        # NOTE(review): ``np.bool`` is the builtin-bool alias, deprecated
        # in modern numpy -- confirm target numpy version.
        ttype = np.bool
    else:
        # Is the input a np.dtype ?
        try:
            self.func = None
            ttype = np.dtype(dtype_or_func).type
        except TypeError:
            # dtype_or_func must be a function, then
            if not hasattr(dtype_or_func, '__call__'):
                errmsg = "The input argument `dtype` is neither a function"\
                         " or a dtype (got '%s' instead)"
                raise TypeError(errmsg % type(dtype_or_func))
            # Set the function
            self.func = dtype_or_func
            # If we don't have a default, try to guess it or set it to None
            if default is None:
                try:
                    default = self.func('0')
                except ValueError:
                    default = None
            ttype = self._getsubdtype(default)
    # Set the status according to the dtype: find the first mapper entry
    # whose type matches, and take its default when none was supplied.
    _status = -1
    for (i, (deftype, func, default_def)) in enumerate(self._mapper):
        if np.issubdtype(ttype, deftype):
            _status = i
            if default is None:
                self.default = default_def
            else:
                self.default = default
            break
    if _status == -1:
        # We never found a match in the _mapper...
        _status = 0
        self.default = default
    self._status = _status
    # If the input was a dtype, set the function to the last we saw
    if self.func is None:
        self.func = func
    # If the status is 1 (int), change the function to smthg more robust
    # (int('1.0') fails, int(float('1.0')) does not).
    if self.func == self._mapper[1][1]:
        self.func = lambda x : int(float(x))
    # Store the list of strings corresponding to missing values.
    if missing_values is None:
        self.missing_values = set([''])
    else:
        # NOTE(review): ``basestring`` is the Python 2 builtin; on
        # Python 3 this would need ``str`` -- confirm target interpreter.
        if isinstance(missing_values, basestring):
            missing_values = missing_values.split(",")
        self.missing_values = set(list(missing_values) + [''])
    #
    # Strict conversion by default; remember the initial state for upgrades.
    self._callingfunction = self._strict_call
    self.type = ttype
    self._checked = False
    self._initial_default = default
#
def _loose_call(self, value):
    """Convert `value`, silently falling back to the default on failure."""
    try:
        result = self.func(value)
    except ValueError:
        result = self.default
    return result
#
def _strict_call(self, value):
    """Convert `value`; missing entries yield the default, others raise."""
    try:
        return self.func(value)
    except ValueError:
        if value.strip() not in self.missing_values:
            raise ValueError("Cannot convert string '%s'" % value)
        # A missing entry while still at the initial status means the
        # converter has not really been validated yet.
        if not self._status:
            self._checked = False
        return self.default
#
def __call__(self, value):
    """Convert `value` using the currently selected calling strategy."""
    strategy = self._callingfunction
    return strategy(value)
#
def upgrade(self, value):
    """
    Find the best converter for a given string, and return the result.

    The supplied string `value` is converted by testing different
    converters in order. First the `func` method of the `StringConverter`
    instance is tried, if this fails other available converters are tried.
    The order in which these other converters are tried is determined by
    the `_status` attribute of the instance.

    Parameters
    ----------
    value : str
        The string to convert.

    Returns
    -------
    out : any
        The result of converting `value` with the appropriate converter.

    Raises
    ------
    ConverterLockError
        If the converter is locked and cannot be upgraded.
    ConverterError
        If no remaining conversion function can handle `value`.
    """
    self._checked = True
    try:
        # BUG FIX: actually return the converted value, as the docstring
        # promises (the original fell through and returned None).
        return self._strict_call(value)
    except ValueError:
        # Raise an exception if we locked the converter...
        if self._locked:
            errmsg = "Converter is locked and cannot be upgraded"
            raise ConverterLockError(errmsg)
        _statusmax = len(self._mapper)
        # Complain if we try to upgrade past the maximum
        _status = self._status
        if _status == _statusmax:
            errmsg = "Could not find a valid conversion function"
            raise ConverterError(errmsg)
        elif _status < _statusmax - 1:
            _status += 1
        # Move to the next (more general) converter and retry.
        (self.type, self.func, default) = self._mapper[_status]
        self._status = _status
        if self._initial_default is not None:
            self.default = self._initial_default
        else:
            self.default = default
        return self.upgrade(value)
def iterupgrade(self, value):
    """
    Upgrade the converter until it can convert every item of `value`.

    Parameters
    ----------
    value : str or iterable of str
        The string(s) the converter must be able to handle.

    Raises
    ------
    ConverterLockError
        If the converter is locked and cannot be upgraded.
    ConverterError
        If no remaining conversion function can handle the input.
    """
    self._checked = True
    if not hasattr(value, '__iter__'):
        value = (value,)
    _strict_call = self._strict_call
    try:
        # BUG FIX: iterate explicitly instead of relying on ``map`` being
        # eager -- on Python 3 ``map`` is lazy, so the conversions were
        # never executed and errors never triggered the upgrade.
        for _m in value:
            _strict_call(_m)
    except ValueError:
        # Raise an exception if we locked the converter...
        if self._locked:
            errmsg = "Converter is locked and cannot be upgraded"
            raise ConverterLockError(errmsg)
        _statusmax = len(self._mapper)
        # Complain if we try to upgrade past the maximum
        _status = self._status
        if _status == _statusmax:
            raise ConverterError("Could not find a valid conversion function")
        elif _status < _statusmax - 1:
            _status += 1
        # Move to the next (more general) converter and retry on all items.
        (self.type, self.func, default) = self._mapper[_status]
        if self._initial_default is not None:
            self.default = self._initial_default
        else:
            self.default = default
        self._status = _status
        self.iterupgrade(value)
def update(self, func, default=None, missing_values='', locked=False):
    """
    Set StringConverter attributes directly.

    Parameters
    ----------
    func : function
        Conversion function.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given, `StringConverter`
        tries to supply a reasonable default value.
    missing_values : sequence of str, optional
        Sequence of strings indicating a missing value. If ``None``, the
        existing set of missing values is cleared.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent automatic
        upgrade or not. Default is False.

    Notes
    -----
    `update` takes the same parameters as the constructor of
    `StringConverter`, except that `func` does not accept a `dtype`
    whereas `dtype_or_func` in the constructor does.
    """
    self.func = func
    self._locked = locked
    # Don't reset the default to None if we can avoid it
    if default is not None:
        self.default = default
        self.type = self._getsubdtype(default)
    else:
        # Guess the output type by converting a test token.
        try:
            tester = func('1')
        except (TypeError, ValueError):
            tester = None
        self.type = self._getsubdtype(tester)
    # Add the missing values to the existing set
    if missing_values is not None:
        if _is_string_like(missing_values):
            self.missing_values.add(missing_values)
        elif hasattr(missing_values, '__iter__'):
            for val in missing_values:
                self.missing_values.add(val)
    else:
        # BUG FIX: keep ``missing_values`` a set -- it is a set everywhere
        # else in the class, and a bare list would break later ``.add``
        # calls.
        self.missing_values = set()
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
    """
    Convenience function to create a `np.dtype` object.

    The function processes the input `dtype` and matches it with the given
    names.

    Parameters
    ----------
    ndtype : var
        Definition of the dtype. Can be any string or dictionary
        recognized by the `np.dtype` function, or a sequence of types.
    names : str or sequence, optional
        Sequence of strings to use as field names for a structured dtype.
        For convenience, `names` can be a string of a comma-separated list
        of names.
    defaultfmt : str, optional
        Format string used to define missing names, such as ``"f%i"``
        (default) or ``"fields_%02i"``.
    validationargs : optional
        A series of optional arguments used to initialize a
        `NameValidator`.

    Examples
    --------
    >>> np.lib._iotools.easy_dtype(float)
    dtype('float64')
    >>> np.lib._iotools.easy_dtype("i4, f8")
    dtype([('f0', '<i4'), ('f1', '<f8')])
    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
    dtype([('field_000', '<i4'), ('field_001', '<f8')])
    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    """
    try:
        ndtype = np.dtype(ndtype)
    except TypeError:
        # `ndtype` is a sequence of types: validate/build names and zip
        # them into a structured dtype.
        validate = NameValidator(**validationargs)
        nbfields = len(ndtype)
        if names is None:
            names = [''] * len(ndtype)
        # NOTE(review): ``basestring`` is the Python 2 builtin; on
        # Python 3 this would need ``str`` -- confirm target interpreter.
        elif isinstance(names, basestring):
            names = names.split(",")
        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
        ndtype = np.dtype(dict(formats=ndtype, names=names))
    else:
        nbtypes = len(ndtype)
        # Explicit names
        if names is not None:
            validate = NameValidator(**validationargs)
            if isinstance(names, basestring):
                names = names.split(",")
            # Simple dtype: repeat to match the nb of names
            if nbtypes == 0:
                formats = tuple([ndtype.type] * len(names))
                names = validate(names, defaultfmt=defaultfmt)
                # NOTE(review): on Python 3 ``zip`` is lazy and np.dtype
                # would need a list -- Python 2 assumed here.
                ndtype = np.dtype(zip(names, formats))
            # Structured dtype: just validate the names as needed
            else:
                ndtype.names = validate(names, nbfields=nbtypes,
                                        defaultfmt=defaultfmt)
        # No implicit names
        elif (nbtypes > 0):
            validate = NameValidator(**validationargs)
            # Default initial names : should we change the format ?
            if (ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and \
               (defaultfmt != "f%i"):
                ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
            # Explicit initial names : just validate
            else:
                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
    return ndtype
| {
"repo_name": "plaes/numpy",
"path": "numpy/lib/_iotools.py",
"copies": "2",
"size": "27692",
"license": "bsd-3-clause",
"hash": -8872808830122175000,
"line_mean": 33.6583229036,
"line_max": 83,
"alpha_frac": 0.5633034811,
"autogenerated": false,
"ratio": 4.441379310344828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6004682791444828,
"avg_score": null,
"num_lines": null
} |
"""A collection of functions designed to help I/O with ascii files."""
__docformat__ = "restructuredtext en"
import sys
import numpy as np
import numpy.core.numeric as nx
from __builtin__ import bool, int, long, float, complex, object, unicode, str
from numpy.compat import asbytes, bytes, asbytes_nested
if sys.version_info[0] >= 3:
    # On Python 3 the raw bytes read from a file must be decoded to text
    # before being handed to ``complex`` or used as a field name.
    def _bytes_to_complex(s):
        return complex(s.decode('ascii'))

    def _bytes_to_name(s):
        return s.decode('ascii')
else:
    # On Python 2, byte strings are native strings: no decoding required.
    _bytes_to_complex = complex
    _bytes_to_name = str
def _is_string_like(obj):
    """
    Check whether obj behaves like a string (i.e. supports concatenation
    with the empty string).
    """
    try:
        obj + ''
        return True
    except (TypeError, ValueError):
        return False
def _is_bytes_like(obj):
    """
    Check whether obj behaves like a bytes object (i.e. supports
    concatenation with an empty byte string).
    """
    try:
        obj + asbytes('')
    except (TypeError, ValueError):
        return False
    else:
        return True
def _to_filehandle(fname, flag='r', return_opened=False):
    """
    Returns the filehandle corresponding to a string or a file.
    If the string ends in '.gz', the file is automatically unzipped.

    Parameters
    ----------
    fname : string, filehandle
        Name of the file whose filehandle must be returned.
    flag : string, optional
        Flag indicating the status of the file ('r' for read, 'w' for
        write).
    return_opened : boolean, optional
        Whether to return the opening status of the file.
    """
    if _is_string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fhd = gzip.open(fname, flag)
        elif fname.endswith('.bz2'):
            import bz2
            fhd = bz2.BZ2File(fname)
        else:
            # NOTE(review): ``file`` is the Python 2 builtin; on Python 3
            # this branch would need ``open`` instead -- confirm target
            # interpreter.
            fhd = file(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # Already a file-like object (has a seek method): use it as-is.
        fhd = fname
        opened = False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        return fhd, opened
    return fhd
def has_nested_fields(ndtype):
    """
    Return whether one or several fields of a dtype are themselves
    structured (nested).

    Parameters
    ----------
    ndtype : dtype
        Data-type of a structured array.

    Raises
    ------
    AttributeError
        If `ndtype` does not have a `names` attribute.

    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
    >>> np.lib._iotools.has_nested_fields(dt)
    False
    """
    return any(ndtype[field].names for field in ndtype.names or ())
def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type into a flat list of elementary dtypes.

    Nested fields are collapsed recursively; the field names are lost in
    the process.

    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse.
    flatten_base : {False, True}, optional
        Whether to transform a field with a shape into one entry per
        element.

    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
    ...                ('block', int, (2, 3))])
    >>> np.lib._iotools.flatten_dtype(dt)
    [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')]
    """
    field_names = ndtype.names
    if field_names is None:
        # Elementary dtype: emit its base, repeated per-element if asked.
        repeat = int(np.prod(ndtype.shape)) if flatten_base else 1
        return [ndtype.base] * repeat
    collected = []
    for field_name in field_names:
        (field_dtype, _) = ndtype.fields[field_name]
        collected.extend(flatten_dtype(field_dtype, flatten_base))
    return collected
class LineSplitter:
    """
    Object to split a string at a given delimiter or at given places.

    Parameters
    ----------
    delimiter : str, int, or sequence of ints, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    comment : str, optional
        Character used to mark the beginning of a comment. Default is '#'.
    autostrip : bool, optional
        Whether to strip each individual field. Default is True.
    """

    def autostrip(self, method):
        """
        Wrapper to strip each member of the output of `method`.

        Parameters
        ----------
        method : function
            Function that takes a single argument and returns a sequence
            of strings.

        Returns
        -------
        wrapped : function
            The result of wrapping `method`. `wrapped` takes a single
            input argument and returns a list of strings that are stripped
            of white-space.
        """
        return lambda input: [_.strip() for _ in method(input)]
    #

    def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True):
        self.comments = comments
        # Delimiter is a character
        if isinstance(delimiter, unicode):
            delimiter = delimiter.encode('ascii')
        if (delimiter is None) or _is_bytes_like(delimiter):
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            # Convert the widths into absolute [start, stop) slices.
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter))
        else:
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
    #

    def _delimited_splitter(self, line):
        # Drop the comment part, trim line endings, then split on the
        # delimiter.
        line = line.split(self.comments)[0].strip(asbytes(" \r\n"))
        if not line:
            return []
        return line.split(self.delimiter)
    #

    def _fixedwidth_splitter(self, line):
        # All fields share a single fixed width.
        line = line.split(self.comments)[0]
        if not line:
            return []
        fixed = self.delimiter
        slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
        return [line[s] for s in slices]
    #

    def _variablewidth_splitter(self, line):
        # Fields were precomputed as slices from the width sequence.
        line = line.split(self.comments)[0]
        if not line:
            return []
        slices = self.delimiter
        return [line[s] for s in slices]
    #

    def __call__(self, line):
        return self._handyman(line)
class NameValidator:
    """
    Object to validate a list of strings to use as field names.

    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by '_'. During instantiation, the user can define a list
    of names to exclude, as well as a list of invalid characters. Names in
    the exclusion list are appended a '_' character.

    Once an instance has been created, it can be called with a list of
    names, and a list of valid names will be created.  The `__call__`
    method accepts an optional keyword "default" that sets the default
    name in case of ambiguity. By default this is 'f', so that names will
    default to `f0`, `f1`, etc.

    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names are appended an
        underscore: for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from
        the names.
    casesensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.
        The default value is True.
    replace_space : '_', optional
        Character(s) used in replacement of white spaces.

    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its
    method `validate`.

    Examples
    --------
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ['file_', 'field2', 'with_space', 'CaSe']
    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
    ...                                           deletechars='q',
    ...                                           case_sensitive='False')
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ['excl_', 'field2', 'no_', 'with_space', 'case']
    """
    #
    # Names that collide with common keywords get a trailing underscore.
    defaultexcludelist = ['return', 'file', 'print']
    defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
    #

    def __init__(self, excludelist=None, deletechars=None,
                 case_sensitive=None, replace_space='_'):
        # Process the exclusion list ..
        if excludelist is None:
            excludelist = []
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # Process the list of characters to delete
        if deletechars is None:
            delete = self.defaultdeletechars
        else:
            delete = set(deletechars)
        delete.add('"')
        self.deletechars = delete
        # Process the case option .....
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or ('u' in case_sensitive):
            self.case_converter = lambda x: x.upper()
        elif 'l' in case_sensitive:
            self.case_converter = lambda x: x.lower()
        else:
            self.case_converter = lambda x: x
        #
        self.replace_space = replace_space

    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings to use as field names for a structured
        array.

        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string
            reduces its length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the
            initial list of names.

        Returns
        -------
        validatednames : list of str
            The list of validated field names.

        Notes
        -----
        A `NameValidator` instance can be called directly, which is the
        same as calling `validate`. For examples, see `NameValidator`.
        """
        # Initial checks ..............
        if (names is None):
            if (nbfields is None):
                return None
            names = []
        # NOTE(review): ``basestring`` is the Python 2 builtin -- confirm
        # target interpreter.
        if isinstance(names, basestring):
            names = [names, ]
        if nbfields is not None:
            # Pad with empty names or truncate to the requested length.
            nbnames = len(names)
            if (nbnames < nbfields):
                names = list(names) + [''] * (nbfields - nbnames)
            elif (nbnames > nbfields):
                names = names[:nbfields]
        # Set some shortcuts ...........
        deletechars = self.deletechars
        excludelist = self.excludelist
        case_converter = self.case_converter
        replace_space = self.replace_space
        # Initializes some variables ...
        validatednames = []
        seen = dict()
        nbempty = 0
        #
        for item in names:
            item = case_converter(item).strip()
            if replace_space:
                item = item.replace(' ', replace_space)
            item = ''.join([c for c in item if c not in deletechars])
            if item == '':
                # Name reduced to nothing: synthesize one, avoiding
                # collisions with the remaining explicit names.
                item = defaultfmt % nbempty
                while item in names:
                    nbempty += 1
                    item = defaultfmt % nbempty
                nbempty += 1
            elif item in excludelist:
                item += '_'
            # Deduplicate repeated names with a numeric suffix.
            cnt = seen.get(item, 0)
            if cnt > 0:
                validatednames.append(item + '_%d' % cnt)
            else:
                validatednames.append(item)
            seen[item] = cnt + 1
        return tuple(validatednames)
    #

    def __call__(self, names, defaultfmt="f%i", nbfields=None):
        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
def str2bool(value):
    """
    Tries to transform a string supposed to represent a boolean to a
    boolean.

    Parameters
    ----------
    value : str
        The string that is transformed to a boolean.

    Returns
    -------
    boolval : bool
        The boolean representation of `value`.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case independent)

    Examples
    --------
    >>> np.lib._iotools.str2bool('TRUE')
    True
    >>> np.lib._iotools.str2bool('false')
    False
    """
    normalized = value.upper()
    if normalized == asbytes('TRUE'):
        return True
    if normalized == asbytes('FALSE'):
        return False
    raise ValueError("Invalid boolean")
class ConverterError(Exception):
    """
    Exception raised when an error occurs in a converter for string
    values.
    """
class ConverterLockError(ConverterError):
    """
    Exception raised when an attempt is made to upgrade a locked
    converter.
    """
class ConversionWarning(UserWarning):
    """
    Warning issued when a string converter has a problem.

    Notes
    -----
    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
    is explicitly suppressed with the "invalid_raise" keyword.
    """
class StringConverter:
    """
    Factory class for function transforming a string into another object
    (int, float).

    After initialization, an instance can be called to transform a string
    into another object. If the string is recognized as representing a
    missing value, a default value is returned.

    Attributes
    ----------
    func : function
        Function used for the conversion.
    default : any
        Default value to return when the input corresponds to a missing
        value.
    type : type
        Type of the output.
    _status : int
        Integer representing the order of the conversion.
    _mapper : sequence of tuples
        Sequence of tuples (dtype, function, default value) to evaluate in
        order.
    _locked : bool
        Holds `locked` parameter.

    Parameters
    ----------
    dtype_or_func : {None, dtype, function}, optional
        If a `dtype`, specifies the input data type, used to define a
        basic function and a default value for missing data. For example,
        when `dtype` is float, the `func` attribute is set to `float` and
        the default value to `np.nan`.
        If a function, this function is used to convert a string to
        another object. In this case, it is recommended to give an
        associated default value as input.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given, `StringConverter`
        tries to supply a reasonable default value.
    missing_values : sequence of str, optional
        Sequence of strings indicating a missing value.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent automatic
        upgrade or not. Default is False.
    """
    #
    # Candidate (type, conversion function, default fill) triples, tried
    # in order of increasing generality; the bytes entry is the catch-all.
    _mapper = [(nx.bool_, str2bool, False),
               (nx.integer, int, -1),
               (nx.floating, float, nx.nan),
               (complex, _bytes_to_complex, nx.nan + 0j),
               (nx.string_, bytes, asbytes('???'))]
    (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
    #

    @classmethod
    def _getsubdtype(cls, val):
        """Returns the type of the dtype of the input variable."""
        return np.array(val).dtype.type
    #

    @classmethod
    def upgrade_mapper(cls, func, default=None):
        """
        Upgrade the mapper of a StringConverter by adding a new function
        and its corresponding default.

        The input function (or sequence of functions) and its associated
        default value (if any) is inserted in penultimate position of the
        mapper.  The corresponding type is estimated from the dtype of the
        default value.

        Parameters
        ----------
        func : var
            Function, or sequence of functions

        Examples
        --------
        >>> import dateutil.parser
        >>> import datetime
        >>> dateparser = dateutil.parser.parse
        >>> defaultdate = datetime.date(2000, 1, 1)
        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
        """
        # Func is a single functions
        if hasattr(func, '__call__'):
            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
            return
        elif hasattr(func, '__iter__'):
            if isinstance(func[0], (tuple, list)):
                for _ in func:
                    cls._mapper.insert(-1, _)
                return
            if default is None:
                default = [None] * len(func)
            else:
                default = list(default)
                # NOTE(review): this appends one nested list of Nones
                # instead of padding with individual Nones; ``extend``
                # looks intended -- confirm before relying on list input.
                default.append([None] * (len(func) - len(default)))
            for (fct, dft) in zip(func, default):
                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
    #

    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
                 locked=False):
        # Convert unicode (for Py3)
        if isinstance(missing_values, unicode):
            missing_values = asbytes(missing_values)
        elif isinstance(missing_values, (list, tuple)):
            missing_values = asbytes_nested(missing_values)
        # Defines a lock for upgrade
        self._locked = bool(locked)
        # No input dtype: minimal initialization (boolean converter)
        if dtype_or_func is None:
            self.func = str2bool
            self._status = 0
            self.default = default or False
            ttype = np.bool
        else:
            # Is the input a np.dtype ?
            try:
                self.func = None
                ttype = np.dtype(dtype_or_func).type
            except TypeError:
                # dtype_or_func must be a function, then
                if not hasattr(dtype_or_func, '__call__'):
                    errmsg = "The input argument `dtype` is neither a function"\
                             " or a dtype (got '%s' instead)"
                    raise TypeError(errmsg % type(dtype_or_func))
                # Set the function
                self.func = dtype_or_func
                # If we don't have a default, try to guess it or set it to None
                if default is None:
                    try:
                        default = self.func(asbytes('0'))
                    except ValueError:
                        default = None
                ttype = self._getsubdtype(default)
        # Set the status according to the dtype: first mapper entry whose
        # type matches, taking its default when none was supplied.
        _status = -1
        for (i, (deftype, func, default_def)) in enumerate(self._mapper):
            if np.issubdtype(ttype, deftype):
                _status = i
                if default is None:
                    self.default = default_def
                else:
                    self.default = default
                break
        if _status == -1:
            # We never found a match in the _mapper...
            _status = 0
            self.default = default
        self._status = _status
        # If the input was a dtype, set the function to the last we saw
        if self.func is None:
            self.func = func
        # If the status is 1 (int), change the function to smthg more robust
        # (int('1.0') fails, int(float('1.0')) does not).
        if self.func == self._mapper[1][1]:
            self.func = lambda x : int(float(x))
        # Store the list of strings corresponding to missing values.
        if missing_values is None:
            self.missing_values = set([asbytes('')])
        else:
            if isinstance(missing_values, bytes):
                missing_values = missing_values.split(asbytes(","))
            self.missing_values = set(list(missing_values) + [asbytes('')])
        #
        # Strict conversion by default; remember the initial state for
        # later upgrades.
        self._callingfunction = self._strict_call
        self.type = ttype
        self._checked = False
        self._initial_default = default
    #

    def _loose_call(self, value):
        # Convert; fall back to the default instead of raising.
        try:
            return self.func(value)
        except ValueError:
            return self.default
    #

    def _strict_call(self, value):
        # Convert; missing entries yield the default, others raise.
        try:
            return self.func(value)
        except ValueError:
            if value.strip() in self.missing_values:
                # A missing entry at the initial status means the
                # converter has not really been validated yet.
                if not self._status:
                    self._checked = False
                return self.default
            raise ValueError("Cannot convert string '%s'" % value)
    #

    def __call__(self, value):
        return self._callingfunction(value)
    #

    def upgrade(self, value):
        """
        Try to find the best converter for a given string, and return the
        result.

        The supplied string `value` is converted by testing different
        converters in order. First the `func` method of the
        `StringConverter` instance is tried, if this fails other available
        converters are tried.  The order in which these other converters
        are tried is determined by the `_status` attribute of the
        instance.

        Parameters
        ----------
        value : str
            The string to convert.

        Returns
        -------
        out : any
            The result of converting `value` with the appropriate
            converter.
        """
        self._checked = True
        try:
            # NOTE(review): the converted value is discarded, so this
            # method actually returns None despite the docstring --
            # ``return self._strict_call(value)`` looks intended.
            self._strict_call(value)
        except ValueError:
            # Raise an exception if we locked the converter...
            if self._locked:
                errmsg = "Converter is locked and cannot be upgraded"
                raise ConverterLockError(errmsg)
            _statusmax = len(self._mapper)
            # Complains if we try to upgrade by the maximum
            _status = self._status
            if _status == _statusmax:
                errmsg = "Could not find a valid conversion function"
                raise ConverterError(errmsg)
            elif _status < _statusmax - 1:
                _status += 1
            # Move to the next (more general) converter and retry.
            (self.type, self.func, default) = self._mapper[_status]
            self._status = _status
            if self._initial_default is not None:
                self.default = self._initial_default
            else:
                self.default = default
            self.upgrade(value)

    def iterupgrade(self, value):
        """Upgrade the converter until it handles every item of `value`."""
        self._checked = True
        if not hasattr(value, '__iter__'):
            value = (value,)
        _strict_call = self._strict_call
        try:
            # NOTE(review): ``map`` is lazy on Python 3, so these
            # conversions would never run there -- Python 2 assumed.
            map(_strict_call, value)
        except ValueError:
            # Raise an exception if we locked the converter...
            if self._locked:
                errmsg = "Converter is locked and cannot be upgraded"
                raise ConverterLockError(errmsg)
            _statusmax = len(self._mapper)
            # Complains if we try to upgrade by the maximum
            _status = self._status
            if _status == _statusmax:
                raise ConverterError("Could not find a valid conversion function")
            elif _status < _statusmax - 1:
                _status += 1
            # Move to the next (more general) converter and retry on all
            # items.
            (self.type, self.func, default) = self._mapper[_status]
            if self._initial_default is not None:
                self.default = self._initial_default
            else:
                self.default = default
            self._status = _status
            self.iterupgrade(value)

    def update(self, func, default=None, missing_values=asbytes(''),
               locked=False):
        """
        Set StringConverter attributes directly.

        Parameters
        ----------
        func : function
            Conversion function.
        default : any, optional
            Value to return by default, that is, when the string to be
            converted is flagged as missing. If not given,
            `StringConverter` tries to supply a reasonable default value.
        missing_values : sequence of str, optional
            Sequence of strings indicating a missing value.
        locked : bool, optional
            Whether the StringConverter should be locked to prevent
            automatic upgrade or not. Default is False.

        Notes
        -----
        `update` takes the same parameters as the constructor of
        `StringConverter`, except that `func` does not accept a `dtype`
        whereas `dtype_or_func` in the constructor does.
        """
        self.func = func
        self._locked = locked
        # Don't reset the default to None if we can avoid it
        if default is not None:
            self.default = default
            self.type = self._getsubdtype(default)
        else:
            # Guess the output type by converting a test token.
            try:
                tester = func(asbytes('1'))
            except (TypeError, ValueError):
                tester = None
            self.type = self._getsubdtype(tester)
        # Add the missing values to the existing set
        if missing_values is not None:
            if _is_bytes_like(missing_values):
                self.missing_values.add(missing_values)
            elif hasattr(missing_values, '__iter__'):
                for val in missing_values:
                    self.missing_values.add(val)
        else:
            # NOTE(review): this resets ``missing_values`` to a list while
            # it is a set everywhere else -- ``set()`` looks intended.
            self.missing_values = []
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
    """
    Convenience function to create a `np.dtype` object.

    The function processes the input `dtype` and matches it with the given
    names.

    Parameters
    ----------
    ndtype : var
        Definition of the dtype. Can be any string or dictionary
        recognized by the `np.dtype` function, or a sequence of types.
    names : str or sequence, optional
        Sequence of strings to use as field names for a structured dtype.
        For convenience, `names` can be a string of a comma-separated list
        of names.
    defaultfmt : str, optional
        Format string used to define missing names, such as ``"f%i"``
        (default) or ``"fields_%02i"``.
    validationargs : optional
        A series of optional arguments used to initialize a
        `NameValidator`.

    Examples
    --------
    >>> np.lib._iotools.easy_dtype(float)
    dtype('float64')
    >>> np.lib._iotools.easy_dtype("i4, f8")
    dtype([('f0', '<i4'), ('f1', '<f8')])
    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
    dtype([('field_000', '<i4'), ('field_001', '<f8')])
    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    """
    try:
        ndtype = np.dtype(ndtype)
    except TypeError:
        # `ndtype` is a sequence of types: validate/build names and zip
        # them into a structured dtype.
        validate = NameValidator(**validationargs)
        nbfields = len(ndtype)
        if names is None:
            names = [''] * len(ndtype)
        # NOTE(review): ``basestring`` is the Python 2 builtin; on
        # Python 3 this would need ``str`` -- confirm target interpreter.
        elif isinstance(names, basestring):
            names = names.split(",")
        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
        ndtype = np.dtype(dict(formats=ndtype, names=names))
    else:
        nbtypes = len(ndtype)
        # Explicit names
        if names is not None:
            validate = NameValidator(**validationargs)
            if isinstance(names, basestring):
                names = names.split(",")
            # Simple dtype: repeat to match the nb of names
            if nbtypes == 0:
                formats = tuple([ndtype.type] * len(names))
                names = validate(names, defaultfmt=defaultfmt)
                # NOTE(review): on Python 3 ``zip`` is lazy and np.dtype
                # would need a list -- Python 2 assumed here.
                ndtype = np.dtype(zip(names, formats))
            # Structured dtype: just validate the names as needed
            else:
                ndtype.names = validate(names, nbfields=nbtypes,
                                        defaultfmt=defaultfmt)
        # No implicit names
        elif (nbtypes > 0):
            validate = NameValidator(**validationargs)
            # Default initial names : should we change the format ?
            if (ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and \
               (defaultfmt != "f%i"):
                ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
            # Explicit initial names : just validate
            else:
                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
    return ndtype
| {
"repo_name": "dagss/numpy_svn",
"path": "numpy/lib/_iotools.py",
"copies": "5",
"size": "28938",
"license": "bsd-3-clause",
"hash": -7855578288635591000,
"line_mean": 33.6562874251,
"line_max": 83,
"alpha_frac": 0.5651392633,
"autogenerated": false,
"ratio": 4.419364691508858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003071214927358906,
"num_lines": 835
} |
"""A collection of functions designed to help I/O with ascii files.
"""
__docformat__ = "restructuredtext en"
import numpy as np
import numpy.core.numeric as nx
from numpy.compat import asbytes, asunicode, bytes
def _decode_line(line, encoding=None):
"""Decode bytes from binary input streams.
Defaults to decoding from 'latin1'. That differs from the behavior of
np.compat.asunicode that decodes from 'ascii'.
Parameters
----------
line : str or bytes
Line to be decoded.
Returns
-------
decoded_line : unicode
Unicode in Python 2, a str (unicode) in Python 3.
"""
if type(line) is bytes:
if encoding is None:
line = line.decode('latin1')
else:
line = line.decode(encoding)
return line
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def _is_bytes_like(obj):
"""
Check whether obj behaves like a bytes object.
"""
try:
obj + b''
except (TypeError, ValueError):
return False
return True
def has_nested_fields(ndtype):
    """
    Returns whether one or several fields of a dtype are nested.

    Parameters
    ----------
    ndtype : dtype
        Data-type of a structured array.

    Raises
    ------
    AttributeError
        If `ndtype` does not have a `names` attribute.

    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
    >>> np.lib._iotools.has_nested_fields(dt)
    False
    """
    # `names` is None for non-structured dtypes; a field is nested when
    # its own dtype is structured (i.e. has a non-None `names`).
    return any(ndtype[name].names is not None
               for name in (ndtype.names or ()))
def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type by collapsing nested fields and/or fields
    with a shape.

    Note that the field names are lost.

    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse
    flatten_base : bool, optional
        If True, transform a field with a shape into several fields. Default is
        False.

    Returns
    -------
    types : list of dtype
        The flat list of base data-types.
    """
    if ndtype.names is None:
        # Leaf dtype: optionally expand a sub-array into one entry per
        # element of its shape.
        repeat = int(np.prod(ndtype.shape)) if flatten_base else 1
        return [ndtype.base] * repeat
    # Structured dtype: recurse into every field, in declaration order.
    collected = []
    for fieldname in ndtype.names:
        field_dtype = ndtype.fields[fieldname][0]
        collected.extend(flatten_dtype(field_dtype, flatten_base))
    return collected
class LineSplitter:
    """
    Object to split a string at a given delimiter or at given places.
    Parameters
    ----------
    delimiter : str, int, or sequence of ints, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    comments : str, optional
        Character used to mark the beginning of a comment. Default is '#'.
    autostrip : bool, optional
        Whether to strip each individual field. Default is True.
    """
    def autostrip(self, method):
        """
        Wrapper to strip each member of the output of `method`.
        Parameters
        ----------
        method : function
            Function that takes a single argument and returns a sequence of
            strings.
        Returns
        -------
        wrapped : function
            The result of wrapping `method`. `wrapped` takes a single input
            argument and returns a list of strings that are stripped of
            white-space.
        """
        return lambda input: [_.strip() for _ in method(input)]
    #
    def __init__(self, delimiter=None, comments='#', autostrip=True,
                 encoding=None):
        """Select a splitting strategy based on the type of `delimiter`."""
        # Normalize possible bytes arguments to str up front.
        delimiter = _decode_line(delimiter)
        comments = _decode_line(comments)
        self.comments = comments
        # Delimiter is a character
        if (delimiter is None) or isinstance(delimiter, str):
            # An empty string also means "split on any whitespace".
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            # Turn the cumulative widths into one slice per field.
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (
                self._fixedwidth_splitter, int(delimiter))
        else:
            # Width of 0 falls back to whitespace splitting.
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
        self.encoding = encoding
    #
    def _delimited_splitter(self, line):
        """Chop off comments, strip, and split at delimiter. """
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip(" \r\n")
        if not line:
            return []
        return line.split(self.delimiter)
    #
    def _fixedwidth_splitter(self, line):
        # Split the line into consecutive chunks of `self.delimiter` chars.
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip("\r\n")
        if not line:
            return []
        fixed = self.delimiter
        slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
        return [line[s] for s in slices]
    #
    def _variablewidth_splitter(self, line):
        # Split the line with the precomputed per-field slices.
        if self.comments is not None:
            line = line.split(self.comments)[0]
        if not line:
            return []
        slices = self.delimiter
        return [line[s] for s in slices]
    #
    def __call__(self, line):
        # Decode bytes input (if any), then dispatch to the chosen splitter.
        return self._handyman(_decode_line(line, self.encoding))
class NameValidator:
    """
    Object to validate a list of strings to use as field names.
    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by '_'. During instantiation, the user can define a list
    of names to exclude, as well as a list of invalid characters. Names in
    the exclusion list are appended a '_' character.
    Once an instance has been created, it can be called with a list of
    names, and a list of valid names will be created. The `__call__`
    method accepts an optional keyword "default" that sets the default name
    in case of ambiguity. By default this is 'f', so that names will
    default to `f0`, `f1`, etc.
    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names are appended an
        underscore: for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.
        The default value is True.
    replace_space : '_', optional
        Character(s) used in replacement of white spaces.
    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its
    method `validate`.
    Examples
    --------
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ('file_', 'field2', 'with_space', 'CaSe')
    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
    ...                                           deletechars='q',
    ...                                           case_sensitive=False)
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
    """
    #
    # Names always excluded (Python keywords / builtins commonly misused).
    defaultexcludelist = ['return', 'file', 'print']
    # Characters stripped from candidate names by default.
    defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
    #
    def __init__(self, excludelist=None, deletechars=None,
                 case_sensitive=None, replace_space='_'):
        """Build the exclusion list, delete-set and case converter."""
        # Process the exclusion list ..
        if excludelist is None:
            excludelist = []
        # NOTE(review): extends the caller's list in place when one is
        # passed — confirm callers do not rely on their list being unchanged.
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # Process the list of characters to delete
        if deletechars is None:
            # NOTE(review): this aliases the class-level set, so the add('"')
            # below mutates the shared default (idempotent, but shared).
            delete = self.defaultdeletechars
        else:
            delete = set(deletechars)
        delete.add('"')
        self.deletechars = delete
        # Process the case option .....
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or case_sensitive.startswith('u'):
            self.case_converter = lambda x: x.upper()
        elif case_sensitive.startswith('l'):
            self.case_converter = lambda x: x.lower()
        else:
            msg = 'unrecognized case_sensitive value %s.' % case_sensitive
            raise ValueError(msg)
        #
        self.replace_space = replace_space
    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings as field names for a structured array.
        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string
            reduces its length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the
            initial list of names.
        Returns
        -------
        validatednames : list of str
            The list of validated field names.
        Notes
        -----
        A `NameValidator` instance can be called directly, which is the
        same as calling `validate`. For examples, see `NameValidator`.
        """
        # Initial checks ..............
        if (names is None):
            if (nbfields is None):
                return None
            names = []
        if isinstance(names, str):
            names = [names, ]
        if nbfields is not None:
            # Pad with empty names or truncate to exactly nbfields entries.
            nbnames = len(names)
            if (nbnames < nbfields):
                names = list(names) + [''] * (nbfields - nbnames)
            elif (nbnames > nbfields):
                names = names[:nbfields]
        # Set some shortcuts ...........
        deletechars = self.deletechars
        excludelist = self.excludelist
        case_converter = self.case_converter
        replace_space = self.replace_space
        # Initializes some variables ...
        validatednames = []
        seen = dict()
        nbempty = 0
        #
        for item in names:
            item = case_converter(item).strip()
            if replace_space:
                item = item.replace(' ', replace_space)
            item = ''.join([c for c in item if c not in deletechars])
            if item == '':
                # Name vanished after cleaning: synthesize one from
                # defaultfmt, skipping values already present in `names`.
                item = defaultfmt % nbempty
                while item in names:
                    nbempty += 1
                    item = defaultfmt % nbempty
                nbempty += 1
            elif item in excludelist:
                item += '_'
            # Deduplicate by suffixing a counter on repeats (a_1, a_2, ...).
            cnt = seen.get(item, 0)
            if cnt > 0:
                validatednames.append(item + '_%d' % cnt)
            else:
                validatednames.append(item)
            seen[item] = cnt + 1
        return tuple(validatednames)
    #
    def __call__(self, names, defaultfmt="f%i", nbfields=None):
        # Calling the instance is an alias for validate().
        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
def str2bool(value):
    """
    Tries to transform a string supposed to represent a boolean to a boolean.

    Parameters
    ----------
    value : str
        The string that is transformed to a boolean.

    Returns
    -------
    boolval : bool
        The boolean representation of `value`.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case independent)

    Examples
    --------
    >>> np.lib._iotools.str2bool('TRUE')
    True
    >>> np.lib._iotools.str2bool('false')
    False
    """
    normalized = value.upper()
    if normalized not in ('TRUE', 'FALSE'):
        raise ValueError("Invalid boolean")
    return normalized == 'TRUE'
class ConverterError(Exception):
    """Raised when a converter for string values encounters an error."""
class ConverterLockError(ConverterError):
    """Raised when attempting to upgrade a locked converter."""
class ConversionWarning(UserWarning):
    """Warning issued when a string converter has a problem.
    Notes
    -----
    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
    is explicitly suppressed with the "invalid_raise" keyword.
    """
class StringConverter:
    """
    Factory class for function transforming a string into another object
    (int, float).
    After initialization, an instance can be called to transform a string
    into another object. If the string is recognized as representing a
    missing value, a default value is returned.
    Attributes
    ----------
    func : function
        Function used for the conversion.
    default : any
        Default value to return when the input corresponds to a missing
        value.
    type : type
        Type of the output.
    _status : int
        Integer representing the order of the conversion.
    _mapper : sequence of tuples
        Sequence of tuples (dtype, function, default value) to evaluate in
        order.
    _locked : bool
        Holds `locked` parameter.
    Parameters
    ----------
    dtype_or_func : {None, dtype, function}, optional
        If a `dtype`, specifies the input data type, used to define a basic
        function and a default value for missing data. For example, when
        `dtype` is float, the `func` attribute is set to `float` and the
        default value to `np.nan`. If a function, this function is used to
        convert a string to another object. In this case, it is recommended
        to give an associated default value as input.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given, `StringConverter`
        tries to supply a reasonable default value.
    missing_values : {None, sequence of str}, optional
        ``None`` or sequence of strings indicating a missing value. If ``None``
        then missing values are indicated by empty entries. The default is
        ``None``.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent automatic
        upgrade or not. Default is False.
    """
    #
    # Ordered (dtype, converter, default) triples: upgrade() walks this
    # list from the current _status towards more general types.
    _mapper = [(nx.bool_, str2bool, False),
               (nx.int_, int, -1),]
    # On 32-bit systems, we need to make sure that we explicitly include
    # nx.int64 since ns.int_ is nx.int32.
    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
        _mapper.append((nx.int64, int, -1))
    _mapper.extend([(nx.float64, float, nx.nan),
                    (nx.complex128, complex, nx.nan + 0j),
                    (nx.longdouble, nx.longdouble, nx.nan),
                    (nx.unicode_, asunicode, '???'),
                    (nx.string_, asbytes, '???'),
                    # If a non-default dtype is passed, fall back to generic
                    # ones (should only be used for the converter)
                    (nx.integer, int, -1),
                    (nx.floating, float, nx.nan),
                    (nx.complexfloating, complex, nx.nan + 0j),])
    (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
    @classmethod
    def _getdtype(cls, val):
        """Returns the dtype of the input variable."""
        return np.array(val).dtype
    #
    @classmethod
    def _getsubdtype(cls, val):
        """Returns the type of the dtype of the input variable."""
        return np.array(val).dtype.type
    #
    # This is a bit annoying. We want to return the "general" type in most
    # cases (ie. "string" rather than "S10"), but we want to return the
    # specific type for datetime64 (ie. "datetime64[us]" rather than
    # "datetime64").
    @classmethod
    def _dtypeortype(cls, dtype):
        """Returns dtype for datetime64 and type of dtype otherwise."""
        if dtype.type == np.datetime64:
            return dtype
        return dtype.type
    #
    @classmethod
    def upgrade_mapper(cls, func, default=None):
        """
        Upgrade the mapper of a StringConverter by adding a new function and
        its corresponding default.
        The input function (or sequence of functions) and its associated
        default value (if any) is inserted in penultimate position of the
        mapper. The corresponding type is estimated from the dtype of the
        default value.
        Parameters
        ----------
        func : var
            Function, or sequence of functions
        Examples
        --------
        >>> import dateutil.parser
        >>> import datetime
        >>> dateparser = dateutil.parser.parse
        >>> defaultdate = datetime.date(2000, 1, 1)
        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
        """
        # Func is a single functions
        if hasattr(func, '__call__'):
            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
            return
        elif hasattr(func, '__iter__'):
            # A sequence of ready-made (dtype, func, default) triples.
            if isinstance(func[0], (tuple, list)):
                for _ in func:
                    cls._mapper.insert(-1, _)
                return
            # A sequence of functions: pair each with its default (pad the
            # defaults list with None when it is shorter).
            if default is None:
                default = [None] * len(func)
            else:
                default = list(default)
                default.append([None] * (len(func) - len(default)))
            for (fct, dft) in zip(func, default):
                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
    #
    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
                 locked=False):
        """Resolve `dtype_or_func` into (func, type, default, status)."""
        # Defines a lock for upgrade
        self._locked = bool(locked)
        # No input dtype: minimal initialization
        if dtype_or_func is None:
            self.func = str2bool
            self._status = 0
            self.default = default or False
            dtype = np.dtype('bool')
        else:
            # Is the input a np.dtype ?
            try:
                self.func = None
                dtype = np.dtype(dtype_or_func)
            except TypeError:
                # dtype_or_func must be a function, then
                if not hasattr(dtype_or_func, '__call__'):
                    errmsg = ("The input argument `dtype` is neither a"
                              " function nor a dtype (got '%s' instead)")
                    raise TypeError(errmsg % type(dtype_or_func))
                # Set the function
                self.func = dtype_or_func
                # If we don't have a default, try to guess it or set it to
                # None
                if default is None:
                    try:
                        default = self.func('0')
                    except ValueError:
                        default = None
                dtype = self._getdtype(default)
            # Set the status according to the dtype
            _status = -1
            for (i, (deftype, func, default_def)) in enumerate(self._mapper):
                if np.issubdtype(dtype.type, deftype):
                    _status = i
                    if default is None:
                        self.default = default_def
                    else:
                        self.default = default
                    break
            # if a converter for the specific dtype is available use that
            last_func = func
            for (i, (deftype, func, default_def)) in enumerate(self._mapper):
                if dtype.type == deftype:
                    _status = i
                    last_func = func
                    if default is None:
                        self.default = default_def
                    else:
                        self.default = default
                    break
            func = last_func
            if _status == -1:
                # We never found a match in the _mapper...
                _status = 0
                self.default = default
            self._status = _status
            # If the input was a dtype, set the function to the last we saw
            if self.func is None:
                self.func = func
            # If the status is 1 (int), change the function to
            # something more robust.
            if self.func == self._mapper[1][1]:
                if issubclass(dtype.type, np.uint64):
                    self.func = np.uint64
                elif issubclass(dtype.type, np.int64):
                    self.func = np.int64
                else:
                    # int(float(x)) also accepts strings like "1.0".
                    self.func = lambda x: int(float(x))
        # Store the list of strings corresponding to missing values.
        if missing_values is None:
            self.missing_values = {''}
        else:
            if isinstance(missing_values, str):
                missing_values = missing_values.split(",")
            self.missing_values = set(list(missing_values) + [''])
        #
        self._callingfunction = self._strict_call
        self.type = self._dtypeortype(dtype)
        self._checked = False
        self._initial_default = default
    #
    def _loose_call(self, value):
        """Convert `value`, silently falling back to the default on failure."""
        try:
            return self.func(value)
        except ValueError:
            return self.default
    #
    def _strict_call(self, value):
        """Convert `value`; raise ValueError unless it is a missing value."""
        try:
            # We check if we can convert the value using the current function
            new_value = self.func(value)
            # In addition to having to check whether func can convert the
            # value, we also have to make sure that we don't get overflow
            # errors for integers.
            if self.func is int:
                try:
                    np.array(value, dtype=self.type)
                except OverflowError:
                    raise ValueError
            # We're still here so we can now return the new value
            return new_value
        except ValueError:
            if value.strip() in self.missing_values:
                if not self._status:
                    self._checked = False
                return self.default
            raise ValueError("Cannot convert string '%s'" % value)
    #
    def __call__(self, value):
        # Dispatch through the currently-selected calling mode.
        return self._callingfunction(value)
    #
    def _do_upgrade(self):
        """Move `_status` one step down the mapper, or raise if impossible."""
        # Raise an exception if we locked the converter...
        if self._locked:
            errmsg = "Converter is locked and cannot be upgraded"
            raise ConverterLockError(errmsg)
        _statusmax = len(self._mapper)
        # Complains if we try to upgrade by the maximum
        _status = self._status
        if _status == _statusmax:
            errmsg = "Could not find a valid conversion function"
            raise ConverterError(errmsg)
        elif _status < _statusmax - 1:
            _status += 1
        self.type, self.func, default = self._mapper[_status]
        self._status = _status
        if self._initial_default is not None:
            self.default = self._initial_default
        else:
            self.default = default
    def upgrade(self, value):
        """
        Find the best converter for a given string, and return the result.
        The supplied string `value` is converted by testing different
        converters in order. First the `func` method of the
        `StringConverter` instance is tried, if this fails other available
        converters are tried. The order in which these other converters
        are tried is determined by the `_status` attribute of the instance.
        Parameters
        ----------
        value : str
            The string to convert.
        Returns
        -------
        out : any
            The result of converting `value` with the appropriate converter.
        """
        self._checked = True
        try:
            return self._strict_call(value)
        except ValueError:
            self._do_upgrade()
            return self.upgrade(value)
    def iterupgrade(self, value):
        """Upgrade the converter until every item of `value` converts."""
        self._checked = True
        if not hasattr(value, '__iter__'):
            value = (value,)
        _strict_call = self._strict_call
        try:
            for _m in value:
                _strict_call(_m)
        except ValueError:
            # One item failed: upgrade and retry the whole sequence.
            self._do_upgrade()
            self.iterupgrade(value)
    def update(self, func, default=None, testing_value=None,
               missing_values='', locked=False):
        """
        Set StringConverter attributes directly.
        Parameters
        ----------
        func : function
            Conversion function.
        default : any, optional
            Value to return by default, that is, when the string to be
            converted is flagged as missing. If not given,
            `StringConverter` tries to supply a reasonable default value.
        testing_value : str, optional
            A string representing a standard input value of the converter.
            This string is used to help defining a reasonable default
            value.
        missing_values : {sequence of str, None}, optional
            Sequence of strings indicating a missing value. If ``None``, then
            the existing `missing_values` are cleared. The default is `''`.
        locked : bool, optional
            Whether the StringConverter should be locked to prevent
            automatic upgrade or not. Default is False.
        Notes
        -----
        `update` takes the same parameters as the constructor of
        `StringConverter`, except that `func` does not accept a `dtype`
        whereas `dtype_or_func` in the constructor does.
        """
        self.func = func
        self._locked = locked
        # Don't reset the default to None if we can avoid it
        if default is not None:
            self.default = default
            self.type = self._dtypeortype(self._getdtype(default))
        else:
            # Infer the output type by converting a probe value.
            try:
                tester = func(testing_value or '1')
            except (TypeError, ValueError):
                tester = None
            self.type = self._dtypeortype(self._getdtype(tester))
        # Add the missing values to the existing set or clear it.
        if missing_values is None:
            # Clear all missing values even though the ctor initializes it to
            # set(['']) when the argument is None.
            self.missing_values = set()
        else:
            if not np.iterable(missing_values):
                missing_values = [missing_values]
            if not all(isinstance(v, str) for v in missing_values):
                raise TypeError("missing_values must be strings or unicode")
            self.missing_values.update(missing_values)
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
    """
    Convenience function to create a `np.dtype` object.
    The function processes the input `dtype` and matches it with the given
    names.
    Parameters
    ----------
    ndtype : var
        Definition of the dtype. Can be any string or dictionary recognized
        by the `np.dtype` function, or a sequence of types.
    names : str or sequence, optional
        Sequence of strings to use as field names for a structured dtype.
        For convenience, `names` can be a string of a comma-separated list
        of names.
    defaultfmt : str, optional
        Format string used to define missing names, such as ``"f%i"``
        (default) or ``"fields_%02i"``.
    validationargs : optional
        A series of optional arguments used to initialize a
        `NameValidator`.
    Examples
    --------
    >>> np.lib._iotools.easy_dtype(float)
    dtype('float64')
    >>> np.lib._iotools.easy_dtype("i4, f8")
    dtype([('f0', '<i4'), ('f1', '<f8')])
    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
    dtype([('field_000', '<i4'), ('field_001', '<f8')])
    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    """
    try:
        ndtype = np.dtype(ndtype)
    except TypeError:
        # `ndtype` was a sequence of types: build a structured dtype whose
        # field names come from `names` (validated/padded to match).
        validate = NameValidator(**validationargs)
        nbfields = len(ndtype)
        if names is None:
            names = [''] * len(ndtype)
        elif isinstance(names, str):
            names = names.split(",")
        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
        ndtype = np.dtype(dict(formats=ndtype, names=names))
    else:
        # Explicit names
        if names is not None:
            validate = NameValidator(**validationargs)
            if isinstance(names, str):
                names = names.split(",")
            # Simple dtype: repeat to match the nb of names
            if ndtype.names is None:
                formats = tuple([ndtype.type] * len(names))
                names = validate(names, defaultfmt=defaultfmt)
                ndtype = np.dtype(list(zip(names, formats)))
            # Structured dtype: just validate the names as needed
            else:
                ndtype.names = validate(names, nbfields=len(ndtype.names),
                                        defaultfmt=defaultfmt)
        # No implicit names
        elif ndtype.names is not None:
            validate = NameValidator(**validationargs)
            # Default initial names : should we change the format ?
            numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
            if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
                # Names are the auto-generated f0, f1, ...: regenerate them
                # with the requested format instead.
                ndtype.names = validate([''] * len(ndtype.names),
                                        defaultfmt=defaultfmt)
            # Explicit initial names : just validate
            else:
                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
    return ndtype
| {
"repo_name": "WarrenWeckesser/numpy",
"path": "numpy/lib/_iotools.py",
"copies": "4",
"size": "30879",
"license": "bsd-3-clause",
"hash": -7375033473446904000,
"line_mean": 33.0452039691,
"line_max": 79,
"alpha_frac": 0.5613523754,
"autogenerated": false,
"ratio": 4.437275470613594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00004733108371476618,
"num_lines": 907
} |
"""A collection of functions designed to help I/O with ascii files.
"""
from __future__ import division, absolute_import, print_function
__docformat__ = "restructuredtext en"
import sys
import numpy as np
import numpy.core.numeric as nx
from numpy.compat import asbytes, asunicode, bytes, asbytes_nested, basestring
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
def _decode_line(line, encoding=None):
"""Decode bytes from binary input streams.
Defaults to decoding from 'latin1'. That differs from the behavior of
np.compat.asunicode that decodes from 'ascii'.
Parameters
----------
line : str or bytes
Line to be decoded.
Returns
-------
decoded_line : unicode
Unicode in Python 2, a str (unicode) in Python 3.
"""
if type(line) is bytes:
if encoding is None:
line = line.decode('latin1')
else:
line = line.decode(encoding)
return line
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def _is_bytes_like(obj):
"""
Check whether obj behaves like a bytes object.
"""
try:
obj + b''
except (TypeError, ValueError):
return False
return True
def _to_filehandle(fname, flag='r', return_opened=False):
    """
    Returns the filehandle corresponding to a string or a file.
    If the string ends in '.gz', the file is automatically unzipped.
    Parameters
    ----------
    fname : string, filehandle
        Name of the file whose filehandle must be returned.
    flag : string, optional
        Flag indicating the status of the file ('r' for read, 'w' for write).
    return_opened : boolean, optional
        Whether to return the opening status of the file.
    Returns
    -------
    fhd : file object
        The file handle; if `return_opened` is True, a ``(fhd, opened)``
        tuple where `opened` tells whether this call opened the file.
    Raises
    ------
    ValueError
        If `fname` is neither string-like nor has a `seek` method.
    """
    if _is_string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fhd = gzip.open(fname, flag)
        elif fname.endswith('.bz2'):
            import bz2
            # NOTE(review): `flag` is ignored for .bz2 (BZ2File defaults to
            # read mode) -- confirm callers never pass 'w' here.
            fhd = bz2.BZ2File(fname)
        else:
            # BUG FIX: the `file` builtin does not exist on Python 3 (this
            # module explicitly supports py3); `open` is the portable
            # equivalent and an alias of `file` on Python 2.
            fhd = open(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # Already a file-like object: use it as-is, caller keeps ownership.
        fhd = fname
        opened = False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        return fhd, opened
    return fhd
def has_nested_fields(ndtype):
    """
    Returns whether one or several fields of a dtype are nested.
    Parameters
    ----------
    ndtype : dtype
        Data-type of a structured array.
    Raises
    ------
    AttributeError
        If `ndtype` does not have a `names` attribute.
    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
    >>> np.lib._iotools.has_nested_fields(dt)
    False
    """
    for name in ndtype.names or ():
        # BUG FIX: compare to None explicitly instead of truth-testing.
        # `names` is None for a non-structured field, so plain truthiness
        # would also (wrongly) treat a structured sub-dtype with an empty
        # names tuple as non-nested.  (Matches the newer implementation of
        # this same function elsewhere in this file.)
        if ndtype[name].names is not None:
            return True
    return False
def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type by collapsing nested fields and/or fields
    with a shape.

    Note that the field names are lost.

    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse
    flatten_base : bool, optional
        If True, transform a field with a shape into several fields. Default is
        False.

    Returns
    -------
    types : list of dtype
        The flat list of base data-types.
    """
    if ndtype.names is None:
        # Leaf dtype: optionally expand a sub-array into one entry per
        # element of its shape.
        repeat = int(np.prod(ndtype.shape)) if flatten_base else 1
        return [ndtype.base] * repeat
    # Structured dtype: recurse into each field, in declaration order.
    flat = []
    for fieldname in ndtype.names:
        field_dtype = ndtype.fields[fieldname][0]
        flat.extend(flatten_dtype(field_dtype, flatten_base))
    return flat
class LineSplitter(object):
    """
    Object to split a string at a given delimiter or at given places.
    Parameters
    ----------
    delimiter : str, int, or sequence of ints, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    comments : str, optional
        Character used to mark the beginning of a comment. Default is '#'.
    autostrip : bool, optional
        Whether to strip each individual field. Default is True.
    """
    def autostrip(self, method):
        """
        Wrapper to strip each member of the output of `method`.
        Parameters
        ----------
        method : function
            Function that takes a single argument and returns a sequence of
            strings.
        Returns
        -------
        wrapped : function
            The result of wrapping `method`. `wrapped` takes a single input
            argument and returns a list of strings that are stripped of
            white-space.
        """
        return lambda input: [_.strip() for _ in method(input)]
    #
    def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None):
        """Select a splitting strategy based on the type of `delimiter`."""
        # Normalize possible bytes arguments to str up front.
        delimiter = _decode_line(delimiter)
        comments = _decode_line(comments)
        self.comments = comments
        # Delimiter is a character
        if (delimiter is None) or isinstance(delimiter, basestring):
            # An empty string also means "split on any whitespace".
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            # Turn the cumulative widths into one slice per field.
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (
                self._fixedwidth_splitter, int(delimiter))
        else:
            # Width of 0 falls back to whitespace splitting.
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
        self.encoding = encoding
    #
    def _delimited_splitter(self, line):
        """Chop off comments, strip, and split at delimiter. """
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip(" \r\n")
        if not line:
            return []
        return line.split(self.delimiter)
    #
    def _fixedwidth_splitter(self, line):
        # Split the line into consecutive chunks of `self.delimiter` chars.
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip("\r\n")
        if not line:
            return []
        fixed = self.delimiter
        slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
        return [line[s] for s in slices]
    #
    def _variablewidth_splitter(self, line):
        # Split the line with the precomputed per-field slices.
        if self.comments is not None:
            line = line.split(self.comments)[0]
        if not line:
            return []
        slices = self.delimiter
        return [line[s] for s in slices]
    #
    def __call__(self, line):
        # Decode bytes input (if any), then dispatch to the chosen splitter.
        return self._handyman(_decode_line(line, self.encoding))
class NameValidator(object):
    """
    Object to validate a list of strings to use as field names.

    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by '_'. During instantiation, the user can define a list
    of names to exclude, as well as a list of invalid characters. Names in
    the exclusion list are appended a '_' character.

    Once an instance has been created, it can be called with a list of
    names, and a list of valid names will be created. The `__call__`
    method accepts an optional keyword "default" that sets the default name
    in case of ambiguity. By default this is 'f', so that names will
    default to `f0`, `f1`, etc.

    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names are appended an
        underscore: for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.

        The default value is True.
    replace_space : '_', optional
        Character(s) used in replacement of white spaces.

    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its
    method `validate`.

    Examples
    --------
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ['file_', 'field2', 'with_space', 'CaSe']
    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
    ...                                           deletechars='q',
    ...                                           case_sensitive='False')
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ['excl_', 'field2', 'no_', 'with_space', 'case']
    """
    # Names that would shadow keywords/builtins in generated code.
    defaultexcludelist = ['return', 'file', 'print']
    defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")

    def __init__(self, excludelist=None, deletechars=None,
                 case_sensitive=None, replace_space='_'):
        # Process the exclusion list.  Build a fresh list instead of
        # extending the caller's list in place (bug fix: the previous code
        # mutated a user-supplied `excludelist`).
        if excludelist is None:
            excludelist = []
        self.excludelist = list(excludelist) + self.defaultexcludelist
        # Process the list of characters to delete
        if deletechars is None:
            delete = self.defaultdeletechars
        else:
            delete = set(deletechars)
        # Double quotes are always invalid in a field name.
        delete.add('"')
        self.deletechars = delete
        # Process the case option: build a converter callable once.
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or case_sensitive.startswith('u'):
            self.case_converter = lambda x: x.upper()
        elif case_sensitive.startswith('l'):
            self.case_converter = lambda x: x.lower()
        else:
            msg = 'unrecognized case_sensitive value %s.' % case_sensitive
            raise ValueError(msg)
        self.replace_space = replace_space

    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings as field names for a structured array.

        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string
            reduces its length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the
            initial list of names.

        Returns
        -------
        validatednames : tuple of str
            The validated field names.

        Notes
        -----
        A `NameValidator` instance can be called directly, which is the
        same as calling `validate`. For examples, see `NameValidator`.
        """
        # Initial checks: nothing to do when no names and no target width.
        if names is None:
            if nbfields is None:
                return None
            names = []
        if isinstance(names, basestring):
            names = [names, ]
        # Pad or truncate to the requested number of fields.
        if nbfields is not None:
            nbnames = len(names)
            if nbnames < nbfields:
                names = list(names) + [''] * (nbfields - nbnames)
            elif nbnames > nbfields:
                names = names[:nbfields]
        # Set some shortcuts
        deletechars = self.deletechars
        excludelist = self.excludelist
        case_converter = self.case_converter
        replace_space = self.replace_space
        # Initializes some variables
        validatednames = []
        seen = dict()
        nbempty = 0

        for item in names:
            item = case_converter(item).strip()
            if replace_space:
                item = item.replace(' ', replace_space)
            item = ''.join([c for c in item if c not in deletechars])
            if item == '':
                # Name reduced to nothing: synthesize one from defaultfmt.
                # NOTE(review): collisions are checked against the *input*
                # names, not the validated output — presumably intentional;
                # confirm before changing.
                item = defaultfmt % nbempty
                while item in names:
                    nbempty += 1
                    item = defaultfmt % nbempty
                nbempty += 1
            elif item in excludelist:
                item += '_'
            # Deduplicate repeated names by appending a counter suffix.
            cnt = seen.get(item, 0)
            if cnt > 0:
                validatednames.append(item + '_%d' % cnt)
            else:
                validatednames.append(item)
            seen[item] = cnt + 1
        return tuple(validatednames)

    def __call__(self, names, defaultfmt="f%i", nbfields=None):
        # Calling the instance is an alias for `validate`.
        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
def str2bool(value):
    """
    Tries to transform a string supposed to represent a boolean to a boolean.

    Parameters
    ----------
    value : str
        The string that is transformed to a boolean.

    Returns
    -------
    boolval : bool
        The boolean representation of `value`.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case independent)

    Examples
    --------
    >>> np.lib._iotools.str2bool('TRUE')
    True
    >>> np.lib._iotools.str2bool('false')
    False
    """
    # Normalise case once, then dispatch through a lookup table.
    lookup = {'TRUE': True, 'FALSE': False}
    try:
        return lookup[value.upper()]
    except KeyError:
        raise ValueError("Invalid boolean")
class ConverterError(Exception):
    """Raised when a converter for string values encounters an error."""
class ConverterLockError(ConverterError):
    """Raised on an attempt to upgrade a converter that is locked."""
class ConversionWarning(UserWarning):
    """Warning issued when a string converter has a problem.

    Notes
    -----
    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
    is explicitly suppressed with the "invalid_raise" keyword.
    """
class StringConverter(object):
    """
    Factory class for function transforming a string into another object
    (int, float).

    After initialization, an instance can be called to transform a string
    into another object. If the string is recognized as representing a
    missing value, a default value is returned.

    Attributes
    ----------
    func : function
        Function used for the conversion.
    default : any
        Default value to return when the input corresponds to a missing
        value.
    type : type
        Type of the output.
    _status : int
        Integer representing the order of the conversion.
    _mapper : sequence of tuples
        Sequence of tuples (dtype, function, default value) to evaluate in
        order.
    _locked : bool
        Holds `locked` parameter.

    Parameters
    ----------
    dtype_or_func : {None, dtype, function}, optional
        If a `dtype`, specifies the input data type, used to define a basic
        function and a default value for missing data. For example, when
        `dtype` is float, the `func` attribute is set to `float` and the
        default value to `np.nan`. If a function, this function is used to
        convert a string to another object. In this case, it is recommended
        to give an associated default value as input.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given, `StringConverter`
        tries to supply a reasonable default value.
    missing_values : {None, sequence of str}, optional
        ``None`` or sequence of strings indicating a missing value. If ``None``
        then missing values are indicated by empty entries. The default is
        ``None``.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent automatic
        upgrade or not. Default is False.
    """
    # Converters are tried in this order; the converter chosen for a value
    # can later be "upgraded" to one further down the list.
    _mapper = [(nx.bool_, str2bool, False),
               (nx.integer, int, -1)]

    # On 32-bit systems, we need to make sure that we explicitly include
    # nx.int64 since nx.integer is nx.int32.
    if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize:
        _mapper.append((nx.int64, int, -1))

    _mapper.extend([(nx.floating, float, nx.nan),
                    (nx.complexfloating, complex, nx.nan + 0j),
                    (nx.longdouble, nx.longdouble, nx.nan),
                    (nx.unicode_, asunicode, '???'),
                    (nx.string_, asbytes, '???')])

    (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)

    @classmethod
    def _getdtype(cls, val):
        """Returns the dtype of the input variable."""
        return np.array(val).dtype

    @classmethod
    def _getsubdtype(cls, val):
        """Returns the type of the dtype of the input variable."""
        return np.array(val).dtype.type

    # This is a bit annoying. We want to return the "general" type in most
    # cases (ie. "string" rather than "S10"), but we want to return the
    # specific type for datetime64 (ie. "datetime64[us]" rather than
    # "datetime64").
    @classmethod
    def _dtypeortype(cls, dtype):
        """Returns dtype for datetime64 and type of dtype otherwise."""
        if dtype.type == np.datetime64:
            return dtype
        return dtype.type

    @classmethod
    def upgrade_mapper(cls, func, default=None):
        """
        Upgrade the mapper of a StringConverter by adding a new function and
        its corresponding default.

        The input function (or sequence of functions) and its associated
        default value (if any) is inserted in penultimate position of the
        mapper. The corresponding type is estimated from the dtype of the
        default value.

        Parameters
        ----------
        func : var
            Function, or sequence of functions

        Examples
        --------
        >>> import dateutil.parser
        >>> import datetime
        >>> dateparser = dateutil.parser.parse
        >>> defaultdate = datetime.date(2000, 1, 1)
        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
        """
        # Func is a single function
        if hasattr(func, '__call__'):
            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
            return
        elif hasattr(func, '__iter__'):
            if isinstance(func[0], (tuple, list)):
                for _ in func:
                    cls._mapper.insert(-1, _)
                return
            if default is None:
                default = [None] * len(func)
            else:
                default = list(default)
                # Pad with None so zip() below pairs every function with a
                # default.  (Bug fix: this previously used list.append,
                # which added the whole padding list as a single element.)
                default.extend([None] * (len(func) - len(default)))
            for (fct, dft) in zip(func, default):
                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))

    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
                 locked=False):
        # Defines a lock for upgrade
        self._locked = bool(locked)
        # No input dtype: minimal initialization
        if dtype_or_func is None:
            self.func = str2bool
            self._status = 0
            self.default = default or False
            dtype = np.dtype('bool')
        else:
            # Is the input a np.dtype ?
            try:
                self.func = None
                dtype = np.dtype(dtype_or_func)
            except TypeError:
                # dtype_or_func must be a function, then
                if not hasattr(dtype_or_func, '__call__'):
                    errmsg = ("The input argument `dtype` is neither a"
                              " function nor a dtype (got '%s' instead)")
                    raise TypeError(errmsg % type(dtype_or_func))
                # Set the function
                self.func = dtype_or_func
                # If we don't have a default, try to guess it or set it to
                # None
                if default is None:
                    try:
                        default = self.func('0')
                    except ValueError:
                        default = None
                dtype = self._getdtype(default)
            # Set the status according to the dtype
            _status = -1
            for (i, (deftype, func, default_def)) in enumerate(self._mapper):
                if np.issubdtype(dtype.type, deftype):
                    _status = i
                    if default is None:
                        self.default = default_def
                    else:
                        self.default = default
                    break
            # if a converter for the specific dtype is available use that
            last_func = func
            for (i, (deftype, func, default_def)) in enumerate(self._mapper):
                if dtype.type == deftype:
                    _status = i
                    last_func = func
                    if default is None:
                        self.default = default_def
                    else:
                        self.default = default
                    break
            func = last_func
            if _status == -1:
                # We never found a match in the _mapper...
                _status = 0
                self.default = default
            self._status = _status
            # If the input was a dtype, set the function to the last we saw
            if self.func is None:
                self.func = func
            # If the status is 1 (int), change the function to
            # something more robust.
            if self.func == self._mapper[1][1]:
                if issubclass(dtype.type, np.uint64):
                    self.func = np.uint64
                elif issubclass(dtype.type, np.int64):
                    self.func = np.int64
                else:
                    # int(float(x)) also accepts strings like "1.0".
                    self.func = lambda x: int(float(x))
        # Store the list of strings corresponding to missing values.
        if missing_values is None:
            self.missing_values = set([''])
        else:
            if isinstance(missing_values, basestring):
                missing_values = missing_values.split(",")
            self.missing_values = set(list(missing_values) + [''])
        # Strict mode by default; the empty string is always "missing".
        self._callingfunction = self._strict_call
        self.type = self._dtypeortype(dtype)
        self._checked = False
        self._initial_default = default

    def _loose_call(self, value):
        # Fall back to the default instead of raising on bad input.
        try:
            return self.func(value)
        except ValueError:
            return self.default

    def _strict_call(self, value):
        try:
            # We check if we can convert the value using the current function
            new_value = self.func(value)
            # In addition to having to check whether func can convert the
            # value, we also have to make sure that we don't get overflow
            # errors for integers.
            if self.func is int:
                try:
                    np.array(value, dtype=self.type)
                except OverflowError:
                    raise ValueError
            # We're still here so we can now return the new value
            return new_value
        except ValueError:
            if value.strip() in self.missing_values:
                if not self._status:
                    self._checked = False
                return self.default
            raise ValueError("Cannot convert string '%s'" % value)

    def __call__(self, value):
        return self._callingfunction(value)

    def upgrade(self, value):
        """
        Find the best converter for a given string, and return the result.

        The supplied string `value` is converted by testing different
        converters in order. First the `func` method of the
        `StringConverter` instance is tried, if this fails other available
        converters are tried. The order in which these other converters
        are tried is determined by the `_status` attribute of the instance.

        Parameters
        ----------
        value : str
            The string to convert.

        Returns
        -------
        out : any
            The result of converting `value` with the appropriate converter.
        """
        self._checked = True
        try:
            return self._strict_call(value)
        except ValueError:
            # Raise an exception if we locked the converter...
            if self._locked:
                errmsg = "Converter is locked and cannot be upgraded"
                raise ConverterLockError(errmsg)
            _statusmax = len(self._mapper)
            # Complains if we try to upgrade by the maximum
            _status = self._status
            if _status == _statusmax:
                errmsg = "Could not find a valid conversion function"
                raise ConverterError(errmsg)
            elif _status < _statusmax - 1:
                _status += 1
            (self.type, self.func, default) = self._mapper[_status]
            self._status = _status
            if self._initial_default is not None:
                self.default = self._initial_default
            else:
                self.default = default
            return self.upgrade(value)

    def iterupgrade(self, value):
        # Upgrade the converter until every element of `value` converts
        # under _strict_call (same escalation logic as `upgrade`).
        self._checked = True
        if not hasattr(value, '__iter__'):
            value = (value,)
        _strict_call = self._strict_call
        try:
            for _m in value:
                _strict_call(_m)
        except ValueError:
            # Raise an exception if we locked the converter...
            if self._locked:
                errmsg = "Converter is locked and cannot be upgraded"
                raise ConverterLockError(errmsg)
            _statusmax = len(self._mapper)
            # Complains if we try to upgrade by the maximum
            _status = self._status
            if _status == _statusmax:
                raise ConverterError(
                    "Could not find a valid conversion function"
                )
            elif _status < _statusmax - 1:
                _status += 1
            (self.type, self.func, default) = self._mapper[_status]
            if self._initial_default is not None:
                self.default = self._initial_default
            else:
                self.default = default
            self._status = _status
            self.iterupgrade(value)

    def update(self, func, default=None, testing_value=None,
               missing_values='', locked=False):
        """
        Set StringConverter attributes directly.

        Parameters
        ----------
        func : function
            Conversion function.
        default : any, optional
            Value to return by default, that is, when the string to be
            converted is flagged as missing. If not given,
            `StringConverter` tries to supply a reasonable default value.
        testing_value : str, optional
            A string representing a standard input value of the converter.
            This string is used to help defining a reasonable default
            value.
        missing_values : {sequence of str, None}, optional
            Sequence of strings indicating a missing value. If ``None``, then
            the existing `missing_values` are cleared. The default is `''`.
        locked : bool, optional
            Whether the StringConverter should be locked to prevent
            automatic upgrade or not. Default is False.

        Notes
        -----
        `update` takes the same parameters as the constructor of
        `StringConverter`, except that `func` does not accept a `dtype`
        whereas `dtype_or_func` in the constructor does.
        """
        self.func = func
        self._locked = locked
        # Don't reset the default to None if we can avoid it
        if default is not None:
            self.default = default
            self.type = self._dtypeortype(self._getdtype(default))
        else:
            try:
                tester = func(testing_value or '1')
            except (TypeError, ValueError):
                tester = None
            self.type = self._dtypeortype(self._getdtype(tester))
        # Add the missing values to the existing set or clear it.
        if missing_values is None:
            # Clear all missing values even though the ctor initializes it to
            # set(['']) when the argument is None.
            self.missing_values = set()
        else:
            if not np.iterable(missing_values):
                missing_values = [missing_values]
            if not all(isinstance(v, basestring) for v in missing_values):
                raise TypeError("missing_values must be strings or unicode")
            self.missing_values.update(missing_values)
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
    """
    Convenience function to create a `np.dtype` object.

    The function processes the input `dtype` and matches it with the given
    names.

    Parameters
    ----------
    ndtype : var
        Definition of the dtype. Can be any string or dictionary recognized
        by the `np.dtype` function, or a sequence of types.
    names : str or sequence, optional
        Sequence of strings to use as field names for a structured dtype.
        For convenience, `names` can be a string of a comma-separated list
        of names.
    defaultfmt : str, optional
        Format string used to define missing names, such as ``"f%i"``
        (default) or ``"fields_%02i"``.
    validationargs : optional
        A series of optional arguments used to initialize a
        `NameValidator`.

    Examples
    --------
    >>> np.lib._iotools.easy_dtype(float)
    dtype('float64')
    >>> np.lib._iotools.easy_dtype("i4, f8")
    dtype([('f0', '<i4'), ('f1', '<f8')])
    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
    dtype([('field_000', '<i4'), ('field_001', '<f8')])
    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    """
    try:
        ndtype = np.dtype(ndtype)
    except TypeError:
        # np.dtype rejected the input: treat `ndtype` as a sequence of
        # types, validate/synthesize the names, and assemble a structured
        # dtype from the (formats, names) mapping.
        validate = NameValidator(**validationargs)
        nbfields = len(ndtype)
        if names is None:
            names = [''] * len(ndtype)
        elif isinstance(names, basestring):
            names = names.split(",")
        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
        ndtype = np.dtype(dict(formats=ndtype, names=names))
    else:
        # `ndtype` is a valid dtype; nbtypes == 0 means a plain (non
        # structured) dtype with no fields.
        nbtypes = len(ndtype)
        # Explicit names
        if names is not None:
            validate = NameValidator(**validationargs)
            if isinstance(names, basestring):
                names = names.split(",")
            # Simple dtype: repeat to match the nb of names
            if nbtypes == 0:
                formats = tuple([ndtype.type] * len(names))
                names = validate(names, defaultfmt=defaultfmt)
                ndtype = np.dtype(list(zip(names, formats)))
            # Structured dtype: just validate the names as needed
            else:
                ndtype.names = validate(names, nbfields=nbtypes,
                                        defaultfmt=defaultfmt)
        # No implicit names
        elif (nbtypes > 0):
            validate = NameValidator(**validationargs)
            # Default initial names : should we change the format ?
            # (Only rewrite auto-generated "f0", "f1", ... names when a
            # non-default format was requested.)
            if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and
                    (defaultfmt != "f%i")):
                ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
            # Explicit initial names : just validate
            else:
                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
    return ndtype
| {
"repo_name": "Eric89GXL/numpy",
"path": "numpy/lib/_iotools.py",
"copies": "15",
"size": "32704",
"license": "bsd-3-clause",
"hash": -2698835629310235600,
"line_mean": 33.3168940189,
"line_max": 84,
"alpha_frac": 0.5635396282,
"autogenerated": false,
"ratio": 4.45558583106267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000012344916980433306,
"num_lines": 953
} |
#A collection of functions for dealing with Dadi models
# A. Kern
import dadi
import numpy
import scipy
import pylab
#import nlopt
######### Demographic stuff
def OutOfAfricaGrowB((nuAf, nuEu0, nuEu, nuNA0, nuNA,
                      TAf, TB, TEuNA, p_misid), (n1, n2, n3), pts):
    """Three-population Out-of-Africa style model for dadi, no migration.

    Epochs: ancestral size change to ``nuAf`` for ``TAf``; split of a
    bottleneck population growing exponentially from ``nuEu0`` toward
    ``nuEu`` over the remaining ``TB + TEuNA``; second split creating a
    third population growing from ``nuNA0`` to ``nuNA`` over ``TEuNA``.
    ``(n1, n2, n3)`` are sample sizes, ``pts`` the dadi grid size, and
    ``p_misid`` the ancestral-state misidentification probability mixed in
    via the reversed spectrum.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    # Growth trajectory is parameterised over both remaining epochs.
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TB+TEuNA))
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu_func,
                                    m12=0, m21=0)
    # Size at the end of the TB epoch seeds the next epoch's trajectory.
    nuEu0 = nuEu_func(TB)
    phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA))
    nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA)
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu_func, nu3=nuNA_func,
                                      m12=0, m13=0, m21=0, m23=0,
                                      m31=0, m32=0)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    # Mix in the reversed spectrum for mis-oriented SNPs.
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica_admix((nuAf, nuEu0, nuEu, nuNA0, nuNA,
TAf, TB, TEuNA,T_ad, p_ad, p_misid), (n1,n2,n3), pts):
xx = dadi.Numerics.default_grid(pts)
phi = dadi.PhiManip.phi_1D(xx)
phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TB+TEuNA+T_ad))
phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu_func,
m12=0, m21=0)
nuEu0 = nuEu_func(TB)
phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA+T_ad))
nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA+T_ad)
phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
nu2=nuEu_func, nu3=nuNA_func,
m12=0, m13=0, m21=0, m23=0,
m31=0, m32=0)
nuEu0 = nuEu_func(TEuNA)
nuNA0 = nuNA_func(TEuNA)
phi = dadi.PhiManip.phi_3D_admix_1_and_2_into_3(phi, p_ad,0, xx,xx,xx)
nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/T_ad)
nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/T_ad)
phi = dadi.Integration.three_pops(phi, xx, T_ad, nu1=nuAf,
nu2=nuEu_func, nu3=nuNA_func,
m12=0, m13=0, m21=0, m23=0,
m31=0, m32=0)
fs = dadi.Spectrum.from_phi(phi, (n1,n2,n3), (xx,xx,xx))
return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica_mig_Af_NA((nuAf, nuEu0, nuEu, nuNA0, nuNA,
                           TAf, TB, TEuNA, mNA_Af, mAf_NA, p_misid),
                          (n1, n2, n3), pts):
    """Same as ``OutOfAfricaGrowB`` but with migration between pop 1 and
    pop 3 (rates ``mAf_NA`` into pop 1, ``mNA_Af`` into pop 3) during the
    final three-population epoch only.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TB+TEuNA))
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu_func,
                                    m12=0, m21=0)
    nuEu0 = nuEu_func(TB)
    phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA))
    nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA)
    # Only the 1<->3 migration rates are non-zero in this model.
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu_func, nu3=nuNA_func,
                                      m12=0, m13=mAf_NA, m21=0, m23=0,
                                      m31=mNA_Af, m32=0)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica_mig((nuAf, nuEu0, nuEu, nuNA0, nuNA,
                     TAf, TB, TEuNA, mAf_Eu, mAf_NA, mEu_Af, mEu_NA,
                     mNA_Af, mNA_Eu, p_misid), (n1, n2, n3), pts):
    """``OutOfAfricaGrowB`` with full pairwise migration: 1<->2 during the
    TB epoch and all six pairwise rates during the final epoch.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TB+TEuNA))
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu_func,
                                    m12=mAf_Eu, m21=mEu_Af)
    nuEu0 = nuEu_func(TB)
    phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA))
    nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA)
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu_func, nu3=nuNA_func,
                                      m12=mAf_Eu, m13=mAf_NA, m21=mEu_Af,
                                      m23=mEu_NA, m31=mNA_Af, m32=mNA_Eu)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica_mig_noAncient((nuAf, nuEu0, nuEu, nuNA0, nuNA,
                               TAf, TB, TEuNA, mAf_Eu, mAf_NA, mEu_Af,
                               mEu_NA, mNA_Af, mNA_Eu, p_misid),
                              (n1, n2, n3), pts):
    """Variant of ``OutOfAfrica_mig`` in which there is no migration during
    the "ancient" two-population (TB) epoch; all six pairwise rates apply
    only in the final three-population epoch.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TB+TEuNA))
    # "noAncient": migration is switched off during this epoch.
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu_func,
                                    m12=0, m21=0)
    nuEu0 = nuEu_func(TB)
    phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA))
    nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA)
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu_func, nu3=nuNA_func,
                                      m12=mAf_Eu, m13=mAf_NA, m21=mEu_Af,
                                      m23=mEu_NA, m31=mNA_Af, m32=mNA_Eu)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica_mig_admix((nuAf, nuEu0, nuEu, nuNA0, nuNA,
TAf, TB, TEuNA,T_ad,p_ad,mAf_Eu,mAf_NA,mEu_Af,mEu_NA,mNA_Af,mNA_Eu, p_misid), (n1,n2,n3), pts):
xx = dadi.Numerics.default_grid(pts)
phi = dadi.PhiManip.phi_1D(xx)
phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TB+TEuNA+T_ad))
phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu_func,
m12=mAf_Eu, m21=mEu_Af)
nuEu0 = nuEu_func(TB)
phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA+T_ad))
nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA+T_ad)
phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
nu2=nuEu_func, nu3=nuNA_func,
m12=mAf_Eu, m13=mAf_NA, m21=mEu_Af, m23=mEu_NA,
m31=mNA_Af, m32=mNA_Eu)
nuEu0 = nuEu_func(TEuNA)
nuNA0 = nuNA_func(TEuNA)
phi = dadi.PhiManip.phi_3D_admix_1_and_2_into_3(phi, p_ad,0, xx,xx,xx)
nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/T_ad)
nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/T_ad)
phi = dadi.Integration.three_pops(phi, xx, T_ad, nu1=nuAf,
nu2=nuEu_func, nu3=nuNA_func,
m12=mAf_Eu, m13=mAf_NA, m21=mEu_Af, m23=mEu_NA,
m31=mNA_Af, m32=mNA_Eu)
fs = dadi.Spectrum.from_phi(phi, (n1,n2,n3), (xx,xx,xx))
return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica2((nuAf, nuEu0, nuEu, nuNA0, nuNA,
                  TAf, TB, TEuNA, p_misid), (n1, n2, n3), pts):
    """Variant of ``OutOfAfricaGrowB`` in which pop 2 stays at the constant
    size ``nuEu0`` during the TB epoch; exponential growth only starts in
    the final three-population epoch.  No migration.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    # Constant bottleneck size during TB (no growth here).
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu0,
                                    m12=0, m21=0)
    phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA))
    nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA)
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu_func, nu3=nuNA_func,
                                      m12=0, m13=0, m21=0, m23=0,
                                      m31=0, m32=0)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica2_mig((nuAf, nuEu0, nuEu, nuNA0, nuNA,
                      TAf, TB, TEuNA, mAf_Eu, mAf_NA, mEu_Af, mEu_NA,
                      mNA_Af, mNA_Eu, p_misid), (n1, n2, n3), pts):
    """``OutOfAfrica2`` (constant pop-2 size during TB) with full pairwise
    migration: 1<->2 during TB and all six rates in the final epoch.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu0,
                                    m12=mAf_Eu, m21=mEu_Af)
    phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA))
    nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA)
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu_func, nu3=nuNA_func,
                                      m12=mAf_Eu, m13=mAf_NA, m21=mEu_Af,
                                      m23=mEu_NA, m31=mNA_Af, m32=mNA_Eu)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica2_mig_admix((nuAf, nuEu0, nuEu, nuNA0, nuNA,
TAf, TB, TEuNA,T_ad,p_ad,mAf_Eu,mAf_NA,mEu_Af,mEu_NA,mNA_Af,mNA_Eu, p_misid), (n1,n2,n3), pts):
xx = dadi.Numerics.default_grid(pts)
phi = dadi.PhiManip.phi_1D(xx)
phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu0,
m12=mAf_Eu, m21=mEu_Af)
phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TEuNA+T_ad))
nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA+T_ad)
phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
nu2=nuEu_func, nu3=nuNA_func,
m12=mAf_Eu, m13=mAf_NA, m21=mEu_Af, m23=mEu_NA,
m31=mNA_Af, m32=mNA_Eu)
nuEu0 = nuEu_func(TEuNA)
nuNA0 = nuNA_func(TEuNA)
phi = dadi.PhiManip.phi_3D_admix_1_and_2_into_3(phi, p_ad,0, xx,xx,xx)
nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/T_ad)
nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/T_ad)
phi = dadi.Integration.three_pops(phi, xx, T_ad, nu1=nuAf,
nu2=nuEu_func, nu3=nuNA_func,
m12=mAf_Eu, m13=mAf_NA, m21=mEu_Af, m23=mEu_NA,
m31=mNA_Af, m32=mNA_Eu)
fs = dadi.Spectrum.from_phi(phi, (n1,n2,n3), (xx,xx,xx))
return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica3((nuAf, nuEu, nuNA,
                  TAf, TB, TEuNA, p_misid), (n1, n2, n3), pts):
    """Simplest three-population Out-of-Africa style model: constant sizes
    in every epoch, no growth, no migration.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu,
                                    m12=0, m21=0)
    # Third population splits off from pop 2.
    phi = dadi.PhiManip.phi_2D_to_3D_split_2(xx, phi)
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu, nu3=nuNA,
                                      m12=0, m13=0, m21=0, m23=0,
                                      m31=0, m32=0)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica4((nuAf, nuEu, nuNA,
                  TAf, TB, TEuNA, p_misid), (n1, n2, n3), pts):
    """Same as ``OutOfAfrica3`` except the third population splits from
    pop 1 (``phi_2D_to_3D_split_1``) instead of pop 2 — i.e. an alternative
    branching topology.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu,
                                    m12=0, m21=0)
    # Split from pop 1 here — the only difference from OutOfAfrica3.
    phi = dadi.PhiManip.phi_2D_to_3D_split_1(xx, phi)
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu, nu3=nuNA,
                                      m12=0, m13=0, m21=0, m23=0,
                                      m31=0, m32=0)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
def OutOfAfrica_mig_admix2((nuAf, nuEu0, nuEu, nuNA0, nuNA,
                            TAf, TB, TEuNA, T_ad, mAf_Eu, mAf_NA, mEu_Af,
                            mEu_NA, mNA_Af, mNA_Eu, p_misid),
                           (n1, n2, n3), pts):
    """Out-of-Africa model where the third population is founded by
    admixture at the split (``phi_2D_to_3D_admix``), with migration.

    FIXME(review): ``p_ad`` is used below but is NOT in the parameter
    tuple, so calling this function raises NameError.  The signature
    presumably should include ``p_ad`` after ``T_ad`` (cf.
    ``OutOfAfrica_mig_admix``) — fixing it changes the caller-visible
    parameter count, so it is only flagged here.  Note also that ``T_ad``
    is folded into the pop-2 growth span but no ``T_ad`` epoch is
    integrated.

    NOTE: Python 2 only — tuple parameters were removed by PEP 3113.
    """
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.Integration.one_pop(phi, xx, TAf, nu=nuAf)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/(TB+TEuNA+T_ad))
    phi = dadi.Integration.two_pops(phi, xx, TB, nu1=nuAf, nu2=nuEu_func,
                                    m12=mAf_Eu, m21=mEu_Af)
    nuEu0 = nuEu_func(TB)
    # p_ad is undefined here — see FIXME in the docstring.
    phi = dadi.PhiManip.phi_2D_to_3D_admix(phi, p_ad, xx, xx, xx)
    nuEu_func = lambda t: nuEu0*(nuEu/nuEu0)**(t/TEuNA)
    nuNA_func = lambda t: nuNA0*(nuNA/nuNA0)**(t/TEuNA)
    phi = dadi.Integration.three_pops(phi, xx, TEuNA, nu1=nuAf,
                                      nu2=nuEu_func, nu3=nuNA_func,
                                      m12=mAf_Eu, m13=mAf_NA, m21=mEu_Af,
                                      m23=mEu_NA, m31=mNA_Af, m32=mNA_Eu)
    fs = dadi.Spectrum.from_phi(phi, (n1, n2, n3), (xx, xx, xx))
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
###################################################
################## Two Populations
##two population model with misorientation
def IM_misorient_5epoch(params, ns, pts):
    """Two-population, five-epoch piecewise-constant model with
    misorientation.

    params = (nu1_0..nu1_4, nu2_0..nu2_4, t0..t4, m12, m21, p_misid);
    ns = (n1, n2).  Each epoch i lasts t_i with constant sizes
    (nu1_i, nu2_i) and symmetric-in-time migration rates m12/m21.
    p_misid is the fraction of mis-oriented SNPs, mixed back in via the
    reversed spectrum.  ``pts`` is the dadi integration grid size.
    """
    (nu1_0, nu1_1, nu1_2, nu1_3, nu1_4,
     nu2_0, nu2_1, nu2_2, nu2_3, nu2_4,
     t0, t1, t2, t3, t4, m12, m21, p_misid) = params
    grid = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(grid)
    phi = dadi.PhiManip.phi_1D_to_2D(grid, phi)
    # Integrate each of the five epochs in chronological order.
    epochs = ((t0, nu1_0, nu2_0),
              (t1, nu1_1, nu2_1),
              (t2, nu1_2, nu2_2),
              (t3, nu1_3, nu2_3),
              (t4, nu1_4, nu2_4))
    for duration, size1, size2 in epochs:
        phi = dadi.Integration.two_pops(phi, grid, duration, size1, size2,
                                        m12=m12, m21=m21)
    spectrum = dadi.Spectrum.from_phi(phi, ns, (grid, grid))
    flipped = dadi.Numerics.reverse_array(spectrum)
    return (1 - p_misid) * spectrum + p_misid * flipped
##two population model with misorientation
def IM_misorient(params, ns, pts):
    """Isolation-with-migration model with exponential growth and
    ancestral-state misorientation.

    params = (nu1_0, nu2_0, nu1, nu2, T, m12, m21, p_misid); ns = (n1, n2).
    Each daughter population grows exponentially from nu*_0 at the split to
    nu* at the present over T (in units of 2*Na generations), with
    migration rates m12 (into pop 1) and m21 (into pop 2).  p_misid is the
    fraction of mis-oriented SNPs, added back as the reversed spectrum.
    ``pts`` is the dadi integration grid size.
    """
    nu1_start, nu2_start, nu1_end, nu2_end, T, m12, m21, p_misid = params
    grid = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(grid)
    phi = dadi.PhiManip.phi_1D_to_2D(grid, phi)

    def pop1_size(t):
        # Exponential interpolation from nu1_start (t=0) to nu1_end (t=T).
        return nu1_start * (nu1_end / nu1_start) ** (t / T)

    def pop2_size(t):
        return nu2_start * (nu2_end / nu2_start) ** (t / T)

    phi = dadi.Integration.two_pops(phi, grid, T, pop1_size, pop2_size,
                                    m12=m12, m21=m21)
    spectrum = dadi.Spectrum.from_phi(phi, ns, (grid, grid))
    flipped = dadi.Numerics.reverse_array(spectrum)
    return (1 - p_misid) * spectrum + p_misid * flipped
##two population model with misorientation
def IM_misorient_noMig(params, ns, pts):
    """Split-with-growth model without migration, plus ancestral-state
    misorientation.

    params = (nu1_0, nu2_0, nu1, nu2, T, p_misid); ns = (n1, n2).
    Both daughter populations grow exponentially from nu*_0 to nu* over
    the time T (in units of 2*Na generations) since the split; p_misid is
    the fraction of mis-oriented SNPs mixed back in as the reversed
    spectrum.  ``pts`` is the dadi integration grid size.
    """
    nu1_start, nu2_start, nu1_end, nu2_end, split_time, p_misid = params
    grid = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(grid)
    phi = dadi.PhiManip.phi_1D_to_2D(grid, phi)

    def pop1_size(t):
        return nu1_start * (nu1_end / nu1_start) ** (t / split_time)

    def pop2_size(t):
        return nu2_start * (nu2_end / nu2_start) ** (t / split_time)

    # No gene flow in either direction after the split.
    phi = dadi.Integration.two_pops(phi, grid, split_time,
                                    pop1_size, pop2_size, m12=0, m21=0)
    spectrum = dadi.Spectrum.from_phi(phi, ns, (grid, grid))
    flipped = dadi.Numerics.reverse_array(spectrum)
    return (1 - p_misid) * spectrum + p_misid * flipped
##two population model with misorientation
def IM_misorient_admix(params, ns, pts):
    """
    Isolation-with-migration model with exponential pop growth, one
    admixture pulse from pop 1 into pop 2, and ancestral-state
    misidentification.

    ns = (n1,n2)
    params = (nu1_0,nu2_0,nu1,nu2,T,m12,m21,t_ad,p_ad,p_misid)
    nu1_0: Size of pop 1 after split.
    nu2_0: Size of pop 2 after split.
    nu1: Final size of pop 1.
    nu2: Final size of pop 2.
    T: Time between the split and the admixture pulse
       (in units of 2*Na generations)
    m12: Migration from pop 2 to pop 1 (2*Na*m12)
    m21: Migration from pop 1 to pop 2
    t_ad: Time between the admixture pulse and the present.
    p_ad: Admixture proportion of pop 1 moved into pop 2.
    p_misid: Proportion of sites whose ancestral state is misidentified.
    n1,n2: Sample sizes of resulting Spectrum
    pts: Number of grid points to use in integration.
    """
    nu1_0,nu2_0,nu1,nu2,T,m12,m21,t_ad,p_ad,p_misid = params
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    # Epoch 1 (duration T): exponential growth over the total span T+t_ad.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/(T+t_ad))
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/(T+t_ad))
    phi = dadi.Integration.two_pops(phi, xx, T, nu1_func, nu2_func,
                                    m12=m12, m21=m21)
    # Admixture pulse: fraction p_ad of pop 1 lineages move into pop 2.
    phi = dadi.PhiManip.phi_2D_admix_1_into_2(phi, p_ad, xx,xx)
    # NOTE(review): the epoch-2 start sizes are evaluated at t_ad rather
    # than at T (the end of epoch 1), so the trajectory is discontinuous
    # unless T == t_ad -- confirm the intended time bookkeeping.
    nu1_0 = nu1_func(t_ad)
    nu2_0 = nu2_func(t_ad)
    # Epoch 2 (duration t_ad): grow from the updated sizes to nu1/nu2.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/t_ad)
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/t_ad)
    phi = dadi.Integration.two_pops(phi, xx, t_ad, nu1_func, nu2_func,
                                    m12=m12, m21=m21)
    fs = dadi.Spectrum.from_phi(phi, ns, (xx,xx))
    # Mix in the reversed spectrum to model ancestral misidentification.
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
##two population model with misorientation
def IM_misorient_doubleAdmix(params, ns, pts):
    """
    Isolation-with-migration model with exponential pop growth, two
    admixture pulses from pop 1 into pop 2, and ancestral-state
    misidentification.

    ns = (n1,n2)
    params = (nu1_0,nu2_0,nu1,nu2,T,m12,m21,t_ad1,p_ad1,t_ad2,p_ad2,p_misid)
    nu1_0: Size of pop 1 after split.
    nu2_0: Size of pop 2 after split.
    nu1: Final size of pop 1.
    nu2: Final size of pop 2.
    T: Time between the split and the first admixture pulse
       (in units of 2*Na generations)
    m12: Migration from pop 2 to pop 1 (2*Na*m12)
    m21: Migration from pop 1 to pop 2
    t_ad1: Time between the first and second admixture pulses.
    t_ad2: Time between the second pulse and the present.
    p_ad1, p_ad2: Admixture proportions of the two pulses.
    p_misid: Proportion of sites whose ancestral state is misidentified.
    n1,n2: Sample sizes of resulting Spectrum
    pts: Number of grid points to use in integration.
    """
    nu1_0,nu2_0,nu1,nu2,T,m12,m21,t_ad1,p_ad1,t_ad2,p_ad2,p_misid = params
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    # Epoch 1 (duration T): growth over the total span T+t_ad1+t_ad2.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/(T+t_ad1+t_ad2))
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/(T+t_ad1+t_ad2))
    phi = dadi.Integration.two_pops(phi, xx, T, nu1_func, nu2_func,
                                    m12=m12, m21=m21)
    phi = dadi.PhiManip.phi_2D_admix_1_into_2(phi, p_ad1, xx,xx)
    # NOTE(review): epoch-start sizes are evaluated at t_ad1 (not T), kept
    # as in the single-admix model -- confirm the intended bookkeeping.
    nu1_0 = nu1_func(t_ad1)
    nu2_0 = nu2_func(t_ad1)
    # BUGFIX: the exponent was written t/t_ad1+t_ad2, which parses as
    # (t/t_ad1) + t_ad2 and adds a constant offset to the exponent. The
    # denominator must be the parenthesized remaining time (t_ad1+t_ad2),
    # matching the (T+t_ad1+t_ad2) form in epoch 1 above.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/(t_ad1+t_ad2))
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/(t_ad1+t_ad2))
    phi = dadi.Integration.two_pops(phi, xx, t_ad1, nu1_func, nu2_func,
                                    m12=m12, m21=m21)
    phi = dadi.PhiManip.phi_2D_admix_1_into_2(phi, p_ad2, xx,xx)
    nu1_0 = nu1_func(t_ad2)
    nu2_0 = nu2_func(t_ad2)
    # Epoch 3 (duration t_ad2): reach the final sizes at the present.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/t_ad2)
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/t_ad2)
    phi = dadi.Integration.two_pops(phi, xx, t_ad2, nu1_func, nu2_func,
                                    m12=m12, m21=m21)
    fs = dadi.Spectrum.from_phi(phi, ns, (xx,xx))
    # Mix in the reversed spectrum to model ancestral misidentification.
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
##two population model with misorientation
def IM_misorient_doubleAdmix_noMig(params, ns, pts):
    """
    Isolation model (no migration) with exponential pop growth, two
    admixture pulses from pop 1 into pop 2, and ancestral-state
    misidentification.

    ns = (n1,n2)
    params = (nu1_0,nu2_0,nu1,nu2,T,m12,m21,t_ad1,p_ad1,t_ad2,p_ad2,p_misid)
    nu1_0: Size of pop 1 after split.
    nu2_0: Size of pop 2 after split.
    nu1: Final size of pop 1.
    nu2: Final size of pop 2.
    T: Time between the split and the first admixture pulse
       (in units of 2*Na generations)
    m12, m21: Accepted for interface parity with IM_misorient_doubleAdmix
              but IGNORED -- migration is fixed to zero here.
    t_ad1: Time between the first and second admixture pulses.
    t_ad2: Time between the second pulse and the present.
    p_ad1, p_ad2: Admixture proportions of the two pulses.
    p_misid: Proportion of sites whose ancestral state is misidentified.
    n1,n2: Sample sizes of resulting Spectrum
    pts: Number of grid points to use in integration.
    """
    nu1_0,nu2_0,nu1,nu2,T,m12,m21,t_ad1,p_ad1,t_ad2,p_ad2,p_misid = params
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    # Epoch 1 (duration T): growth over the total span T+t_ad1+t_ad2.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/(T+t_ad1+t_ad2))
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/(T+t_ad1+t_ad2))
    phi = dadi.Integration.two_pops(phi, xx, T, nu1_func, nu2_func,
                                    m12=0, m21=0)
    phi = dadi.PhiManip.phi_2D_admix_1_into_2(phi, p_ad1, xx,xx)
    nu1_0 = nu1_func(t_ad1)
    nu2_0 = nu2_func(t_ad1)
    # BUGFIX: the exponent was written t/t_ad1+t_ad2, which parses as
    # (t/t_ad1) + t_ad2; the denominator must be the parenthesized
    # remaining time (t_ad1+t_ad2), as in epoch 1 above.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/(t_ad1+t_ad2))
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/(t_ad1+t_ad2))
    phi = dadi.Integration.two_pops(phi, xx, t_ad1, nu1_func, nu2_func,
                                    m12=0, m21=0)
    phi = dadi.PhiManip.phi_2D_admix_1_into_2(phi, p_ad2, xx,xx)
    nu1_0 = nu1_func(t_ad2)
    nu2_0 = nu2_func(t_ad2)
    # Epoch 3 (duration t_ad2): reach the final sizes at the present.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/t_ad2)
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/t_ad2)
    phi = dadi.Integration.two_pops(phi, xx, t_ad2, nu1_func, nu2_func,
                                    m12=0, m21=0)
    fs = dadi.Spectrum.from_phi(phi, ns, (xx,xx))
    # Mix in the reversed spectrum to model ancestral misidentification.
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
##two population model with misorientation
def IM_misorient_noMig_admix(params, ns, pts):
    """
    Isolation model (no migration) with exponential pop growth, one
    admixture pulse from pop 1 into pop 2, and ancestral-state
    misidentification.

    ns = (n1,n2)
    params = (nu1_0,nu2_0,nu1,nu2,T,t_ad,p_ad,p_misid)
    nu1_0: Size of pop 1 after split.
    nu2_0: Size of pop 2 after split.
    nu1: Final size of pop 1.
    nu2: Final size of pop 2.
    T: Time between the split and the admixture pulse
       (in units of 2*Na generations)
    t_ad: Time between the admixture pulse and the present.
    p_ad: Admixture proportion of pop 1 moved into pop 2.
    p_misid: Proportion of sites whose ancestral state is misidentified.
    n1,n2: Sample sizes of resulting Spectrum
    pts: Number of grid points to use in integration.
    """
    nu1_0,nu2_0,nu1,nu2,T,t_ad,p_ad,p_misid = params
    xx = dadi.Numerics.default_grid(pts)
    phi = dadi.PhiManip.phi_1D(xx)
    phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
    # Epoch 1 (duration T): exponential growth over the total span T+t_ad.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/(T+t_ad))
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/(T+t_ad))
    phi = dadi.Integration.two_pops(phi, xx, T, nu1_func, nu2_func,
                                    m12=0, m21=0)
    # Admixture pulse: fraction p_ad of pop 1 lineages move into pop 2.
    phi = dadi.PhiManip.phi_2D_admix_1_into_2(phi, p_ad, xx,xx)
    # NOTE(review): epoch-2 start sizes evaluated at t_ad (not T); kept as
    # in IM_misorient_admix -- confirm the intended time bookkeeping.
    nu1_0 = nu1_func(t_ad)
    nu2_0 = nu2_func(t_ad)
    # Epoch 2 (duration t_ad): grow from the updated sizes to nu1/nu2.
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/t_ad)
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/t_ad)
    phi = dadi.Integration.two_pops(phi, xx, t_ad, nu1_func, nu2_func,
                                    m12=0, m21=0)
    fs = dadi.Spectrum.from_phi(phi, ns, (xx,xx))
    # Mix in the reversed spectrum to model ancestral misidentification.
    return (1-p_misid)*fs + p_misid * dadi.Numerics.reverse_array(fs)
##########################
#######
#### Helper functions
def makeRandomParams(lower,upper):
    """Draw a random parameter vector uniformly between two bound vectors.

    lower, upper: sequences of equal length giving per-parameter lower and
        upper bounds.
    Returns: numpy array of len(lower) with element i drawn uniformly from
        [lower[i], upper[i]).
    """
    # numpy.random.uniform broadcasts over the bound arrays, replacing the
    # original element-by-element Python loop with one vectorized draw.
    return numpy.random.uniform(lower, upper, size=len(lower))
def plot2file_3d_comp_multinom(model, data, filename,vmin=None, vmax=None,
                               resid_range=None, fig_num=None,
                               pop_ids=None, residual='Anscombe', adjust=True):
    """
    Multinomial comparison between a 3d model and data, written to a file.

    model: 3-dimensional model SFS
    data: 3-dimensional data SFS
    filename: path the resulting figure is saved to.
    vmin, vmax: color-scale limits for the plotted spectra.
    resid_range: residual plot saturates at +- resid_range.
    fig_num: figure number to clear and reuse; None uses the current figure.
    pop_ids: if not None, override the pop_ids stored in the Spectra.
    residual: 'Anscombe' (more normal under Poisson sampling) or 'linear'
              (less biased) residuals.
    adjust: apply automatic 'subplots_adjust'; set False for manual layout.

    The comparison is multinomial: the model is rescaled to optimally fit
    the data before the Poisson comparison is drawn.
    """
    scaled = dadi.Inference.optimally_scaled_sfs(model, data)
    # Everything else is identical to the Poisson comparison, so delegate.
    plot_kwargs = dict(vmin=vmin, vmax=vmax, resid_range=resid_range,
                       fig_num=fig_num, pop_ids=pop_ids, residual=residual,
                       adjust=adjust)
    plot2file_3d_comp_Poisson(scaled, data, filename, **plot_kwargs)
def plot2file_3d_comp_Poisson(model, data,filename, vmin=None, vmax=None,
                              resid_range=None, fig_num=None, pop_ids=None,
                              residual='Anscombe', adjust=True):
    """
    Poisson comparison between 3d model and data, saved to `filename`.

    model: 3-dimensional model SFS
    data: 3-dimensional data SFS
    filename: path passed to pylab.savefig for the final figure.
    vmin, vmax: Minimum and maximum values plotted for sfs are vmin and
                vmax respectively.
    resid_range: Residual plot saturates at +- resid_range.
    fig_num: Clear and use figure fig_num for display. If None, the
             current figure is reused (pylab.gcf).
    pop_ids: If not None, override pop_ids stored in Spectrum.
    residual: 'Anscombe' for Anscombe residuals, which are more normally
              distributed for Poisson sampling. 'linear' for the linear
              residuals, which can be less biased.
    adjust: Should method use automatic 'subplots_adjust'? For advanced
            manipulation of plots, it may be useful to make this False.
    """
    # Fold the model to match folded data so the two spectra are comparable.
    if data.folded and not model.folded:
        model = model.fold()
    # Share one mask so the 2D marginal sums cover the same entries.
    masked_model, masked_data = dadi.Numerics.intersect_masks(model, data)
    if fig_num is None:
        f = pylab.gcf()
    else:
        f = pylab.figure(fig_num, figsize=(8,10))
    pylab.clf()
    if adjust:
        pylab.subplots_adjust(bottom=0.07, left=0.07, top=0.95, right=0.95)
    # Color-scale limits are taken over all three 2D marginal spectra.
    modelmax = max(masked_model.sum(axis=sax).max() for sax in range(3))
    datamax = max(masked_data.sum(axis=sax).max() for sax in range(3))
    modelmin = min(masked_model.sum(axis=sax).min() for sax in range(3))
    datamin = min(masked_data.sum(axis=sax).min() for sax in range(3))
    max_toplot = max(modelmax, datamax)
    min_toplot = min(modelmin, datamin)
    if vmax is None:
        vmax = max_toplot
    if vmin is None:
        vmin = min_toplot
    # Colorbar arrows indicating values clipped by the chosen vmin/vmax.
    extend = dadi.Plotting._extend_mapping[vmin <= min_toplot, vmax >= max_toplot]
    # Calculate the residuals
    if residual == 'Anscombe':
        resids = [dadi.Inference.\
                Anscombe_Poisson_residual(masked_model.sum(axis=2-sax),
                                          masked_data.sum(axis=2-sax),
                                          mask=vmin) for sax in range(3)]
    elif residual == 'linear':
        resids =[dadi.Inference.\
                linear_Poisson_residual(masked_model.sum(axis=2-sax),
                                        masked_data.sum(axis=2-sax),
                                        mask=vmin) for sax in range(3)]
    else:
        raise ValueError("Unknown class of residual '%s'." % residual)
    min_resid = min([r.min() for r in resids])
    max_resid = max([r.max() for r in resids])
    if resid_range is None:
        resid_range = max((abs(max_resid), abs(min_resid)))
    resid_extend = dadi.Plotting._extend_mapping[-resid_range <= min_resid,
                                                resid_range >= max_resid]
    # Resolve the axis labels for the data/model/residual panel rows.
    if pop_ids is not None:
        if len(pop_ids) != 3:
            raise ValueError('pop_ids must be of length 3.')
        data_ids = model_ids = resid_ids = pop_ids
    else:
        data_ids = masked_data.pop_ids
        model_ids = masked_model.pop_ids
        if model_ids is None:
            model_ids = data_ids
        if model_ids == data_ids:
            resid_ids = model_ids
        else:
            resid_ids = None
    # One column per marginalized axis; rows: data, model, residual, histogram.
    for sax in range(3):
        marg_data = masked_data.sum(axis=2-sax)
        marg_model = masked_model.sum(axis=2-sax)
        curr_ids = []
        for ids in [data_ids, model_ids, resid_ids]:
            if ids is None:
                ids = ['pop0', 'pop1', 'pop2']
            if ids is not None:
                ids = list(ids)
                # Drop the label of the axis summed out for this column.
                del ids[2-sax]
            curr_ids.append(ids)
        ax = pylab.subplot(4,3,sax+1)
        # Only the last column carries the shared colorbar.
        plot_colorbar = (sax == 2)
        dadi.Plotting.plot_single_2d_sfs(marg_data, vmin=vmin, vmax=vmax, pop_ids=curr_ids[0],
                                         extend=extend, colorbar=plot_colorbar)
        pylab.subplot(4,3,sax+4, sharex=ax, sharey=ax)
        dadi.Plotting.plot_single_2d_sfs(marg_model, vmin=vmin, vmax=vmax,
                                         pop_ids=curr_ids[1], extend=extend, colorbar=False)
        resid = resids[sax]
        pylab.subplot(4,3,sax+7, sharex=ax, sharey=ax)
        dadi.Plotting.plot_2d_resid(resid, resid_range, pop_ids=curr_ids[2],
                                    extend=resid_extend, colorbar=plot_colorbar)
        ax = pylab.subplot(4,3,sax+10)
        # Histogram only the unmasked residual entries.
        flatresid = numpy.compress(numpy.logical_not(resid.mask.ravel()),
                                   resid.ravel())
        # NOTE(review): `normed=True` is the pre-matplotlib-2.1 keyword; on
        # modern matplotlib this must be `density=True` -- confirm the
        # pinned matplotlib version.
        ax.hist(flatresid, bins=20, normed=True)
        ax.set_yticks([])
    pylab.savefig(filename, bbox_inches='tight')
################################################
## MS stuff
## and discoal... and msAdmix....
##########
def IM_misorient_admix_core(params):
    """
    Build the msAdmix core command string for the IM_misorient_admix model.

    params: the dadi parameter tuple
        (nu1_0,nu2_0,nu1,nu2,T,m12,m21,t_ad,p_ad,p_misid).
    Returns the demography portion of an msAdmix command line, with dadi
    units converted to ms units (times halved, rates/growth doubled).
    """
    nu1_0, nu2_0, nu1, nu2, T, m12, m21, t_ad, p_ad, p_misid = params
    # Exponential growth rates implied by the start and end sizes.
    alpha1 = numpy.log(nu1 / nu1_0) / T
    alpha2 = numpy.log(nu2 / nu2_0) / T
    return ("-n 1 %f -n 2 %f " % (nu1, nu2) +
            "-eg 0 1 %f -eg 0 2 %f " % (2 * alpha1, 2 * alpha2) +
            "-ma x %f %f x " % (2 * m12, 2 * m21) +
            "-eA %f 2 1 %f " % (t_ad / 2, p_ad) +
            "-ej %f 2 1 -en %f 1 1" % (T / 2, T / 2))
def msAdmix_command(theta, ns, core, iter, recomb=0, rsites=None):
    """
    Generate an msAdmix command line for simulation from a demographic core.

    theta: Assumed theta
    ns: Sample sizes, one entry per population.
    core: Core of the ms command that specifies the demography.
    iter: Iterations to run ms (name kept for interface compatibility,
          although it shadows the builtin).
    recomb: Assumed recombination rate; 0 (falsy) omits the -r flag.
    rsites: Sites for recombination. If falsy, defaults to 10*theta.
    """
    pieces = ["msAdmix %i %i -t %f" % (numpy.sum(ns), iter, theta)]
    # Multiple populations need the island-model flag plus per-pop sizes.
    if len(ns) > 1:
        pieces.append("-I %i %s" % (len(ns), ' '.join(map(str, ns))))
    pieces.append(core)
    if recomb:
        if not rsites:
            rsites = theta * 10
        pieces.append("-r %f %i" % (recomb, rsites))
    return " ".join(pieces)
| {
"repo_name": "kern-lab/shanku_et_al",
"path": "dadiStuff/dadiFunctions.py",
"copies": "1",
"size": "33020",
"license": "mit",
"hash": 8386585107928500000,
"line_mean": 39.2682926829,
"line_max": 114,
"alpha_frac": 0.567383404,
"autogenerated": false,
"ratio": 2.504930966469428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3572314370469428,
"avg_score": null,
"num_lines": null
} |
# A collection of functions for processing, manipulating and calculating
# necessary information from tensors
#
# Aim for unifying the API among numpy, tensorflow and pytorch.
# Since numpy is the most popular framework, the function names strictly
# follow numpy's names and arguments.
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from __future__ import absolute_import, division, print_function
import copy
import inspect
import numbers
from collections import defaultdict
from contextlib import contextmanager
from functools import wraps
import numpy as np
import scipy as sp
import tensorflow as tf
import torch
from six import string_types
from six.moves import builtins
from tensorflow import nest
from tensorflow.python.ops import init_ops
from odin.utils import as_tuple, is_number, is_same_shape, is_string, uuid
# TODO: add stack for setting framework context
_FRAMEWORK_STACK = ['numpy']
class framework_(object):
  """Context manager that pushes a default framework onto the global stack.

  ```python
  with bk.framework_('tensorflow') as fw:
    ...  # get_framework() is tensorflow inside the block
  # the previous framework is restored on exit
  ```

  Calling ``framework_('tensorflow')`` without ``with`` leaves the new
  framework active until ``reset_framework()`` is called explicitly.
  """

  def __init__(self, framework):
    parsed = parse_framework(framework)
    self._framework = parsed
    # Push immediately so the framework takes effect even without `with`.
    _FRAMEWORK_STACK.append(parsed)

  def __enter__(self):
    return parse_framework(self._framework)

  def __exit__(self, *args):
    # Pop what __init__ pushed, restoring the enclosing context.
    reset_framework()
def reset_framework():
  """Pop the most recent framework off the context stack; the default
  (numpy) entry at the bottom is never removed."""
  if len(_FRAMEWORK_STACK) > 1:
    del _FRAMEWORK_STACK[-1]
def get_framework():
  """Return the framework currently on top of the context stack (the
  module name string or module pushed last; 'numpy' is the default)."""
  return _FRAMEWORK_STACK[-1]
def parse_framework(alias):
  """ Convert a string or object to the appropriate framework module:
  numpy, tensorflow or torch.

  alias may be: one of the three modules themselves, None (returns the
  current context framework), a class, an arbitrary instance (classified
  via its MRO string), or an alias string such as 'tf' or 'pytorch'.
  Anything unrecognized falls back to numpy.
  """
  if inspect.ismodule(alias):
    if alias in (tf, torch, np):
      return alias
    alias = str(alias)
  elif alias is None:
    return get_framework()
  elif inspect.isclass(alias):
    alias = ''.join([str(i) for i in type.mro(alias)])
  elif not isinstance(alias, str):  # py3: six.string_types == (str,)
    alias = ''.join([str(i) for i in type.mro(type(alias))])
  alias = alias.strip().lower()
  # BUGFIX: check the torch aliases FIRST. Names like "<class
  # 'torch.Tensor'>" also contain the substring 'tensor', so testing the
  # tensorflow aliases first misclassified every torch object as tf.
  if any(i in alias for i in ['torch', 'pytorch', 'pt', 'tr']):
    return torch
  if any(i in alias for i in ['tf', 'tensorflow', 'tensor']):
    return tf
  return np
# ===========================================================================
# Helper
# ===========================================================================
def _normalize_axis(axis, ndim):
if axis is None:
return None
if isinstance(axis, (tuple, list)):
return tuple([a % ndim if a is not None else a for a in axis])
return axis % ndim
def dtype_universal(dtype,
                    torch_dtype=False,
                    tf_dtype=False,
                    np_dtype=False,
                    framework=None):
  """Normalize a dtype given as a string or a tf/torch/numpy dtype object,
  optionally converting it into one target framework's dtype object.

  At most one of torch_dtype/tf_dtype/np_dtype may be True; `framework`
  (any alias accepted by parse_framework) overrides all three flags.
  With no target selected, the canonical lower-case string is returned.
  """
  if dtype is None:
    return dtype
  if sum([torch_dtype, tf_dtype, np_dtype]) > 1:
    raise ValueError("Cannot only return dtype for 1 framework a time.")
  # First canonicalize any framework dtype object into its string name.
  if isinstance(dtype, tf.dtypes.DType):
    dtype = dtype.name
  elif isinstance(dtype, torch.dtype):
    # str(torch.float32) == 'torch.float32' -> keep only 'float32'
    dtype = str(dtype).split('.')[-1]
  elif isinstance(dtype, np.dtype):
    dtype = np.dtype(dtype).name
  # `framework` wins over the boolean flags when given.
  if framework is not None:
    framework = parse_framework(framework)
    if framework == np:
      np_dtype = True
      torch_dtype = False
      tf_dtype = False
    if framework == torch:
      torch_dtype = True
      np_dtype = False
      tf_dtype = False
    if framework == tf:
      tf_dtype = True
      torch_dtype = False
      np_dtype = False
  dtype = dtype.lower().strip()
  # torch has no generic as_dtype(), so map the common names by hand.
  # NOTE(review): a torch name outside this table (e.g. 'complex64')
  # silently falls through and returns the plain string.
  if torch_dtype:
    if dtype == 'float' or dtype == 'float32':
      return torch.float32
    if dtype == 'float64':
      return torch.float64
    if dtype == 'float16' or dtype == 'half':
      return torch.float16
    if dtype == 'int8':
      return torch.int8
    if dtype == 'uint8':
      return torch.uint8
    if dtype == 'int16' or dtype == 'short':
      return torch.int16
    if dtype == 'int' or dtype == 'int32':
      return torch.int32
    if dtype == 'int64' or dtype == 'long':
      return torch.int64
    if 'bool' in dtype:
      return torch.bool
  if tf_dtype:
    return tf.as_dtype(dtype)
  if np_dtype:
    return np.dtype(dtype)
  return dtype
def cast(x, dtype):
  """Cast `x` to `dtype`, dispatching on the framework of either argument.

  A tf tensor or tf.DType routes through tf.cast; a torch tensor or
  torch.dtype routes through Tensor.type (converting `x` to a tensor
  first if needed); everything else is handled by numpy.
  """
  if tf.is_tensor(x) or isinstance(dtype, tf.DType):
    return tf.cast(x, dtype=dtype_universal(dtype, tf_dtype=True))
  if torch.is_tensor(x) or isinstance(dtype, torch.dtype):
    if not torch.is_tensor(x):
      x = torch.tensor(x)
    return x.type(dtype_universal(dtype, torch_dtype=True))
  # BUGFIX/modernization: np.cast was removed in NumPy 2.0;
  # np.asarray(x, dtype=...) is the documented equivalent of the old
  # np.cast[dtype](x).
  return np.asarray(x, dtype=dtype_universal(dtype, np_dtype=True))
def array(x, framework=None, dtype=None):
  """ Equivalent of `numpy.array` / `tensorflow.convert_to_tensor` /
  `torch.from_numpy`, converting `x` into the requested framework.
  Cross-framework conversion is always routed through numpy. Note that
  `dtype` is only applied when an actual conversion happens. """
  src = parse_framework(x)
  dst = parse_framework(framework)
  if src == dst:
    return x
  # Any conversion must go through numpy first.
  if src != np:
    x = x.numpy()
  if dst == tf:
    return tf.convert_to_tensor(x, dtype=dtype)
  if dst == torch:
    if dtype is not None:
      x = cast(x, dtype)
    return torch.from_numpy(x)
  return x
# ===========================================================================
# Variable and gradients
# ===========================================================================
def variable(initial_value, framework=None, dtype=None, trainable=True):
  """Create a framework variable (`tf.Variable` or `torch.nn.Parameter`)
  holding `initial_value`.

  Raises RuntimeError for frameworks without variable support (numpy).
  """
  fw = parse_framework(framework)
  if fw == torch:
    data = torch.tensor(data=initial_value,
                        dtype=dtype_universal(dtype, torch_dtype=True),
                        requires_grad=trainable)
    return torch.nn.Parameter(data=data, requires_grad=trainable)
  if fw == tf:
    return tf.Variable(initial_value=initial_value,
                       dtype=dtype_universal(dtype, tf_dtype=True),
                       trainable=trainable)
  raise RuntimeError("No variable support for framework: %s" % str(fw))
def grad(fn_outputs, inputs, grad_outputs=None, return_outputs=False):
  """ Compute and returns the sum of gradients of outputs w.r.t. the inputs.

  fn_outputs: zero-argument callable returning the output tensor(s).
  inputs: tensor or nested structure of tensors to differentiate w.r.t.
  grad_outputs: optional upstream gradients (torch grad_outputs /
    tf output_gradients).
  return_outputs: when True, also return the flattened outputs.

  Raises NotImplementedError when the inputs are neither pytorch nor
  tensorflow tensors.
  """
  if not callable(fn_outputs):
    raise ValueError('fn_outputs must be a callable return a list of tensors')
  inputs = nest.flatten(inputs)
  if torch.is_tensor(inputs[0]):
    outputs = nest.flatten(fn_outputs())
    gradients = torch.autograd.grad(outputs=outputs,
                                    inputs=inputs,
                                    grad_outputs=grad_outputs)
  elif tf.is_tensor(inputs[0]):
    with tf.GradientTape() as tape:
      tape.watch(inputs)
      outputs = nest.flatten(fn_outputs())
    gradients = tape.gradient(target=outputs,
                              sources=inputs,
                              output_gradients=grad_outputs)
  else:
    # BUGFIX: this raise previously sat after an unconditional return and
    # was unreachable; unsupported inputs crashed with a NameError on
    # `gradients` instead of reporting the real problem.
    raise NotImplementedError(
        "gradient function only support pytorch or tensorflow")
  if not return_outputs:
    return gradients
  return gradients, outputs
# ===========================================================================
# Allocation and masking
# ===========================================================================
def ones_like(x, dtype=None):
  """Return an all-ones tensor shaped like `x`, in `x`'s own framework."""
  for is_mine, fill in ((tf.is_tensor, tf.ones_like),
                        (torch.is_tensor, torch.ones_like)):
    if is_mine(x):
      return fill(x, dtype=dtype)
  return np.ones_like(x, dtype=dtype)
def zeros_like(x, dtype=None):
  """Return an all-zeros tensor shaped like `x`, in `x`'s own framework."""
  for is_mine, fill in ((tf.is_tensor, tf.zeros_like),
                        (torch.is_tensor, torch.zeros_like)):
    if is_mine(x):
      return fill(x, dtype=dtype)
  return np.zeros_like(x, dtype=dtype)
def ones(shape, dtype='float32', framework=None):
  """All-ones tensor of `shape` created in the requested framework."""
  fw = parse_framework(framework)
  return fw.ones(shape, dtype=dtype_universal(dtype, framework=fw))
def zeros(shape, dtype='float32', framework=None):
  """All-zeros tensor of `shape` created in the requested framework."""
  fw = parse_framework(framework)
  return fw.zeros(shape, dtype=dtype_universal(dtype, framework=fw))
def nonzeros(x, value):
  """Replace every zero entry of `x` with the (nonzero) scalar `value`."""
  is_zero = equal(x, 0.)
  replacement = zeros_like(x) + value
  return where(is_zero, replacement, x)
def tril_mask(shape, framework=None):
  """Create a lower-triangular boolean mask over the last 2 dimensions of
  `shape`: entry (i, j) is True wherever row index >= column index."""
  one = ones(shape=shape, dtype='int32', framework=framework)
  # Cumulative sums of ones act as 1-based row/column index grids.
  row_idx = cumsum(one, axis=-2)
  col_idx = cumsum(one, axis=-1)
  return greater_equal(row_idx, col_idx)
def tril(m, k=0):
  """
  Lower triangle of an array (tensorflow tensors only).

  Return a copy of `m` with elements above the `k`-th diagonal zeroed.

  Parameters
  ----------
  m : array_like, shape (M, N)
    Input array.
  k : int, optional
    Diagonal above which to zero elements. `k = 0` (the default) is the
    main diagonal, `k < 0` is below it and `k > 0` is above.

  Returns
  -------
  tril : ndarray, shape (M, N)
    Lower triangle of `m`, of same shape and data-type as `m`.
  """
  if k > 0:
    # keep everything below the main diagonal plus k diagonals above it
    return tf.linalg.band_part(input=m, num_lower=-1, num_upper=k)
  if k == 0:
    return tf.linalg.band_part(input=m, num_lower=-1, num_upper=0)
  # k < 0: subtract the band strictly above the k-th sub-diagonal
  upper_part = tf.linalg.band_part(input=m, num_lower=np.abs(k) - 1,
                                   num_upper=-1)
  return tf.subtract(m, upper_part)
def tril_indices(n, k=0):
  """ Similar as `numpy.tril_indices`, for tensorflow.

  @Author: avdrher
  https://github.com/GPflow/GPflow/issues/439

  Parameters
  ----------
  n : int
    The row dimension of the square arrays for which the returned
    indices will be valid.
  k : int, optional
    Diagonal offset: `k = 0` (the default) is the main diagonal,
    `k < 0` is below it and `k > 0` is above.

  Returns
  -------
  inds : tuple of arrays
    Two index tensors (row indices, column indices) covering the
    lower triangle.
  """
  # n x n grids of column and row indices respectively.
  col_grid = tf.tile(tf.expand_dims(tf.range(n), axis=0), [n, 1])
  row_grid = tf.tile(tf.expand_dims(tf.range(n), axis=1), [1, n])
  keep = tf.transpose((col_grid - row_grid) >= -k)
  ix1 = tf.boolean_mask(row_grid, keep)
  ix2 = tf.boolean_mask(col_grid, keep)
  return ix1, ix2
def prior2weights(prior,
                  exponential=False,
                  min_value=0.1,
                  max_value=None,
                  norm=False):
  """ TODO: finish this
  Turn class priors into per-class weights: the most probable class gets
  weight 1 and rarer classes get proportionally larger weights.

  Parameters
  ----------
  prior: numpy.ndarray [nb_classes,]
    probabilty values of each classes prior,
    sum of all prior must be equal to 1.
  exponential: bool
    NOTE(review): this branch calls `interp.expIn`, but `interp` is not
    imported anywhere in this module and will raise NameError -- confirm
    the missing import.
  min_value, max_value: float or None
    when BOTH are given, weights are rescaled linearly into
    [min_value, max_value].
  norm: bool
    if True, normalize output weights to sum up to 1.
  """
  weights = np.array(prior).ravel()
  # Invert the priors and scale so the most probable class maps to 1.
  weights = np.max(weights) / weights
  if exponential:
    ranked = sorted(enumerate(weights), key=lambda pair: pair[-1],
                    reverse=False)
    alpha = interp.expIn(n=len(ranked), power=10)
    scaled = {idx: a * w for a, (idx, w) in zip(alpha, ranked)}
    weights = np.array([scaled[i] for i in range(len(scaled))]) + 1
  # Rescale into [min_value, max_value] only when both bounds are given.
  if min_value is not None and max_value is not None:
    lo = float(min_value)
    hi = float(max_value)
    span = np.max(weights) - np.min(weights)
    weights = (hi - lo) * (weights - np.min(weights)) / span + lo
  # Optionally normalize the weights to sum to one.
  if norm:
    weights = weights / np.sum(weights)
  return weights
def entropy(p, name=None):
  """Return the discrete Shannon entropy of `p`: -sum(p * log(p)).

  p: tensor of probabilities (no zero-handling: p == 0 yields NaN terms).
  name: optional tensorflow name scope (defaults to "entropy").
  """
  # Modernized for TF2 (this module already relies on TF2 APIs such as
  # tf.GradientTape): tf.name_scope takes a single name, and tf.log moved
  # to tf.math.log.
  with tf.name_scope("entropy" if name is None else name):
    return -tf.reduce_sum(p * tf.math.log(p))
def upsample(x, scale, axes, method='nn', name=None):
  """Upsample a tensorflow tensor along the given axes.

  Parameters
  ----------
  scale: int, list of int
      scaling up factor
  axes: int, list of int
      the axes of tensor which the upsampling method will be applied
  method: str, int
      'nn' for nearest neighbor (e.g. [1, 2] => [1, 1, 2, 2]),
      'pad' for padding within the tensor. 'pad_margin' do padding
      in the margin of the tensor. 'repeat' simple algorithm for
      repeating the element (e.g. [1, 2] => [1, 2, 1, 2])

  NOTE(review): this relies on TF1-era APIs (two-argument tf.name_scope,
  tf.ceil/tf.floor) while other functions in this module use TF2 APIs --
  confirm the targeted tensorflow version.
  """
  with tf.name_scope(name, "Upsample"):
    method = method.lower()
    input_shape = tf.shape(x)
    input_shape_int = x.shape.as_list()
    ndims = x.shape.ndims
    # normalize all negative axes
    if axes is None:
      raise ValueError("axes cannot be None.")
    # NOTE(review): the `[1, 2] if axes is None` arm below is dead code --
    # axes is None was already rejected just above.
    axes = [1, 2] if axes is None else \
        [i % ndims for i in as_tuple(axes)]
    # NOTE(review): sorted() returns a new list; this call discards its
    # result, so `axes` keeps the caller's order -- confirm whether
    # `axes.sort()` (and re-pairing with `scale`) was intended.
    sorted(axes)
    # make scale a tuple
    scale = as_tuple(scale, N=len(axes), t=int)
    # mapping from axis -> scale (axes not being upsampled map to 1)
    scale_map = defaultdict(lambda: 1)
    scale_map.update([(i, j) for i, j in zip(axes, scale)])
    # create final output_shape
    output_shape = [input_shape[i] * scale_map[i] for i in range(ndims)]
    # ====== Nearest neighbor method ====== #
    if method == 'nn':
      # tensorflow only support for tile <= 6-D tensor
      if ndims >= 6:
        raise ValueError(
            'upsample with NN mode does not support rank >= 6 tensor.')
      elif ndims + len(axes) > 6:
        # too many temp dims for one tile: upsample one axis at a time
        for a in axes:
          x = upsample(x, scale_map[a], axes=a, method='nn')
      else:
        # repeat the tensor: append one broadcast dim per upsampled axis,
        # tile into it, then interleave the new dims after their axes
        x = transpose(x, pattern=list(range(ndims)) + ['x'] * len(axes))
        x = repeat(x, scale, axes=[i for i in range(ndims, ndims + len(axes))])
        # transpose it back to the right shape
        axes_map = {i: j for i, j in zip(axes, range(ndims, ndims + len(axes)))}
        new_axes = []
        for i in range(ndims):
          if i not in axes_map:
            new_axes.append(i)
          else:
            new_axes += [i, axes_map[i]]
        x = tf.transpose(x, perm=new_axes)
        x = reshape(x, output_shape)
    # ====== pading_margin ====== #
    elif method.lower() == 'pad_margin':
      # pad (scale-1)*size zeros split between the two margins of each axis
      paddings = [[0, 0] if i not in axes else [
          tf.cast(tf.ceil(input_shape[i] * (scale_map[i] - 1) / 2), 'int32'),
          tf.cast(tf.floor(input_shape[i] * (scale_map[i] - 1) / 2), 'int32')
      ] for i in range(ndims)]
      x = tf.pad(x, paddings=paddings, mode='CONSTANT')
    # ====== pading ====== #
    elif method == 'pad':
      raise NotImplementedError
      # x = tf.scatter_nd(indices, x, shape=output_shape)
    # ====== repeat ====== #
    elif method == 'repeat':
      x = repeat(x, n=scale, axes=axes)
    # ====== no support ====== #
    else:
      raise ValueError("No support for method='%s'" % method)
    # ====== add_shape ====== #
    # restore the static shape info lost to the dynamic-shape ops above
    return set_shape(x,
                     shape=[
                         s * scale_map[i] if is_number(s) else None
                         for i, s in enumerate(input_shape_int)
                     ])
# ===========================================================================
# Shape manipulation
# ===========================================================================
def reshape(x, shape):
  """ More flexible version of reshape operation.

  Entries of `shape` may be:
  - an int: the new dimension size,
  - None: inferred (-1),
  - a list/tuple [i]: copy the size of input dimension i.

  Example
  -------
  x.shape = [25, 08, 12]
  reshape(shape=([1], [2], [0]))
  => x.shape = (08, 12, 25)
  """
  if tf.is_tensor(x):
    backend_reshape = tf.reshape
  elif torch.is_tensor(x):
    backend_reshape = lambda t, s: t.view(s)
  else:
    backend_reshape = np.reshape
  old_shape = x.shape
  resolved = []
  for dim in shape:
    if dim is None:
      resolved.append(-1)
    elif isinstance(dim, (tuple, list)):
      # [i] means "reuse the size of input dimension i"
      resolved.append(old_shape[dim[0]])
    else:
      resolved.append(dim)
  return backend_reshape(x, tuple(resolved))
def expand_dims(x, axis):
  """Insert a length-1 dimension at `axis`, following `x`'s framework."""
  if tf.is_tensor(x):
    fn = tf.expand_dims
  elif torch.is_tensor(x):
    fn = torch.unsqueeze
  else:
    fn = np.expand_dims
  return fn(x, axis)
def squeeze(x, axis):
  """Remove the length-1 dimension at `axis`, following `x`'s framework."""
  if tf.is_tensor(x):
    fn = tf.squeeze
  elif torch.is_tensor(x):
    fn = torch.squeeze
  else:
    fn = np.squeeze
  return fn(x, axis)
def concatenate(x, axis):
  """Concatenate a sequence of tensors along `axis`, dispatching on the
  framework of the first element."""
  first = x[0]
  if tf.is_tensor(first):
    return tf.concat(x, axis)
  if torch.is_tensor(first):
    return torch.cat(x, axis)
  return np.concatenate(x, axis)
def swapaxes(x, axis1, axis2):
  """ Interchange two axes of an array, in `x`'s own framework. """
  if torch.is_tensor(x):
    return x.transpose(axis1, axis2)
  if tf.is_tensor(x):
    # build an identity permutation and swap the two requested entries
    perm = list(range(x.shape.ndims))
    perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
    return tf.transpose(x, perm)
  return np.swapaxes(x, axis1, axis2)
def transpose(x, pattern):
  """ Reorder the dimensions of `x`, optionally inserting broadcasted
  dimensions.

  Parameters
  ----------
  pattern
      List/tuple of ints mixed with 'x' entries; each 'x' inserts a
      broadcastable (length-1) dimension at that position.

  Examples
  --------
  For a 2D matrix, ``transpose([0, 'x', 1])`` creates a 3D view whose
  middle dimension is an implicit broadcast dimension; on the transposed
  matrix the equivalent is ``transpose([1, 'x', 0])``.

  @Author: Theano Authors
  """
  axis_order = [p for p in pattern if p != 'x']
  if tf.is_tensor(x):
    x = tf.transpose(x, perm=axis_order)
    insert = tf.expand_dims
  elif torch.is_tensor(x):
    x = x.permute(axis_order)
    insert = torch.unsqueeze
  else:
    x = np.transpose(x, axis_order)
    insert = np.expand_dims
  # insert the broadcast dimensions marked by 'x'
  for position, p in enumerate(pattern):
    if p == 'x':
      x = insert(x, position)
  return x
def flatten(x, outdim=1):
  """ Collapse all dimensions from `outdim - 1` onward into a single one,
  keeping the leading `outdim - 1` dimensions intact. """
  if outdim == 1:
    return reshape(x, [-1])
  # tensorflow needs the dynamic shape; torch/numpy expose .shape directly
  shape = tf.shape(x) if tf.is_tensor(x) else x.shape
  keep = tuple(shape[i] for i in range(outdim - 1))
  collapsed = 1
  for dim in shape[(outdim - 1):]:
    collapsed = collapsed * dim
  return reshape(x, keep + (collapsed,))
def repeat(x, n, axes=None, name="Repeat"):
  """ Repeat a N-D tensorflow tensor.

  If x has shape (s1, s2, s3) and axes=(1, -1), the output
  will have shape (s1, s2 * n[0], s3 * n[1]).

  Parameters
  ----------
  n : {int, list of int}
    each number of repeatation according to the axes
  axes : {int, list or int}
    all axes for repeating; None repeats every axis `n` times.
  """
  # BUGFIX: `ndim` was previously assigned only inside the axes-given
  # branch, so the `axes is None` path below raised NameError.
  ndim = x.shape.ndims
  if axes is not None:
    if not isinstance(axes, (tuple, list)):
      axes = (axes,)
    axes = _normalize_axis(axes, ndim)
    n = as_tuple(n, len(axes))
    return tf.tile(
        x,
        multiples=[n[axes.index(i)] if i in axes else 1 for i in range(ndim)],
        name=name)
  else:
    n = int(n)
    return tf.tile(x, multiples=[n for i in range(ndim)], name=name)
# ===========================================================================
# Logical function
# ===========================================================================
def where(condition, x=None, y=None):
  """Framework-dispatching `where`: select from `x` where `condition` is
  true, else from `y`. Torch scalars are promoted to tensors first."""
  args = (condition, x, y)
  if any(tf.is_tensor(a) for a in args):
    return tf.where(condition, x, y)
  if any(torch.is_tensor(a) for a in args):
    if not torch.is_tensor(x):
      x = torch.tensor(x, dtype=y.dtype)
    if not torch.is_tensor(y):
      y = torch.tensor(y, dtype=x.dtype)
    return torch.where(condition, x, y)
  return np.where(condition, x, y)
def equal(x, y):
  """Element-wise equality, following the framework of either input."""
  uses_tf = tf.is_tensor(x) or tf.is_tensor(y)
  if uses_tf:
    return tf.equal(x, y)
  uses_torch = torch.is_tensor(x) or torch.is_tensor(y)
  if uses_torch:
    return x == y
  return np.equal(x, y)
def greater_equal(x, y):
  """Element-wise `x >= y`, following the framework of either input."""
  uses_tf = tf.is_tensor(x) or tf.is_tensor(y)
  if uses_tf:
    return tf.greater_equal(x, y)
  uses_torch = torch.is_tensor(x) or torch.is_tensor(y)
  if uses_torch:
    return x >= y
  return np.greater_equal(x, y)
def switch(condition, then_expression, else_expression):
  """Element-wise select between two tensorflow expressions.

  condition: tensor castable to bool; may have a trailing dimension of 1
    relative to the expressions (it is squeezed away before tf.where).
  then_expression, else_expression: tensors of identical shape.
  Returns a tensor with the static shape of `then_expression`.
  """
  condition = cast(condition, 'bool')
  x_shape = copy.copy(then_expression.shape)
  # tensorflow require the last dimension of 3 variables is equal, too
  # it is irrelevant since condition can have shape[-1] = 1
  cond_ndims = condition.shape.ndims
  if cond_ndims > 1 and condition.shape[-1] != x_shape[-1]:
    # NOTE(review): this drops the condition's last dimension whenever it
    # differs from the expressions' -- presumably it is always 1 there;
    # confirm against callers.
    cond_shape = tf.shape(condition)
    condition = tf.reshape(condition,
                           [cond_shape[i] for i in range(cond_ndims - 1)])
  x = tf.where(condition, then_expression, else_expression)
  # restore the static shape info lost through the dynamic reshape
  x.set_shape(x_shape)
  return x
# ===========================================================================
# Logical functions
# ===========================================================================
def logical_(fn, x, y):
  """Apply the named bitwise/logical operator ('and' or 'or') to `x` and
  `y`, treating a None operand as the identity (the other operand is
  returned unchanged). Raises NotImplementedError for unknown names."""
  if x is None:
    return y
  if y is None:
    return x
  if fn == 'and':
    return x & y
  if fn == 'or':
    return x | y
  raise NotImplementedError(str(fn))
def logical_and(x, y):
    """Element-wise ``and`` that tolerates ``None``: when either operand is
    ``None`` the other one is returned unchanged."""
    return logical_('and', x, y)
def logical_or(x, y):
    """Element-wise ``or`` that tolerates ``None``: when either operand is
    ``None`` the other one is returned unchanged."""
    return logical_('or', x, y)
def logical_not(x):
    """Element-wise logical negation via the ``~`` operator.

    Works for any object implementing ``__invert__`` (e.g. boolean arrays).
    """
    return ~x
def apply_mask(x, mask):
    """Multiply ``x`` by ``mask`` broadcast over the last axis.

    x : 3D tensor
    mask : 2D tensor

    Example
    -------
    >>> Input: [128, 500, 120]
    >>> Mask: [1, 1, 0]
    >>> Output: [128, 500, 0]
    """
    # mask gains a trailing singleton axis so it broadcasts across the last
    # dimension of x; masked positions are zeroed (output shape equals x's).
    return x * expand_dims(mask, -1)
| {
"repo_name": "imito/odin",
"path": "odin/backend/tensor.py",
"copies": "1",
"size": "22253",
"license": "mit",
"hash": 5555192174463475000,
"line_mean": 29.0716216216,
"line_max": 80,
"alpha_frac": 0.5900328046,
"autogenerated": false,
"ratio": 3.5066183422628425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45966511468628424,
"avg_score": null,
"num_lines": null
} |
""" A collection of functions for RSS feed parsing"""
import discord
from dateutil import parser
import re
from __main__ import botdata
from bs4 import BeautifulSoup
def is_new_blog(entry):
    """Check the newest dota blog entry against the stored record.

    Returns True only when the entry is strictly newer than the recorded one.
    The stored record is updated (or initialized) as a side effect.
    """
    latest = parser.parse(entry.published)  # timestamp of the candidate entry
    recorded = botdata["dotablog"]
    if not latest:
        return False
    if not recorded:
        # first run: remember the date, but don't announce this entry as new
        botdata["dotablog"] = entry.published
        return False
    if parser.parse(recorded) < latest:
        # strictly newer than the record: replace it and report new
        botdata["dotablog"] = entry.published
        return True
    return False
def create_embed(blog_title, entry):
    """Build a rich discord embed linking to a feedparser blog entry."""
    embed = discord.Embed(type='rich')
    # The first non-empty paragraph of the post body supplies the teaser.
    soup = BeautifulSoup(entry.content[0]['value'], "html.parser")
    opening = ""
    for paragraph in soup.find_all('p'):
        if paragraph.text != '':
            opening = paragraph.text
            break
    # Trim the teaser to at most the first two sentences of that paragraph.
    sentences = re.split('(?<=[.!?]) +', opening)
    if len(sentences) < 2:
        hook = opening
    else:
        hook = sentences[0] + ' ' + sentences[1]
    # Gather the remaining metadata for the embed.
    link = entry.link
    published = parser.parse(entry.published)
    image = soup.find("img")
    header = f'The {blog_title} has updated!'
    # Assemble the embed object.
    embed.title = entry.title
    if image:  # not every post carries an image
        embed.set_image(url=image["src"])
        embed.image.proxy_url = link
    embed.timestamp = published
    embed.add_field(name=header, value=hook, inline=False)
    embed.url = link
    return embed
| {
"repo_name": "mdiller/MangoByte",
"path": "cogs/utils/rsstools.py",
"copies": "1",
"size": "1917",
"license": "mit",
"hash": 5920055542606382000,
"line_mean": 29.9193548387,
"line_max": 100,
"alpha_frac": 0.710485133,
"autogenerated": false,
"ratio": 3.380952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4591437513952381,
"avg_score": null,
"num_lines": null
} |
# A collection of functions relating to spacegroup symmetry information
import re
from cctbx import sgtbx
from cctbx.sgtbx.bravais_types import bravais_lattice
# Matches strings consisting only of ASCII digits (note: the ``*`` quantifier
# means the empty string also matches).
_int_re = re.compile("^[0-9]*$")
def get_pointgroup(name):
    """Get the pointgroup for this spacegroup, e.g. P422 for P43212."""
    group = sgtbx.space_group_info(name).group()
    derived = group.build_derived_patterson_group().build_derived_acentric_group()
    symbol = derived.type().lookup_symbol()
    return symbol.replace(" ", "")
def get_lattice(name):
    """Get the lattice for a named spacegroup."""
    # Already one of the 14 Bravais lattice symbols? Hand it straight back.
    if name in (
        "aP", "mP", "mC", "oP", "oC", "oI", "oF",
        "tP", "tI", "hR", "hP", "cP", "cI", "cF",
    ):
        return name
    if isinstance(name, int) or _int_re.match(name):
        # spacegroup given by number (possibly as a digit string)
        lattice = bravais_lattice(number=int(name))
    else:
        lattice = bravais_lattice(symbol=str(name))
    return str(lattice)
def spacegroup_number_to_name(spacegroup_number):
    """Return the name of this spacegroup."""
    info = sgtbx.space_group_info(spacegroup_number)
    return info.type().lookup_symbol()
def spacegroup_name_to_number(spacegroup):
    """Return the number corresponding to this spacegroup."""
    # Numbers (or digit strings) pass straight through unchanged.
    try:
        return int(spacegroup)
    except ValueError:
        return sgtbx.space_group_info(str(spacegroup)).type().number()
def get_num_symops(spacegroup_number):
    """Get the number of symmetry operations that spacegroup number has."""
    group = sgtbx.space_group_info(number=spacegroup_number).group()
    return len(group)
class _Syminfo:
    """Legacy method of accessing functions."""


# Backwards-compatibility facade: older callers accessed these helpers as
# attributes of a ``Syminfo`` object, so re-export the module-level functions
# on a singleton instance.
Syminfo = _Syminfo()
Syminfo.get_pointgroup = get_pointgroup
Syminfo.get_lattice = get_lattice
Syminfo.spacegroup_number_to_name = spacegroup_number_to_name
Syminfo.spacegroup_name_to_number = spacegroup_name_to_number
Syminfo.get_num_symops = get_num_symops
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Handlers/Syminfo.py",
"copies": "1",
"size": "2172",
"license": "bsd-3-clause",
"hash": 2179873679552812500,
"line_mean": 23.6818181818,
"line_max": 82,
"alpha_frac": 0.6404235727,
"autogenerated": false,
"ratio": 3.4097331240188384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45501566967188384,
"avg_score": null,
"num_lines": null
} |
""" A collection of functions to assist with archiving/restoring epoch files.
Epoch files are the few files which can change from one backup to the next and so must be
date stamped to differentiate multiple backups from each other.
Typically only archive_epoch_files and restore_epoch_files are called
Copyright 2014 Hewlett-Packard Development Company, L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from glob import glob
import logging
import os
import shutil
log = logging.getLogger(__name__)
class EpochFiles(object):
    """The on disk epoch files and methods to archive/restore them for a given date.

    Epoch files are the few backup files whose contents change between
    backups, so they are renamed with a date-stamp suffix to keep multiple
    backups distinguishable from each other.
    """

    def __init__(self, backup_dir, catalog_dir, snapshot_name, date):
        """
        :param backup_dir: Base directory of the backup.
        :param catalog_dir: Catalog directory (path relative to backup_dir).
        :param snapshot_name: Snapshot name used for the .txt/.info files.
        :param date: datetime used to build the date-stamp suffix.
        """
        self.date_str = date.strftime("%Y_%m_%d_%H%M")
        self.epoch_files = self._get_epoch_files(backup_dir, catalog_dir, snapshot_name)

    @staticmethod
    def _get_epoch_files(backup_dir, catalog_dir, snapshot_name):
        """Return the list of epoch file paths for this backup.

        NOTE(review): the glob below assumes exactly one ``v_*_catalog/Snapshots``
        directory exists; an IndexError is raised otherwise — confirm intended.
        """
        full_backup_path = os.path.join(backup_dir, catalog_dir.strip('/'))
        files = [
            os.path.join(backup_dir, snapshot_name + '.txt'),
            os.path.join(backup_dir, snapshot_name + '.info'),
            glob(os.path.join(full_backup_path, 'v_*_catalog/Snapshots'))[0] + '/catalog.ctlg',
        ]
        return files

    @staticmethod
    def _move_file(from_path, to_path):
        """ Move a file on the local filesystem, logging an error if the from file does not exist.
        """
        if os.path.exists(from_path):
            shutil.move(from_path, to_path)
        else:
            # Lazy %-style logging args: the message is only formatted if emitted.
            log.error('File %s not found when attempting to move to %s', from_path, to_path)

    def archive(self):
        """ Move epoch files to their date stamped names.
        """
        for path in self.epoch_files:
            self._move_file(path, "%s_%s" % (path, self.date_str))

    def restore(self):
        """ Move epoch files from their date stamped names to their standard names.
        """
        for path in self.epoch_files:
            self._move_file("%s_%s" % (path, self.date_str), path)
| {
"repo_name": "tkuhlman/vertica-swift-backup",
"path": "vertica_backup/epoch.py",
"copies": "1",
"size": "3058",
"license": "mit",
"hash": -5792982280293207000,
"line_mean": 42.6857142857,
"line_max": 102,
"alpha_frac": 0.6899934598,
"autogenerated": false,
"ratio": 4.013123359580052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0040194319675931674,
"num_lines": 70
} |
"""A collection of functions to summarize object information.
This module provides several function which will help you to analyze object
information which was gathered. Often it is sufficient to work with aggregated
data instead of handling the entire set of existing objects. For example, a
memory leak can be identified simply based on the number and size of existing
objects.
A summary contains information about objects in a table-like manner.
Technically, it is a list of lists. Each of these lists represents a row,
whereas the first column reflects the object type, the second column the number
of objects, and the third column the size of all these objects. This allows a
simple table-like output like the following:
============= ============ =============
types # objects total size
============= ============ =============
<type 'dict'> 2 560
<type 'str'> 3 126
<type 'int'> 4 96
<type 'long'> 2 66
<type 'list'> 1 40
============= ============ =============
Another advantage of summaries is that they influence the system you analyze
only to a minimum. Working with references to existing objects will keep these
objects alive. Most of the time this is not desired behavior (as it will have
an impact on the observations). Using summaries reduces this effect greatly.
output representation
---------------------
The output representation of types is defined in summary.representations.
Every type defined in this dictionary will be represented as specified. Each
definition has a list of different representations. The later a representation
appears in this list, the higher its verbosity level. From types which are not
defined in summary.representations the default str() representation will be
used.
Per default, summaries will use the verbosity level 1 for any encountered type.
The reason is that several computations are done with summaries and rows have
to remain comparable. Therefore information which reflect an objects state,
e.g. the current line number of a frame, should not be included. You may add
more detailed information at higher verbosity levels than 1.
"""
import re
import sys
import types
from pympler.util import stringutils
# default to asizeof if sys.getsizeof is not available (prior to Python 2.6)
try:
from sys import getsizeof as _getsizeof
except ImportError:
from pympler.asizeof import flatsize
_getsizeof = flatsize
# Maps a type to a list of representation callables, indexed by verbosity
# level (index 0 = least detail).
representations = {}


def _init_representations():
    """Populate the default ``representations`` table.

    Each entry maps a type to a list of formatting callables ordered by
    increasing verbosity.
    """
    global representations
    if sys.hexversion < 0x3000000:
        # Old-style classes/instances and im_func/im_class only exist on
        # Python 2. (Fixed: the guard previously read 0x2040000, which
        # wrongly skipped this registration on any Python >= 2.4.)
        classobj = [
            lambda c: "classobj(%s)" % repr(c),
        ]
        representations[types.ClassType] = classobj
        instance = [
            lambda f: "instance(%s)" % repr(f.__class__),
        ]
        representations[types.InstanceType] = instance
        instancemethod = [
            lambda i: "instancemethod (%s)" % (repr(i.im_func)),
            lambda i: "instancemethod (%s, %s)" % (repr(i.im_class),
                                                   repr(i.im_func)),
        ]
        representations[types.MethodType] = instancemethod
    frame = [
        lambda f: "frame (codename: %s)" % (f.f_code.co_name),
        lambda f: "frame (codename: %s, codeline: %s)" %
        (f.f_code.co_name, f.f_code.co_firstlineno),
        lambda f: "frame (codename: %s, filename: %s, codeline: %s)" %
        (f.f_code.co_name, f.f_code.co_filename,
         f.f_code.co_firstlineno)
    ]
    representations[types.FrameType] = frame
    _dict = [
        lambda d: str(type(d)),
        lambda d: "dict, len=%s" % len(d),
    ]
    representations[dict] = _dict
    function = [
        lambda f: "function (%s)" % f.__name__,
        # Fixed: ``f.__module`` raised AttributeError -> ``f.__module__``.
        lambda f: "function (%s.%s)" % (f.__module__, f.__name__),
    ]
    representations[types.FunctionType] = function
    _list = [
        lambda l: str(type(l)),
        lambda l: "list, len=%s" % len(l)
    ]
    representations[list] = _list
    module = [lambda m: "module(%s)" % m.__name__]
    representations[types.ModuleType] = module
    _set = [
        lambda s: str(type(s)),
        lambda s: "set, len=%s" % len(s)
    ]
    representations[set] = _set


_init_representations()
def summarize(objects):
    """Summarize an objects list.

    Return a list of lists, whereas each row consists of::

      [str(type), number of objects of this type, total size of these objects].

    No guarantee regarding the order is given.
    """
    count = {}
    total_size = {}
    for obj in objects:
        key = _repr(obj)
        count[key] = count.get(key, 0) + 1
        total_size[key] = total_size.get(key, 0) + _getsizeof(obj)
    return [[key, count[key], total_size[key]] for key in count]
def get_diff(left, right):
    """Get the difference of two summaries.

    Subtracts the values of the right summary from the values of the left
    summary.

    If similar rows appear on both sides, they are included in the summary
    with 0 for number of elements and total size.
    If the number of elements of a row of the diff is 0, but the total size
    is not, it means that objects likely have changed, but not their number,
    thus resulting in a changed size.
    """
    # Index left rows by type key for O(1) lookups instead of the previous
    # O(n*m) nested scans. Summary rows have unique type keys by
    # construction (see summarize), so a dict is a faithful index.
    left_by_key = {row[0]: row for row in left}
    right_keys = {row[0] for row in right}
    res = []
    for row_r in right:
        row_l = left_by_key.get(row_r[0])
        if row_l is not None:
            res.append([row_r[0], row_r[1] - row_l[1], row_r[2] - row_l[2]])
        else:
            # present only on the right: carried over unchanged
            res.append(row_r)
    for row_l in left:
        if row_l[0] not in right_keys:
            # present only on the left: negated, since it was subtracted away
            res.append([row_l[0], -row_l[1], -row_l[2]])
    return res
def format_(rows, limit=15, sort='size', order='descending'):
    """Format the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'
    """
    localrows = [list(row) for row in rows]
    # validate the keyword arguments
    sortby = ['type', '#', 'size']
    if sort not in sortby:
        raise ValueError("invalid sort, should be one of" + str(sortby))
    orders = ['ascending', 'descending']
    if order not in orders:
        raise ValueError("invalid order, should be one of" + str(orders))
    # sort rows: the type column compares by its _repr, numeric columns directly
    descending = order == 'descending'
    if sort == 'type':
        localrows.sort(key=lambda row: _repr(row[0]), reverse=descending)
    else:
        column = sortby.index(sort)
        localrows.sort(key=lambda row: row[column], reverse=descending)
    # keep at most `limit` rows and pretty-print the size column
    localrows = localrows[0:limit]
    for row in localrows:
        row[2] = stringutils.pp(row[2])
    # prepend the header row
    localrows.insert(0, ["types", "# objects", "total size"])
    return _format_table(localrows)
def _format_table(rows, header=True):
"""Format a list of lists as a pretty table.
Keyword arguments:
header -- if True the first row is treated as a table header
inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662
"""
border = "="
# vertical delimiter
vdelim = " | "
# padding nr. of spaces are left around the longest element in the
# column
padding = 1
# may be left,center,right
justify = 'right'
justify = {'left': str.ljust,
'center': str.center,
'right': str.rjust}[justify.lower()]
# calculate column widths (longest item in each col
# plus "padding" nr of spaces on both sides)
cols = zip(*rows)
colWidths = [max([len(str(item)) + 2 * padding for item in col])
for col in cols]
borderline = vdelim.join([w * border for w in colWidths])
for row in rows:
yield vdelim.join([justify(str(item), width)
for (item, width) in zip(row, colWidths)])
if header:
yield borderline
header = False
def print_(rows, limit=15, sort='size', order='descending'):
    """Print the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'
    """
    for row_text in format_(rows, limit=limit, sort=sort, order=order):
        print(row_text)
# regular expressions used by _repr to replace default type representations
type_prefix = re.compile(r"^<type '")  # leading "<type '" wrapper from str(type)
address = re.compile(r' at 0x[0-9a-f]+')  # hex id in default object reprs
type_suffix = re.compile(r"'>$")  # trailing "'>" wrapper
def _repr(o, verbosity=1):
    """Get meaning object representation.

    This function should be used when the simple str(o) output would result in
    too general data. E.g. "<type 'instance'" is less meaningful than
    "instance: Foo".

    Keyword arguments:
    verbosity -- detail level drawn from ``representations``; 0 always falls
                 back to ``str(type(o))``, higher values pick the
                 (verbosity-1)-th formatter, clamped to the last available one
    """
    res = ""
    t = type(o)
    if (verbosity == 0) or (t not in representations):
        res = str(t)
    else:
        verbosity -= 1
        # clamp to the most verbose formatter registered for this type
        # (fixed off-by-one: `<` missed the == case and raised IndexError)
        if len(representations[t]) <= verbosity:
            verbosity = len(representations[t]) - 1
        res = representations[t][verbosity](o)
    # strip memory addresses and "<type '...'>" decoration so that rows
    # stay compact and comparable between snapshots
    res = address.sub('', res)
    res = type_prefix.sub('', res)
    res = type_suffix.sub('', res)
    return res
def _traverse(summary, function, *args):
"""Traverse all objects of a summary and call function with each as a
parameter.
Using this function, the following objects will be traversed:
- the summary
- each row
- each item of a row
"""
function(summary, *args)
for row in summary:
function(row, *args)
for item in row:
function(item, *args)
def _subtract(summary, o):
    """Remove object o from the summary by subtracting it's size."""
    key, num, size = _repr(o), 1, _getsizeof(o)
    matched = False
    for entry in summary:
        if entry[0] == key:
            entry[1] -= num
            entry[2] -= size
            matched = True
    if not matched:
        # no row of this type yet: record the removal as negative counts
        summary.append([key, -num, -size])
    return summary
def _sweep(summary):
"""Remove all rows in which the total size and the total number of
objects is zero.
"""
return [row for row in summary if ((row[2] != 0) or (row[1] != 0))]
| {
"repo_name": "swiftstack/pympler",
"path": "pympler/summary.py",
"copies": "3",
"size": "10768",
"license": "apache-2.0",
"hash": 3904276591235828700,
"line_mean": 32.2345679012,
"line_max": 79,
"alpha_frac": 0.6029903418,
"autogenerated": false,
"ratio": 3.8650394831299355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012860082304526747,
"num_lines": 324
} |
"""A collection of functions which act on surfaces or lists of surfaces."""
from collections import defaultdict
from itertools import combinations
from typing import Dict, List, Tuple, Union # noqa
import warnings
from eppy.bunch_subclass import EpBunch # noqa
from eppy.idf_msequence import Idf_MSequence # noqa
from numpy import float64 # noqa
from shapely.geometry import Polygon
from shapely.ops import polygonize
from shapely.ops import unary_union
from geomeppy.geom.polygons import Polygon2D
from .polygons import intersect, Polygon3D
from .vectors import Vector2D, Vector3D # noqa
from ..utilities import almostequal
def set_coords(
    surface,  # type: EpBunch
    coords,
    # type: Union[List[Vector3D], List[Tuple[float, float, float]], Polygon3D]
    ggr,  # type: Union[List, None, Idf_MSequence]
):
    # type: (...) -> None
    """Update the coordinates of a surface.

    :param surface: The surface to modify.
    :param coords: The new coordinates as lists of [x,y,z] lists.
    :param ggr: Global geometry rules.
    """
    coords = list(coords)
    # drop consecutive duplicate vertices (wrapping around to the first one)
    deduped = [
        vertex
        for i, vertex in enumerate(coords)
        if vertex != coords[(i + 1) % len(coords)]
    ]
    poly = Polygon3D(deduped).normalize_coords(ggr)
    flattened = [axis for vertex in poly for axis in vertex]
    if len(flattened) > 120:
        warnings.warn(
            "To create surfaces with >120 vertices, ensure you have customised your IDD before running EnergyPlus. "
            "https://unmethours.com/question/9343/energy-idf-parsing-error/?answer=9344#post-id-9344"
        )
    # locate the vertex fields and truncate the object just before them
    n_vertices_index = surface.objls.index("Number_of_Vertices")
    first_x = n_vertices_index + 1  # X of first coordinate
    surface.obj = surface.obj[:first_x]
    # append the new vertex field values
    surface.fieldvalues.extend(flattened)
def set_matched_surfaces(surface, matched):
    # type: (EpBunch, EpBunch) -> None
    """Set boundary conditions for two adjoining surfaces.

    :param surface: The first surface.
    :param matched: The second surface.
    """
    surface_key = str(surface.key).upper()
    matched_key = str(matched.key).upper()
    building = "BUILDINGSURFACE:DETAILED"
    shading = {"SHADING:SITE:DETAILED", "SHADING:ZONE:DETAILED"}
    if surface_key == building and matched_key == building:
        # two building surfaces: cross-link them as interior boundaries
        for s in (surface, matched):
            s.Outside_Boundary_Condition = "surface"
            s.Sun_Exposure = "NoSun"
            s.Wind_Exposure = "NoWind"
        surface.Outside_Boundary_Condition_Object = matched.Name
        matched.Outside_Boundary_Condition_Object = surface.Name
    elif surface_key == building and matched_key in shading:
        # building surface backed by a shading surface: treat as adiabatic
        surface.Outside_Boundary_Condition = "adiabatic"
        surface.Sun_Exposure = "NoSun"
        surface.Wind_Exposure = "NoWind"
    elif matched_key == building and surface_key in shading:
        matched.Outside_Boundary_Condition = "adiabatic"
        matched.Sun_Exposure = "NoSun"
        matched.Wind_Exposure = "NoWind"
def set_unmatched_surface(surface, vector):
    # type: (EpBunch, Union[Vector2D, Vector3D]) -> None
    """Set boundary conditions for a surface which does not adjoin another one.

    :param surface: The surface.
    :param vector: The surface normal vector.
    """
    if not hasattr(surface, "View_Factor_to_Ground"):
        # surface type carries no boundary-condition fields; nothing to set
        return
    surface.View_Factor_to_Ground = "autocalculate"
    poly = Polygon3D(surface.coords)
    grounded = min(poly.zs) < 0 or all(z == 0 for z in poly.zs)
    if grounded:
        # below-ground or ground-adjacent surfaces
        surface.Outside_Boundary_Condition_Object = ""
        surface.Outside_Boundary_Condition = "ground"
        surface.Sun_Exposure = "NoSun"
        surface.Wind_Exposure = "NoWind"
        return
    # external surfaces
    surface.Outside_Boundary_Condition = "outdoors"
    surface.Outside_Boundary_Condition_Object = ""
    surface.Wind_Exposure = "WindExposed"
    if almostequal(vector, (0, 0, -1)):
        surface.Sun_Exposure = "NoSun"  # downward facing surfaces
    else:
        surface.Sun_Exposure = "SunExposed"  # other external surfaces
def getidfplanes(surfaces):
    # type: (Idf_MSequence) -> Dict[float64, Dict[Union[Vector2D, Vector3D], List[EpBunch]]]
    """Fast access data structure for potentially matched surfaces.

    Get a data structure populated with all the surfaces in the IDF, keyed by
    their distance from the origin, and their normal vector.

    :param surfaces: List of all the surfaces.
    :returns: Mapping to look up IDF surfaces.
    """
    ndigits = 8  # rounding keeps floating-point keys comparable
    planes = {}  # type: Dict[float64, Dict[Union[Vector2D, Vector3D], List[EpBunch]]]
    for surface in surfaces:
        poly = Polygon3D(surface.coords)
        distance_key = round(poly.distance, ndigits)
        normal_key = Vector3D(
            *(round(axis, ndigits) for axis in poly.normal_vector)
        )
        by_normal = planes.setdefault(distance_key, {})
        by_normal.setdefault(normal_key, []).append(surface)
    return planes
def get_adjacencies(surfaces):
    # type: (Idf_MSequence) -> defaultdict
    """Create a dictionary mapping surfaces to their adjacent surfaces.

    :param surfaces: A mutable list of surfaces.
    :returns: Mapping of surfaces to adjacent surfaces.
    """
    adjacencies = defaultdict(list)  # type: defaultdict
    # compare every pair of surfaces for intersections
    for first, second in combinations(surfaces, 2):
        adjacencies = populate_adjacencies(adjacencies, first, second)
    # reduce each surface's polygons to a non-overlapping set
    for key in adjacencies:
        adjacencies[key] = minimal_set(adjacencies[key])
    return adjacencies
def minimal_set(polys):
    """Remove overlaps from a set of polygons.

    :param polys: List of polygons.
    :returns: List of polygons with no overlaps.
    """
    target_normal = polys[0].normal_vector
    # polygonize in 2D: the union of all boundaries yields non-overlapping faces
    flattened = [poly.project_to_2D() for poly in polys]
    boundaries = [Polygon(poly).boundary for poly in flattened]
    merged = unary_union(boundaries)
    faces = [Polygon2D(face.boundary.coords) for face in polygonize(merged)]
    # lift the faces back into the original 3D plane
    lifted = [face.project_to_3D(polys[0]) for face in faces]
    if not almostequal(lifted[0].normal_vector, target_normal):
        # polygonize may flip the winding order; restore the orientation
        lifted = [face.invert_orientation() for face in lifted]
    return [face for face in lifted if face.area > 0]
def populate_adjacencies(adjacencies, s1, s2):
    # type: (defaultdict, EpBunch, EpBunch) -> defaultdict
    """Update the adjacencies dict with any intersections between two surfaces.

    :param adjacencies: Dict to contain lists of adjacent surfaces.
    :param s1: Object representing an EnergyPlus surface.
    :param s2: Object representing an EnergyPlus surface.
    :returns: An updated dict of adjacencies.
    """
    poly1 = Polygon3D(s1.coords)
    poly2 = Polygon3D(s2.coords)
    # quick rejection: the planes must (almost) coincide before intersecting
    if not almostequal(abs(poly1.distance), abs(poly2.distance), 4):
        return adjacencies
    same_normal = almostequal(poly1.normal_vector, poly2.normal_vector, 4)
    opposite_normal = almostequal(poly1.normal_vector, -poly2.normal_vector, 4)
    if not (same_normal or opposite_normal):
        return adjacencies
    if poly1.intersect(poly2):
        # record the intersection pieces facing each surface's own normal
        pieces = intersect(poly1, poly2)
        adjacencies[(s1.key, s1.Name)] += [
            piece for piece in pieces
            if almostequal(piece.normal_vector, poly1.normal_vector, 4)
        ]
        adjacencies[(s2.key, s2.Name)] += [
            piece for piece in pieces
            if almostequal(piece.normal_vector, poly2.normal_vector, 4)
        ]
    return adjacencies
| {
"repo_name": "jamiebull1/geomeppy",
"path": "geomeppy/geom/surfaces.py",
"copies": "1",
"size": "7663",
"license": "mit",
"hash": -8191508465568749000,
"line_mean": 37.315,
"line_max": 119,
"alpha_frac": 0.661490278,
"autogenerated": false,
"ratio": 3.427101967799642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4588592245799642,
"avg_score": null,
"num_lines": null
} |
'A collection of general purpose tools for reading files'
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import print_function
import logging
import zlib
from collections import namedtuple
from struct import Struct
# Module logger; default to WARNING so library users are not spammed.
log = logging.getLogger("metpy.io.tools")
log.setLevel(logging.WARNING)
# This works around problems on early Python 2.7 where Struct.unpack_from() can't handle
# being given a bytearray; use memoryview on Python 3, since calling bytearray again isn't
# cheap.
try:
    bytearray_to_buff = buffer  # Python 2: buffer() exists
except NameError:
    bytearray_to_buff = memoryview  # Python 3
class NamedStruct(Struct):
def __init__(self, info, prefmt='', tuple_name=None):
if tuple_name is None:
tuple_name = 'NamedStruct'
names, fmts = zip(*info)
self.converters = {}
conv_off = 0
for ind, i in enumerate(info):
if len(i) > 2:
self.converters[ind - conv_off] = i[-1]
elif not i[0]: # Skip items with no name
conv_off += 1
self._tuple = namedtuple(tuple_name, ' '.join(n for n in names if n))
super(NamedStruct, self).__init__(prefmt + ''.join(f for f in fmts if f))
def _create(self, items):
if self.converters:
items = list(items)
for ind, conv in self.converters.items():
items[ind] = conv(items[ind])
if len(items) < len(self._tuple._fields):
items.extend([None] * (len(self._tuple._fields) - len(items)))
return self._tuple(*items)
def unpack(self, s):
return self._create(super(NamedStruct, self).unpack(s))
def unpack_from(self, buff, offset=0):
return self._create(super(NamedStruct, self).unpack_from(buff, offset))
def unpack_file(self, fobj):
bytes = fobj.read(self.size)
return self.unpack(bytes)
# This works around times when we have more than 255 items and can't use
# NamedStruct. This is a CPython limit for arguments.
class DictStruct(Struct):
    """Like ``NamedStruct`` but unpacks into a plain dict.

    Useful when a structure has more fields than namedtuple creation allows.
    """

    def __init__(self, info, prefmt=''):
        names, formats = zip(*info)
        self._names = [name for name in names if name]  # drop unnamed fields
        super(DictStruct, self).__init__(prefmt + ''.join(f for f in formats if f))

    def _create(self, items):
        """Pair the unpacked values with their field names."""
        return dict(zip(self._names, items))

    def unpack(self, s):
        return self._create(super(DictStruct, self).unpack(s))

    def unpack_from(self, buff, offset=0):
        return self._create(super(DictStruct, self).unpack_from(buff, offset))
class Enum(object):
    """Map integer codes to names, with a fallback for unknown values."""

    def __init__(self, *args, **kwargs):
        self.val_map = {}
        # positional names are numbered from 0 in order
        for value, name in enumerate(args):
            self.val_map[value] = name
        # keyword arguments supply explicit name=value pairs (stored inverted
        # so lookups go from value to name)
        for name, value in kwargs.items():
            self.val_map[value] = name

    def __call__(self, val):
        return self.val_map.get(val, 'Unknown ({})'.format(val))
class Bits(object):
    """Unpack an integer into a list of booleans, least significant bit first."""

    def __init__(self, num_bits):
        self._bits = range(num_bits)

    def __call__(self, val):
        return [bool(val & (1 << bit)) for bit in self._bits]
class BitField(object):
    """Map the set bits of an integer to names, least significant bit first.

    Calling an instance returns the single matching name when exactly one bit
    is set, a list of names otherwise, or None for a falsy value.
    """

    def __init__(self, *names):
        self._names = names

    def __call__(self, val):
        if not val:
            return None
        # Collect the name for each set bit. (Renamed the ambiguous
        # single-letter local `l` per PEP 8 / E741.)
        matches = []
        for name in self._names:
            if val & 0x1:
                matches.append(name)
            val >>= 1
            if not val:
                break
        # Return whole list if empty or multiple items, otherwise just single item
        return matches[0] if len(matches) == 1 else matches
class Array(object):
    """Unpack a fixed-format binary buffer into a list of values."""

    def __init__(self, fmt):
        self._struct = Struct(fmt)

    def __call__(self, buf):
        values = self._struct.unpack(buf)
        return list(values)
class IOBuffer(object):
    """An in-memory byte buffer with a read cursor and resettable bookmarks."""

    def __init__(self, source):
        # Copy the source into a mutable bytearray; _offset is the cursor.
        self._data = bytearray(source)
        self._offset = 0
        self.clear_marks()

    @classmethod
    def fromfile(cls, fobj):
        """Create a buffer from the full contents of a file object."""
        return cls(fobj.read())

    def set_mark(self):
        """Remember the current offset; returns the bookmark's index."""
        self._bookmarks.append(self._offset)
        return len(self._bookmarks) - 1

    def jump_to(self, mark, offset=0):
        """Move the cursor to a bookmark, plus an optional relative offset."""
        self._offset = self._bookmarks[mark] + offset

    def offset_from(self, mark):
        """Distance of the cursor past the given bookmark."""
        return self._offset - self._bookmarks[mark]

    def clear_marks(self):
        """Forget all bookmarks."""
        self._bookmarks = []

    def splice(self, mark, newdata):
        """Replace everything from the bookmark onwards with ``newdata``."""
        self.jump_to(mark)
        self._data = self._data[:self._offset] + bytearray(newdata)

    def read_struct(self, struct_class):
        """Unpack one struct at the cursor and advance past it."""
        struct = struct_class.unpack_from(bytearray_to_buff(self._data), self._offset)
        self.skip(struct_class.size)
        return struct

    def read_func(self, func, num_bytes=None):
        """Apply ``func`` to the next ``num_bytes`` and advance the cursor."""
        # only advance if func succeeds
        res = func(self.get_next(num_bytes))
        self.skip(num_bytes)
        return res

    def read_ascii(self, num_bytes=None):
        """Read ``num_bytes`` (or the remainder) decoded as ASCII."""
        return self.read(num_bytes).decode('ascii')

    def read_binary(self, num, item_type='B'):
        """Read ``num`` items of ``item_type`` (a struct format code)."""
        if 'B' in item_type:
            return self.read(num)

        if item_type[0] in ('@', '=', '<', '>', '!'):
            order = item_type[0]
            item_type = item_type[1:]
        else:
            order = '@'  # native byte order by default
        return list(self.read_struct(Struct(order + '%d' % num + item_type)))

    def read_int(self, code):
        """Read a single value using struct format ``code``."""
        return self.read_struct(Struct(code))[0]

    def read(self, num_bytes=None):
        """Read ``num_bytes`` (or the remainder) and advance the cursor."""
        res = self.get_next(num_bytes)
        self.skip(len(res))
        return res

    def get_next(self, num_bytes=None):
        """Peek at the next ``num_bytes`` without moving the cursor."""
        if num_bytes is None:
            return self._data[self._offset:]
        else:
            return self._data[self._offset:self._offset + num_bytes]

    def skip(self, num_bytes):
        """Advance the cursor; ``None`` jumps to the end of the buffer."""
        if num_bytes is None:
            self._offset = len(self._data)
        else:
            self._offset += num_bytes

    def check_remains(self, num_bytes):
        """True if exactly ``num_bytes`` remain past the cursor."""
        return len(self._data[self._offset:]) == num_bytes

    def truncate(self, num_bytes):
        """Drop the final ``num_bytes`` from the buffer."""
        self._data = self._data[:-num_bytes]

    def at_end(self):
        """True when the cursor has reached (or passed) the end."""
        return self._offset >= len(self._data)

    def __getitem__(self, item):
        return self._data[item]

    def __str__(self):
        return 'Size: {} Offset: {}'.format(len(self._data), self._offset)

    def print_next(self, num_bytes):
        """Debug helper: hex-print the bytes ahead of the cursor."""
        print(' '.join('%02x' % c for c in self.get_next(num_bytes)))

    def __len__(self):
        return len(self._data)
def zlib_decompress_all_frames(data):
    """Decompress all frames of zlib-compressed bytes.

    Repeatedly tries to decompress `data` until all data are decompressed, or
    decompression fails. This will skip over bytes that are not compressed
    with zlib.

    Parameters
    ----------
    data : bytearray or bytes
        Binary data compressed using zlib.

    Returns
    -------
    bytearray
        All decompressed bytes
    """
    out = bytearray()
    remaining = bytes(data)
    while remaining:
        decomp = zlib.decompressobj()
        try:
            out += decomp.decompress(remaining)
            remaining = decomp.unused_data
        except zlib.error:
            # not a zlib frame: pass the rest through untouched
            out += remaining
            break
    return out
def bits_to_code(val):
    """Translate a bit width to the corresponding struct format code."""
    codes = {8: 'B', 16: 'H'}
    if val in codes:
        return codes[val]
    log.warning('Unsupported bit size: %s. Returning "B"', val)
    return 'B'
# For debugging
def hexdump(buf, num_bytes, offset=0, width=32):
ind = offset
end = offset + num_bytes
while ind < end:
chunk = buf[ind:ind + width]
actual_width = len(chunk)
hexfmt = '%02X'
blocksize = 4
blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)]
# Need to get any partial lines
num_left = actual_width % blocksize
if num_left:
blocks += [hexfmt * num_left + '--' * (blocksize - num_left)]
blocks += ['--' * blocksize] * (width // blocksize - len(blocks))
hexoutput = ' '.join(blocks)
printable = tuple(chunk)
print(hexoutput % printable, str(ind).ljust(len(str(end))),
str(ind - offset).ljust(len(str(end))),
''.join(chr(c) if 31 < c < 128 else '.' for c in chunk), sep=' ')
ind += width
| {
"repo_name": "deeplycloudy/MetPy",
"path": "metpy/io/tools.py",
"copies": "1",
"size": "8391",
"license": "bsd-3-clause",
"hash": -1679596973043277300,
"line_mean": 28.0346020761,
"line_max": 91,
"alpha_frac": 0.5780002384,
"autogenerated": false,
"ratio": 3.737639198218263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9812875609517187,
"avg_score": 0.0005527654202153058,
"num_lines": 289
} |
"""A collection of helper/utility functions."""
import re
from . import exceptions as ex
API_ENDPOINT = 'https://api.emailhunter.co/v1/'
EMAIL_URL = API_ENDPOINT + 'verify?email={0}&api_key={1}'
DOMAIN_URL = (API_ENDPOINT + 'search?domain={0}&api_key={1}'
              '&offset={2}&type={3}')
# Patterns are raw strings: sequences such as \w and \d inside a plain
# string literal are invalid string escapes (a warning on modern Python).
EMAIL_RE = re.compile(
    r'''
    [\w\d.+-]+ # username
    @
    ([\w\d.]+\.)+ # domain name prefix
    (com|org|edu|io|me) # support more top-level domains
    ''',
    re.UNICODE | re.VERBOSE | re.IGNORECASE)
# From Django, slightly modified (http://bit.ly/1ILBmfL)
URL_RE = re.compile(
    r'''
    ^(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+
    (?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)| # domain...
    localhost| # localhost...
    \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}| # ...or ipv4
    \[?[A-F0-9]*:[A-F0-9:]+\]?) # ...or ipv6
    (?::\d+)? # optional port
    (?:/?|[/?]\S+)$
    ''',
    re.VERBOSE | re.IGNORECASE)
def get_query_type(query):
    """Classify the client's query as 'domain', 'email', or '' (unknown).

    In order to provide a proper API endpoint it is necessary to first
    determine the type of query the client is using. There are only two
    options currently: 1) domain or 2) email.

    :param query: Search query provided by client.
    """
    if URL_RE.match(query):
        return 'domain'
    if EMAIL_RE.match(query):
        return 'email'
    return ''
def get_endpoint(api_key, query, offset, type):
    """Return endpoint URL for the relevant search type.

    There are two endpoints: domain search and email search; each takes
    different parameters, though api_key is common between them.

    Note: if both a url and email address are provided the endpoint
    returned will default to the domain search as it is considered to
    be the primary function of the API and thus takes precedent.

    :param api_key: Secret client API key.
    :param query: URL or email address on which to search.
    :param offset: Specifies the number of emails to skip.
    :param type: Specifies email type (i.e. generic or personal).
    """
    query_type = get_query_type(query)
    if query_type == 'domain':
        return DOMAIN_URL.format(query, api_key, offset, type)
    if query_type == 'email':
        return EMAIL_URL.format(query, api_key)
    raise ex.InvalidQueryStringException('Invalid query string')
| {
"repo_name": "jgoodlet/punter",
"path": "punter/helpers.py",
"copies": "1",
"size": "2663",
"license": "mit",
"hash": 5843302513730481000,
"line_mean": 29.9651162791,
"line_max": 71,
"alpha_frac": 0.5846789335,
"autogenerated": false,
"ratio": 3.5131926121372032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9576430861393446,
"avg_score": 0.004288136848751364,
"num_lines": 86
} |
"""A collection of helpul utility functions for analysis of pypsych outputs."""
import pandas as pd
import numpy as np
def collapse_bins(df, prefixes):
    """Collapse (mean) any columns that start with each prefix.

    For every prefix, the matching columns are averaged row-wise into a
    single new column named after the prefix and the matching columns are
    dropped. ``df`` is modified in place; returns None.

    The original built a boolean mask over the *pre-assignment* columns,
    assigned the new column (growing ``df.columns``), and only then
    indexed ``df.columns`` with the now too-short mask.
    """
    for prefix in prefixes:
        matched = [colname for colname in df.columns
                   if colname.startswith(prefix)]
        # Compute the row means before dropping (sources still present),
        # and assign after dropping so the new column is never part of
        # the selection being removed.
        collapsed = df.loc[:, matched].mean(axis=1)
        df.drop(matched, axis=1, inplace=True)
        df[prefix] = collapsed
def find_prefix_cols(df, prefix):
    """Return the subset of ``df`` columns whose names start with ``prefix``."""
    return df.columns[df.columns.str.startswith(prefix)]
def findreplace_prefix_cols(df, old_prefix, new_prefix):
    """List columns names with old prefix replaced by new prefix."""
    renamed = []
    for name in df.columns:
        if name.startswith(old_prefix):
            # Replace only the leading occurrence of the prefix.
            renamed.append(name.replace(old_prefix, new_prefix, 1))
    return renamed
def get_renaming_dict(df, old_prefix, new_prefix):
    """Generate re_dict for use in pandas.DataFrame.rename(columns=re_dict)."""
    originals = find_prefix_cols(df, old_prefix)
    replacements = findreplace_prefix_cols(df, old_prefix, new_prefix)
    return dict(zip(originals, replacements))
def merge_and_rename_columns(df, new_name, old_names):
    """
    Create new_name column by filling in non-nan values from old_names in order.

    Works on a deep copy; the input frame is left untouched. ``old_names``
    may be a single column name or a list of names; earlier names in the
    list take precedence where several are non-null.

    Improvements over the original: ``isinstance`` instead of a ``type``
    identity check, ``pd.isnull`` instead of ``~pd.notnull``, and the two
    duplicated branches merged into one loop.
    """
    res = df.copy(deep=True)
    if not isinstance(old_names, list):
        old_names = [old_names]
    # Only reset the target column when it is not itself a source;
    # otherwise its existing values seed the merge (as in the original).
    if new_name not in old_names:
        res[new_name] = np.nan
    for old_name in old_names:
        missing = pd.isnull(res[new_name])
        res.loc[missing, new_name] = res.loc[missing, old_name]
    return res
| {
"repo_name": "janmtl/pypsych",
"path": "pypsych/utils.py",
"copies": "1",
"size": "1894",
"license": "bsd-3-clause",
"hash": -2243172058632679700,
"line_mean": 32.2280701754,
"line_max": 80,
"alpha_frac": 0.6404435058,
"autogenerated": false,
"ratio": 3.3228070175438598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44632505233438596,
"avg_score": null,
"num_lines": null
} |
"""A collection of hemodynamic response functions. """
import numpy as np
import nitime as nt
import roi
def double_gamma(self, width=32, a1=6.0, a2=12.0, b1=0.9, b2=0.9, c=0.35):
    """ Returns a HRF. Defaults are the canonical parameters. """
    times = np.arange(0, width, self.TR)
    peak1 = a1 * b1
    peak2 = a2 * b2
    # Difference of two gamma-shaped bumps: initial rise minus a scaled,
    # later undershoot (all computed vectorized over the time axis).
    rise = (times / peak1) ** a1 * np.exp((peak1 - times) / b1)
    undershoot = c * (times / peak2) ** a2 * np.exp((peak2 - times) / b2)
    return rise - undershoot
def mean_fir(self, window_size=30):
    """ Estimate and return the average (for all conditions in trials)
    finite impulse-response model using self.bold and self.trials.

    <window_size> is the expected length of the haemodynamic response
    in TRs. """
    # Guard *before* copying: calling .copy() on a missing (None) signal
    # raised AttributeError instead of this informative error.  Also use
    # 'is None' -- '== None' on an ndarray is elementwise and its truth
    # value is ambiguous.
    if self.bold is None:
        raise ValueError(
            'No bold signal is defined. Try create_bold()?')
    bold = self.bold.copy()
    # Convert trials to tr
    trials_in_tr = roi.timing.dtime(self.trials, self.durations, None, 0)
    # Truncate bold or trials_in_tr if needed
    try:
        bold = bold[0:trials_in_tr.shape[0]]
        trials_in_tr = trials_in_tr[0:bold.shape[0]]
    except IndexError:
        pass
    # Convert self.bold (an array) to a nitime TimeSeries
    # instance
    ts_bold = nt.TimeSeries(bold, sampling_interval=self.TR)
    # And another one for the events (the different stimuli):
    ts_trials = nt.TimeSeries(trials_in_tr, sampling_interval=self.TR)
    # Create a nitime Analyzer instance.
    eva = nt.analysis.EventRelatedAnalyzer(ts_bold, ts_trials, window_size)
    # Now find the event-related average by FIR.
    # For details see the nitime module and,
    #
    # M.A. Burock and A.M.Dale (2000). Estimation and Detection of
    # Event-Related fMRI Signals with Temporally Correlated Noise: A
    # Statistically Efficient and Unbiased Approach. Human Brain
    # Mapping, 11:249-260
    hrf = eva.FIR.data
    if hrf.ndim == 2:
        hrf = hrf.mean(0)
        ## hrf is the mean of all
        ## conditions in trials
    hrf = hrf/hrf.max()
    ## Norm it
    return hrf
| {
"repo_name": "parenthetical-e/roi",
"path": "hrfs.py",
"copies": "1",
"size": "2142",
"license": "bsd-2-clause",
"hash": -6316221245319187000,
"line_mean": 29.6,
"line_max": 75,
"alpha_frac": 0.6209150327,
"autogenerated": false,
"ratio": 3.15,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9113256309348292,
"avg_score": 0.03153174467034164,
"num_lines": 70
} |
"""A collection of hemodynamic response functions."""
import numpy as np
import scipy.stats as stats
from modelmodel.misc import process_prng
def double_gamma(width=32, TR=1, a1=6.0, a2=12., b1=0.9, b2=0.9, c=0.35):
    """
    Returns a HRF. Defaults are the canonical parameters.
    """
    times = np.arange(0, width, TR)
    peak1 = a1 * b1
    peak2 = a2 * b2
    # Vectorized difference of two gamma-like bumps: rise minus undershoot.
    rise = (times / peak1) ** a1 * np.exp((peak1 - times) / b1)
    undershoot = c * (times / peak2) ** a2 * np.exp((peak2 - times) / b2)
    return rise - undershoot
def _preturb(weight, width=32, TR=1, a1=6.0, a2=12., b1=0.9, b2=0.9, c=0.35, prng=None):
    """Randomly perturb one double-gamma HRF parameter.

    One of a1/a2/b1/b2/c is chosen at random and resampled from a normal
    distribution centred on its current value with SD ``value / weight``.
    Returns the full parameter dict (including width and TR) and the prng.
    """
    prng = process_prng(prng)
    np.random.set_state(prng.get_state())
    # Parameters eligible for perturbation, keyed by *name*.  The original
    # keyed this dict by the parameter values themselves, which collapsed
    # the identical b1/b2 entries and made double_gamma(**params) fail.
    params = {'a1': a1, 'a2': a2, 'b1': b1, 'b2': b2, 'c': c}
    # Perturb one randomly chosen parameter.  dict views cannot be
    # shuffled in place, so materialize the keys first.
    keys = list(params)
    prng.shuffle(keys)
    chosen = keys[0]
    par = params[chosen]
    params[chosen] = prng.normal(loc=par, scale=par / (1. * weight))
    # Add unperturbed params
    params['width'] = width
    params['TR'] = TR
    return params, prng
def preturb_double_gamma(weight, width=32, TR=1, a1=6.0, a2=12., b1=0.9, b2=0.9, c=0.35, prng=None):
    """
    Returns a (normally) perturbed HRF.

    Defaults are the canonical parameters. Degree of perturbation
    can be rescaled by weight.
    """
    # Forward the caller's arguments: the original passed the hard-coded
    # defaults (width=32, TR=1, ..., prng=None) to _preturb, silently
    # ignoring every user-supplied value.
    params, prng = _preturb(
        weight, width=width, TR=TR, a1=a1, a2=a2, b1=b1,
        b2=b2, c=c, prng=prng
    )
    return double_gamma(**params), prng
def fir(events, bold, window_size=30):
    """Finite impulse-response estimate of the HRF (not yet implemented).

    Placeholder reserving the API; always raises NotImplementedError.
    """
    raise NotImplementedError("Create me")
| {
"repo_name": "parenthetical-e/modelmodel",
"path": "hrf.py",
"copies": "1",
"size": "1556",
"license": "bsd-2-clause",
"hash": 4385524596475131000,
"line_mean": 24.9333333333,
"line_max": 100,
"alpha_frac": 0.5893316195,
"autogenerated": false,
"ratio": 2.5976627712854756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8593080106685591,
"avg_score": 0.018782856819976816,
"num_lines": 60
} |
"""A collection of hemodynamic response functions."""
import numpy as np
import scipy.stats as stats
def double_gamma(width=32, TR=1, a1=6.0, a2=12., b1=0.9, b2=0.9, c=0.35):
    """
    Returns a HRF. Defaults are the canonical parameters.
    """
    times = np.arange(0, width, TR)
    peak1 = a1 * b1
    peak2 = a2 * b2
    # Vectorized: gamma-shaped rise minus a scaled, delayed undershoot.
    rise = (times / peak1) ** a1 * np.exp((peak1 - times) / b1)
    undershoot = c * (times / peak2) ** a2 * np.exp((peak2 - times) / b2)
    return rise - undershoot
def preturb_canonical(fraction, width, TR, prng=None):
    """
    Add scaled (by <fraction> (0-1)) white noise to a randomly selected
    canonical double gamma HRF parameter. Returns a dict of new HRF
    parameters, all but the perturbed one match canonical values. It also
    returns a numpy RandomState() object.

    If a RandomState() was passed via prng it is returned, having been used for
    sampling by this function. If <prng> was None or a number, a RandomState()
    was created and is (also) now returned.

    <width> and <TR> are not used here but are needed for HRF calculations
    downstream.
    """
    # NOTE(review): process_prng is not imported in this module's visible
    # header -- confirm it is defined elsewhere in the file/package.
    prng = process_prng(prng)
    np.random.set_state(prng.get_state())
    ## Pass random state from prng to np so that
    ## stats.<> will inherit the right state.
    ## There does not seem to be a way to set random
    ## state of stats.* functions directly.
    # The canonical parameters, keyed by *name*.  The original dict used
    # undefined bare names (a1, a2, ...) as keys, which raised NameError
    # and would have collapsed the identical b1/b2 entries anyway.
    params = {'a1': 6.0, 'a2': 12.0, 'b1': 0.9, 'b2': 0.9, 'c': 0.35}
    # dict views cannot be shuffled in place; materialize the keys first.
    keys = list(params)
    np.random.shuffle(keys)
    chosen = keys[0]
    par = params[chosen]
    params[chosen] = stats.norm.rvs(loc=par, scale=par/(1.*fraction))
    ## Grab a random value from the normal curve
    ## with its SD reduced by 0.fraction
    params['width'] = width
    params['TR'] = TR
    ## Add the remaining (unperturbed) params
    prng.set_state(np.random.get_state())
    ## Pass the seed state from np back to
    ## prng, then we can use prng again...
    return params, prng
| {
"repo_name": "parenthetical-e/simfMRI",
"path": "hrf.py",
"copies": "1",
"size": "2066",
"license": "bsd-2-clause",
"hash": -6016286430809220000,
"line_mean": 30.7846153846,
"line_max": 80,
"alpha_frac": 0.6030977735,
"autogenerated": false,
"ratio": 3.398026315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45011240892894744,
"avg_score": null,
"num_lines": null
} |
## A collection of important functions
#----------------------------------------------------------------------
# The idea is to collect all tasks in separate (small) functions to
# clearly arange the code.
## Import modules
#----------------------------------------------------------------------
import sys
import pdb
import numpy as np
import math as m
import my_math
import elementclass
## Function definitions:
#----------------------------------------------------------------------
# Distribution of points:
#----------------------------------------------------------------------
# Necessary for setting the vertex positions
def sq_dist(nSq, dr, dr_sq_ratio):
    """ Set distribution of elements within the "square" region in
    radial direction in a certain way.
    Use geometric progression.
    """
    fact_sq = dr_sq_ratio**(1/(nSq-1))
    # Sum of the geometric series 1 + f + f**2 + ... + f**(nSq-1)
    fact_sq_sum = sum(fact_sq**i for i in range(nSq))
    dr_sq_max = nSq*dr/fact_sq_sum
    dr_sq_min = dr_sq_max*dr_sq_ratio
    dr_sq = np.array([my_math.geom_prog(nSq, dr_sq_max, dr_sq_min, i)
                      for i in range(nSq)])
    return dr_sq
def on_dist(nR, nSq, dr, dr_sq, distri_on):
    """ Set distribution of elements in radial direction along axis
    within onion region in a certain way.
    Use geometric progression for a small increase and then cosine for
    sharp decrease of element size.

    nR        : total number of element layers in radial direction
    nSq       : number of layers belonging to the inner "square" region
    dr        : nominal radial element size in the onion region
    dr_sq     : radial size distribution of the square-region layers
    distri_on : fraction (0-1) of onion layers that grow before shrinking
    """
    n_on_1 = int(m.floor(distri_on*(nR-nSq))) # increasing region
    dr_sq_min = min(dr_sq)
    dr_on_interface = dr_sq_min
    if (n_on_1<2):
        # If it is too small then only shrink
        # Note that condition of total length being (nR-nSq)*dr
        # still needs to be fulfilled. Besides, we keep the first
        # element's size at a certain percentage of the adjacent square region's
        # elements.
        # Hence, the only free parameter is the smallest element
        # close to the wall.
        n_on_2 = (nR-nSq)
        first_shrink = 1.0
        first_el = dr_sq_min*first_shrink
        sum_sin = 0
        for i in range(0,n_on_2):
            sum_sin = sum_sin + m.sin(m.pi/2 *i/(n_on_2-1))
        dr_on_wall = (dr*(nR-nSq) - n_on_2*first_el)/(sum_sin) + first_el
        dr_on = np.zeros(n_on_2)
        for i in range(0,n_on_2):
            dr_on[i] = my_math.sin_dist(n_on_2, first_el, dr_on_wall,i)
    else:
        # Use increasing region first and afterwards decreasing
        n_on_2 = (nR-nSq) - n_on_1 #decreasing region
        dr_sq_min = min(dr_sq)
        dr_on_interface = dr_sq_min
        # The critical part here is to use geometric progression first and
        # cosine distribution afterwards. However, the overall length needs
        # to be the same as with the nominal values (nR-nSq)*dr.
        # In order to fulfill this condition, the following two functions
        # are needed.
        def x_transition(x):
            """ This function is defined by the requirement that the total length
            of both onion regions needs to be (nR-nSq)*dr and we end up at r = R.
            Note that the first element of the second region is not the same as
            the last of the first region, i.e. both regions "share" dr_transition.
            """
            sum_cos = 0
            for i in range(1,n_on_2+1):
                sum_cos = sum_cos + m.cos(i/(n_on_2+1)*m.pi/2)
            ret = -dr*(nR-nSq) + dr_sq_min * (1-(x/dr_sq_min)**(n_on_1/(n_on_1-1)))/\
                    (1-(x/dr_sq_min)**(1/(n_on_1-1))) + x*sum_cos
            return ret
        def x_transition_prime(x):
            """ First derivative of x_transition function
            """
            sum_cos = 0
            for i in range(1,n_on_2+1):
                sum_cos = sum_cos + m.cos(i/(n_on_2+1)*m.pi/2)
            ret = sum_cos + dr_sq_min * ( (-n_on_1/(n_on_1-1)*x**(n_on_1/(n_on_1-1)-1)*\
                    dr_sq_min**(n_on_1/(1-n_on_1))) * (1-(x/dr_sq_min)**(1/(n_on_1-1)))**(-1)+\
                    (1-(x/dr_sq_min)**(n_on_1/(n_on_1-1)))*(-1)*(1-(x/dr_sq_min)**(1/(n_on_1-1)))**(-2)*\
                    (-dr_sq_min**(1/(1-n_on_1))*1/(n_on_1-1)*x**(1/(n_on_1-1)-1)))
            return ret
        # Find the size of the element in between the increasing and decreasing regions
        # by the requirement that the total size of the onion region is still
        # (nR-nSq)*dr by using newton raphson algorithm for finding roots.
        dr_on_transition = my_math.newton_raphson(dr,x_transition, x_transition_prime)
        dr_on_1 = np.zeros(n_on_1) # size distribution in increasing region
        for i in range(0,n_on_1):
            dr_on_1[i] = my_math.geom_prog(n_on_1,dr_on_interface, dr_on_transition, i)
        dr_on_2 = np.zeros(n_on_2) # size distribution in decreasing region
        for i in range(0,n_on_2):
            dr_on_2[i] = dr_on_transition*m.cos((i+1)/(n_on_2+1)*m.pi/2)
        dr_on = np.concatenate([dr_on_1, dr_on_2])
    # make sure that last element is exactly at R and not only close because
    # of rounding errors
    desired_radius = np.sum(dr_sq)+dr*(nR-nSq)
    actual_radius = np.sum(dr_sq)+np.sum(dr_on)
    if (abs(desired_radius - actual_radius)>0.1):
        print('WARNING: desired radius is {0:10.5f}, but actual radius is {1:10.5f}'\
                .format(desired_radius, actual_radius))
        sys.exit(1)
    else:
        # adjust last element's size
        dr_last = desired_radius - (np.sum(dr_sq[:])+np.sum(dr_on[:-1]))
        dr_on[-1] = dr_last
    return dr_on
# MAJOR FUNCTION for setting the vertex positions:
#----------------------------------------------------------------------
def set_vertices(elements, nR, nSq, dr, dz, dr_sq_ratio, dr_sq_int_ratio, \
        stretch_sq, distri_on, a_interf, tog_r_out_const, tog_a_on_dist):
    """ Set vertex location for each element.
    The vertices are set in a special way.
    The inner section, called "square" section is put together by
    ellipses and straight lines as a modification of just a simple regular
    mesh consisting of squares.
    The outer part, called "onion" region, is built up by ellipses and
    straight lines, too.
    In the upper onion region, the semi-major axis a is decreasing each
    layer outwards so that a=1 in the outermost layer which gives a circle.
    The constant on the right hand side is kept constant and semi-minor
    and semi-major axis are varied.

    elements        : list of element objects, populated in place (x, y, z, c)
    nR, nSq         : number of radial layers in total / in the square region
    dr, dz          : nominal radial and axial (streamwise) element sizes
    dr_sq_ratio     : size ratio across the square region (geometric progression)
    dr_sq_int_ratio : size ratio along the square-onion interface
    stretch_sq      : stretch factor for square-region elements
    distri_on       : fraction of onion layers with increasing size
    a_interf        : semi-major axis of the square-onion interface ellipse
    tog_r_out_const : if 1, outermost onion layer keeps a constant radial size
    tog_a_on_dist   : decay law for a_on (0: exp, 1: sin, 2: linear)

    Vertex layout per element: indices 0-3 are the face at z = z0,
    indices 4-7 the face at z = z0 + dz (see the z0..z7 assignments below).
    """
    # Variable definitions
    ntheta = nSq*2 # number of elements in one onion layer
    r_const = 1 # constant in ellipses
    a_on = np.zeros(2) # semi-major axis in onion region
    a_row = np.zeros(2) # semi-maj. ax. in sq. along rows
    a_col = np.zeros(2) # semi-maj. ax. in sq. along colums
    b_on = np.zeros(2) # semi-minor axis in onion region
    b_row = np.zeros(2) # semi-min. ax. in sq. along row
    b_col = np.zeros(2) # semi-min. ax. in sq. along col
    slope_on = np.zeros(2) # slope of straight lines in onion region
    slope_row = np.zeros(2) # slope on east and west faces
    slope_col = np.zeros(2) # slope on south and north faces
    y_interc = np.zeros(2) # y interception of straight lines
    # in onion region
    # Intersection between straight lines and ellipses
    # at the interface between sq. and onion region.
    # Row: east and west faces
    # Col: south and north faces
    x_inters_row = np.zeros(2)
    x_inters_col = np.zeros(2)
    y_inters_col = np.zeros(2)
    y_inters_row = np.zeros(2)
    # pts along wall of the pipe
    pt_wall_x = np.zeros(2)
    pt_wall_y = np.zeros(2)
    a_test_on = np.zeros(2)
    # Set distribution of elements in radial direction along axis in a certain
    # way for square region
    dr_sq_nominal = dr*stretch_sq # stretch el in square region
    dr_sq = sq_dist(nSq, dr_sq_nominal, dr_sq_ratio)
    # Set distribution of elements in radial direction along axis in a certain
    # way for onion region
    dr_on_nominal = (dr*nR - dr_sq_nominal*nSq)/(nR-nSq)
    dr_on = on_dist(nR, nSq, dr_on_nominal, dr_sq, distri_on)
    for el in elements:
        # Check in which cross section we are
        nel_cross_section = (nSq**2+(nR-nSq)*nSq*2)*4
        cross_sec = int((el.number-1)/nel_cross_section)
        # Reduce current element number to be within the first cross section
        n = el.number - cross_sec*nel_cross_section
        # Define streamwise location
        z0 = dz*cross_sec
        z1 = z0
        z2 = z0
        z3 = z0
        z4 = dz*(cross_sec+1)
        z5 = z4
        z6 = z4
        z7 = z4
        if (n<= nSq**2):
            # This is the inner, "square" section
            #--------------------------------------------------
            # OPEN square
            #--------------------------------------------------
            i = (n-1)%nSq # column number
            j = int((n-1)/nSq) # row number
            # Determine the semi-major and semi-minor axis for interface between "square"
            # and onion section
            #----------------------------------------------------------------------
            a_interface = a_interf
            b_interface = np.sum(dr_sq[:])
            # Idea: define ellipses in the inner region such that they coincide with the
            # elements in the onion region.
            # OPEN def semi-major and semi-minor axis
            #--------------------------------------------------
            slope_row[0] = m.tan(m.pi/2*(j/ntheta)) # slope of the straight line on the bottom side
            slope_row[1] = m.tan(m.pi/2*((j+1)/ntheta)) # slope of the straight line on the top side
            slope_col[0] = m.tan(m.pi/2*((ntheta-i)/ntheta)) # slope of the straight line on the left side
            slope_col[1] = m.tan(m.pi/2*((ntheta-i-1)/ntheta)) # slope of the straight line on the right side
            b_row[0] = np.sum(dr_sq[:j]) # small semi-minor axis
            b_row[1] = np.sum(dr_sq[:j+1]) # large semi-minor axis
            a_col[0] = np.sum(dr_sq[:i]) # small semi-major axis
            a_col[1] = np.sum(dr_sq[:i+1]) # large semi-major axis
            # Set distribution of points along intersection
            # Shrink elements when approaching northeast corner of square region
            dr_sq_inters_ratio = dr_sq_int_ratio
            x_inters_corner = my_math.intersec_ellip_ellip(a_interface, b_interface, r_const**2,\
                b_interface, a_interface, r_const**2)
            dr_sq_inters_nominal = x_inters_corner/nSq
            # distribution of elements along northern interface
            x_inters_col_dist = sq_dist(nSq, dr_sq_inters_nominal, dr_sq_inters_ratio)
            x_inters_col[0] = np.sum(x_inters_col_dist[:i])
            x_inters_col[1] = np.sum(x_inters_col_dist[:i+1])
            y_inters_col[0] = my_math.ellipse(a_interface, b_interface, r_const**2, x_inters_col[0])
            y_inters_col[1] = my_math.ellipse(a_interface, b_interface, r_const**2, x_inters_col[1])
            # distribution of elements along eastern row
            y_inters_row_dist = x_inters_col_dist
            y_inters_row[0] = np.sum(y_inters_row_dist[:j])
            y_inters_row[1] = np.sum(y_inters_row_dist[:j+1])
            x_inters_row[0] = my_math.ellipse(a_interface, b_interface, r_const**2, y_inters_row[0])
            x_inters_row[1] = my_math.ellipse(a_interface, b_interface, r_const**2, y_inters_row[1])
            # Find semi-major axis by the points at the intersection, r_const and semi-minor axis,
            # which is defined by the vertical position
            if (j==0):
                a_row[0] = 0 # this is reset later
                a_row[1] = x_inters_row[1]*b_row[1]/( (r_const**2*b_row[1]**2 - y_inters_row[1]**2)**0.5 )
            else:
                a_row[0] = x_inters_row[0]*b_row[0]/( (r_const**2*b_row[0]**2 - y_inters_row[0]**2)**0.5 )
                a_row[1] = x_inters_row[1]*b_row[1]/( (r_const**2*b_row[1]**2 - y_inters_row[1]**2)**0.5 )
            # Find semi-minor axis by the points at the intersection, r_const and semi-major axis,
            # which is defined by the horizontal position
            # note that x and y need to be switched here
            if (i==0):
                b_col[0] = 0 # this is reset later
                b_col[1] = y_inters_col[1]*a_col[1]/( (r_const**2*a_col[1]**2 - x_inters_col[1]**2)**0.5 )
            else:
                b_col[0] = y_inters_col[0]*a_col[0]/( (r_const**2*a_col[0]**2 - x_inters_col[0]**2)**0.5 )
                b_col[1] = y_inters_col[1]*a_col[1]/( (r_const**2*a_col[1]**2 - x_inters_col[1]**2)**0.5 )
            # CLOSE def semi-major and semi-minor axis
            #--------------------------------------------------
            # Set vertex position and curvature
            if (j==0): # first row
                if (i==0): # first col
                    x0 = np.sum(dr_sq[:i])
                    x1 = np.sum(dr_sq[:i+1])
                    x2 = my_math.intersec_ellip_ellip(a_row[1],b_row[1],r_const**2,\
                            a_col[1],b_col[1],r_const**2)
                    x3 = np.sum(dr_sq[:i])
                    y0 = np.sum(dr_sq[:j])
                    y1 = np.sum(dr_sq[:j])
                    y2 = my_math.ellipse(a_row[1],b_row[1],r_const**2,x2)
                    y3 = np.sum(dr_sq[:j+1])
                    # use the midpoint between two vertices to calculate the curvature
                    c0 = 0
                    # note that for c1 and c2 we use the midpoint of y-coordinates and
                    # switch semi-major and semi-minor axis so that symmetry for
                    # the curvature is preserved, i.e. same curvature at interface
                    # sq-onion in upper part and sq-onion lower part.
                    c1 = my_math.get_rad_ell(b_col[1],a_col[1],r_const**2, (y1+y2)/2)
                    c2 = my_math.get_rad_ell(a_row[1],b_row[1],r_const**2, (x2+x3)/2)
                    c3 = 0
                    # Account for 3D
                    x4 = x0
                    x5 = x1
                    x6 = x2
                    x7 = x3
                    y4 = y0
                    y5 = y1
                    y6 = y2
                    y7 = y3
                    c4 = c0
                    c5 = c1
                    c6 = c2
                    c7 = c3
                    el.x = np.array([x0, x1, x2, x3, x4, x5, x6, x7])
                    el.y = np.array([y0, y1, y2, y3, y4, y5, y6, y7])
                    el.z = np.array([z0, z1, z2, z3, z4, z5, z6, z7])
                    # Set concave sides to negative and convex positive
                    el.c = np.array([c0, c1, c2, c3, c4, c5, c6, c7])
                else:
                    x0 = np.sum(dr_sq[:i])
                    x1 = np.sum(dr_sq[:i+1])
                    x2 = my_math.intersec_ellip_ellip(a_row[1],b_row[1],r_const**2,\
                            a_col[1],b_col[1],r_const**2)
                    x3 = my_math.intersec_ellip_ellip(a_row[1],b_row[1],r_const**2,\
                            a_col[0],b_col[0],r_const**2)
                    y0 = np.sum(dr_sq[:j])
                    y1 = np.sum(dr_sq[:j])
                    y2 = my_math.ellipse(a_row[1],b_row[1],r_const**2,x2)
                    y3 = my_math.ellipse(a_row[1],b_row[1],r_const**2,x3)
                    c0 = 0
                    c1 = my_math.get_rad_ell(b_col[1],a_col[1],r_const**2, (y1+y2)/2)
                    c2 = my_math.get_rad_ell(a_row[1],b_row[1],r_const**2, (x2+x3)/2)
                    c3 = my_math.get_rad_ell(b_col[0],a_col[0],r_const**2, (y0+y3)/2)
                    # Account for 3D
                    x4 = x0
                    x5 = x1
                    x6 = x2
                    x7 = x3
                    y4 = y0
                    y5 = y1
                    y6 = y2
                    y7 = y3
                    c4 = c0
                    c5 = c1
                    c6 = c2
                    c7 = c3
                    el.x = np.array([x0, x1, x2, x3, x4, x5, x6, x7])
                    el.y = np.array([y0, y1, y2, y3, y4, y5, y6, y7])
                    el.z = np.array([z0, z1, z2, z3, z4, z5, z6, z7])
                    # Set concave sides to negative and convex positive
                    el.c = np.array([c0, c1, c2, -c3, c4, c5, c6, -c7])
            elif (j>0 and i==0): # first col
                x0 = np.sum(dr_sq[:i])
                x1 = my_math.intersec_ellip_ellip(a_row[0],b_row[0],r_const**2,\
                        a_col[1],b_col[1],r_const**2)
                x2 = my_math.intersec_ellip_ellip(a_row[1],b_row[1],r_const**2,\
                        a_col[1],b_col[1],r_const**2)
                x3 = np.sum(dr_sq[:i])
                y0 = np.sum(dr_sq[:j])
                y1 = my_math.ellipse(a_row[0],b_row[0],r_const**2,x1)
                y2 = my_math.ellipse(a_row[1],b_row[1],r_const**2,x2)
                y3 = np.sum(dr_sq[:j+1])
                c0 = my_math.get_rad_ell(a_row[0],b_row[0],r_const**2, (x0+x1)/2)
                c1 = my_math.get_rad_ell(b_col[1],a_col[1],r_const**2, (y1+y2)/2)
                c2 = my_math.get_rad_ell(a_row[1],b_row[1],r_const**2, (x2+x3)/2)
                c3 = 0
                # Account for 3D
                x4 = x0
                x5 = x1
                x6 = x2
                x7 = x3
                y4 = y0
                y5 = y1
                y6 = y2
                y7 = y3
                c4 = c0
                c5 = c1
                c6 = c2
                c7 = c3
                el.x = np.array([x0, x1, x2, x3, x4, x5, x6, x7])
                el.y = np.array([y0, y1, y2, y3, y4, y5, y6, y7])
                el.z = np.array([z0, z1, z2, z3, z4, z5, z6, z7])
                # Set concave sides to negative and convex positive
                el.c = np.array([-c0, c1, c2, c3, -c4, c5, c6, c7])
            elif (i> 0 and j>0): # inside
                #find intersection between both ellipses
                x0 = my_math.intersec_ellip_ellip(a_row[0],b_row[0],r_const**2,\
                        a_col[0],b_col[0],r_const**2)
                x1 = my_math.intersec_ellip_ellip(a_row[0],b_row[0],r_const**2,\
                        a_col[1],b_col[1],r_const**2)
                x2 = my_math.intersec_ellip_ellip(a_row[1],b_row[1],r_const**2,\
                        a_col[1],b_col[1],r_const**2)
                x3 = my_math.intersec_ellip_ellip(a_row[1],b_row[1],r_const**2,\
                        a_col[0],b_col[0],r_const**2)
                y0 = my_math.ellipse(a_row[0],b_row[0],r_const**2,x0)
                y1 = my_math.ellipse(a_row[0],b_row[0],r_const**2,x1)
                y2 = my_math.ellipse(a_row[1],b_row[1],r_const**2,x2)
                y3 = my_math.ellipse(a_row[1],b_row[1],r_const**2,x3)
                c0 = my_math.get_rad_ell(a_row[0],b_row[0],r_const**2, (x0+x1)/2)
                c1 = my_math.get_rad_ell(b_col[1],a_col[1],r_const**2, (y1+y2)/2)
                c2 = my_math.get_rad_ell(a_row[1],b_row[1],r_const**2, (x2+x3)/2)
                c3 = my_math.get_rad_ell(b_col[0],a_col[0],r_const**2, (y0+y3)/2)
                # Account for 3D
                x4 = x0
                x5 = x1
                x6 = x2
                x7 = x3
                y4 = y0
                y5 = y1
                y6 = y2
                y7 = y3
                c4 = c0
                c5 = c1
                c6 = c2
                c7 = c3
                el.x = np.array([x0, x1, x2, x3, x4, x5, x6, x7])
                el.y = np.array([y0, y1, y2, y3, y4, y5, y6, y7])
                el.z = np.array([z0, z1, z2, z3, z4, z5, z6, z7])
                # Set concave sides to negative and convex positive
                el.c = np.array([-c0, c1, c2, -c3, -c4, c5, c6, -c7])
            else:
                sys.exit(1)
            #--------------------------------------------------
            # END square
            #--------------------------------------------------
        else:
            # This is the outer, "onion" section
            #--------------------------------------------------
            # OPEN onion
            #--------------------------------------------------
            i = ((n-1)-nSq**2)%(nSq*2) # position in clockwise manner through each layer
            k = abs(i-((nSq*2)-1)) # position in anticlockwise manner
            j = int(((n-1)-nSq**2)/(nSq*2)) # onion like layer number, inner one is first,
            # starting from j=0
            # Determine the semi-major and semi-minor axis for "onion" section
            #----------------------------------------------------------------------
            # NOTE(review): a_interface and b_interface are inherited from the
            # square-branch iterations earlier in this loop -- this relies on the
            # square elements being numbered before the onion elements.
            a_wall = 0.5 # semi-major axis at last layer (wall)
            # Semi minor-axis:
            b_on[0] = np.sum(dr_sq)+np.sum(dr_on[:j])
            b_on[1] = np.sum(dr_sq)+np.sum(dr_on[:j+1])
            # Difference between prescribed semi-major axis at interface square-onion
            # and semi-minor axis at the interface
            a_diff = a_interface - np.sum(dr_sq)
            # Toggle for outermost layer having constant radial size
            if (tog_r_out_const == 1):
                # Set semi-major axis decreasing from the interface value to the last value
                # of semi-minor axis before the wall and finally to a_wall.
                # This ensures that the outermost onion layer has a constant radial size.
                b_last = np.sum(dr_sq)+np.sum(dr_on[:-1])
                if (j < nR-nSq-1): # we are in the inner layers
                    a_on[0] = b_on[0] + a_diff*my_math.sin_dist(nR-nSq, 1, 0, j)
                    a_on[1] = b_on[1] + a_diff*my_math.sin_dist(nR-nSq, 1, 0, j+1)
                else: # we are in the outermost layers
                    a_on[0] = b_last
                    a_on[1] = a_wall
            else:
                if (tog_a_on_dist == 0):
                    a_on[0] = b_on[0] + a_diff*my_math.exp_dist(nR-nSq+1, 1, 0, j)
                    a_on[1] = b_on[1] + a_diff*my_math.exp_dist(nR-nSq+1, 1, 0, j+1)
                elif (tog_a_on_dist == 1):
                    a_on[0] = b_on[0] + a_diff*my_math.sin_dist(nR-nSq+1, 1, 0, j)
                    a_on[1] = b_on[1] + a_diff*my_math.sin_dist(nR-nSq+1, 1, 0, j+1)
                elif (tog_a_on_dist == 2):
                    a_on[0] = b_on[0] + a_diff*my_math.lin_decay(nR-nSq+1, 1, 0, j)
                    a_on[1] = b_on[1] + a_diff*my_math.lin_decay(nR-nSq+1, 1, 0, j+1)
            # Straight line defined by points on intersection square-onion and equidistantly
            # spaced points along circumference
            # OPEN
            #------------------------------
            pt_wall_x[0] = nR*dr*m.cos(m.pi/2*(k/ntheta))
            pt_wall_y[0] = nR*dr*m.sin(m.pi/2*(k/ntheta))
            pt_wall_x[1] = nR*dr*m.cos(m.pi/2*((k+1)/ntheta))
            pt_wall_y[1] = nR*dr*m.sin(m.pi/2*((k+1)/ntheta))
            # Get points along intersection for each element in onion region
            # Note that i and k are different than in square region
            dr_sq_inters_ratio = dr_sq_int_ratio
            x_inters_corner = my_math.intersec_ellip_ellip(a_interface, b_interface, r_const**2,\
                b_interface, a_interface, r_const**2)
            dr_sq_inters_nominal = x_inters_corner/nSq
            x_inters_col_dist = sq_dist(nSq, dr_sq_inters_nominal, dr_sq_inters_ratio)
            x_inters_col[0] = np.sum(x_inters_col_dist[:i])
            x_inters_col[1] = np.sum(x_inters_col_dist[:i+1])
            y_inters_col[0] = my_math.ellipse(a_interface, b_interface, r_const**2, x_inters_col[0])
            y_inters_col[1] = my_math.ellipse(a_interface, b_interface, r_const**2, x_inters_col[1])
            y_inters_row_dist = x_inters_col_dist
            y_inters_row[0] = np.sum(y_inters_row_dist[:k])
            y_inters_row[1] = np.sum(y_inters_row_dist[:k+1])
            x_inters_row[0] = my_math.ellipse(a_interface, b_interface, r_const**2, y_inters_row[0])
            x_inters_row[1] = my_math.ellipse(a_interface, b_interface, r_const**2, y_inters_row[1])
            if (i<nSq): # upper onion part
                # slope of the straight line on the right side of the element
                slope_on[0], y_interc[0] = my_math.get_line_params(pt_wall_x[0], pt_wall_y[0],\
                        x_inters_col[1], y_inters_col[1])
                # slope of the straight line on the left side of the element
                slope_on[1], y_interc[1] = my_math.get_line_params(pt_wall_x[1], pt_wall_y[1],\
                        x_inters_col[0], y_inters_col[0])
            else: # lower onion part
                # slope of the straight line on the bottom side of the element
                slope_on[0], y_interc[0] = my_math.get_line_params(pt_wall_x[0], pt_wall_y[0],\
                        x_inters_row[0], y_inters_row[0])
                # slope of the straight line on the top side of the element
                slope_on[1], y_interc[1] = my_math.get_line_params(pt_wall_x[1], pt_wall_y[1],\
                        x_inters_row[1], y_inters_row[1])
            #------------------------------
            # END
            # Definition of straight lines
            # Set vertex position in onion region as well as curvature
            if (i <= (nSq-1)): # upper part, including border /
                x0 = my_math.intersec_ellip_line(a_on[0],b_on[0],r_const**2,slope_on[1],y_interc[1])
                x1 = my_math.intersec_ellip_line(a_on[0],b_on[0],r_const**2,slope_on[0],y_interc[0])
                x2 = my_math.intersec_ellip_line(a_on[1],b_on[1],r_const**2,slope_on[0],y_interc[0])
                x3 = my_math.intersec_ellip_line(a_on[1],b_on[1],r_const**2,slope_on[1],y_interc[1])
                y0 = my_math.ellipse(a_on[0],b_on[0],r_const**2,x0)
                y1 = my_math.ellipse(a_on[0],b_on[0],r_const**2,x1)
                y2 = my_math.ellipse(a_on[1],b_on[1],r_const**2,x2)
                y3 = my_math.ellipse(a_on[1],b_on[1],r_const**2,x3)
                c0 = my_math.get_rad_ell(a_on[0],b_on[0],r_const**2, (x0+x1)/2)
                c1 = 0
                c2 = my_math.get_rad_ell(a_on[1],b_on[1],r_const**2, (x2+x3)/2)
                c3 = 0
                # Account for 3D
                x4 = x0
                x5 = x1
                x6 = x2
                x7 = x3
                y4 = y0
                y5 = y1
                y6 = y2
                y7 = y3
                c4 = c0
                c5 = c1
                c6 = c2
                c7 = c3
                el.x = np.array([x0, x1, x2, x3, x4, x5, x6, x7])
                el.y = np.array([y0, y1, y2, y3, y4, y5, y6, y7])
                el.z = np.array([z0, z1, z2, z3, z4, z5, z6, z7])
                # Set concave sides to negative and convex positive
                el.c = np.array([-c0, c1, c2, c3, -c4, c5, c6, c7])
            elif (i >= nSq): # lower part, including border /
                # note that semi-major and semi-minor axis are switched
                x0 = my_math.intersec_ellip_line(b_on[0],a_on[0],r_const**2,slope_on[0],y_interc[0])
                x1 = my_math.intersec_ellip_line(b_on[1],a_on[1],r_const**2,slope_on[0],y_interc[0])
                x2 = my_math.intersec_ellip_line(b_on[1],a_on[1],r_const**2,slope_on[1],y_interc[1])
                x3 = my_math.intersec_ellip_line(b_on[0],a_on[0],r_const**2,slope_on[1],y_interc[1])
                y0 = my_math.line(slope_on[0],x0,y_interc[0])
                y1 = my_math.line(slope_on[0],x1,y_interc[0])
                y2 = my_math.line(slope_on[1],x2,y_interc[1])
                y3 = my_math.line(slope_on[1],x3,y_interc[1])
                c0 = 0
                # Note that we use y-coordinates midpoint again as above.
                # We do not need to switch semi-major and semi-minor axis.
                c1 = my_math.get_rad_ell(a_on[1],b_on[1],r_const**2, (y1+y2)/2)
                c2 = 0
                c3 = my_math.get_rad_ell(a_on[0],b_on[0],r_const**2, (y0+y3)/2)
                # Account for 3D
                x4 = x0
                x5 = x1
                x6 = x2
                x7 = x3
                y4 = y0
                y5 = y1
                y6 = y2
                y7 = y3
                c4 = c0
                c5 = c1
                c6 = c2
                c7 = c3
                el.x = np.array([x0, x1, x2, x3, x4, x5, x6, x7])
                el.y = np.array([y0, y1, y2, y3, y4, y5, y6, y7])
                el.z = np.array([z0, z1, z2, z3, z4, z5, z6, z7])
                # Set concave sides to negative and convex positive
                el.c = np.array([c0, c1, c2, -c3, c4, c5, c6, -c7])
def compl_mesh(elements, nR, nSq):
    """ Complete the quarter mesh to a whole cross section.

    elements : list of elements describing the first quadrant (extended in place)
    nR       : number of element layers in radial direction
    nSq      : number of elements along the inner square section

    The first quadrant is mirrored along the y-axis (2nd quadrant), through
    the origin (3rd quadrant), and along the x-axis (4th quadrant).  Vertex
    and curvature entries of each mirrored element are permuted so that the
    resulting elements remain "right-handed".
    """
    # Mirror the first quarter along y-axis to create mesh in 2nd quadrant
    # then for 3rd and finally 4th quadrant
    nel_quarter = nSq**2 + (nR-nSq)*2*nSq # number of elements in one quarter
    el_list_2nd = [] # list for the elements in the 2nd quadrant
    el_list_3rd = [] # list for the elements in the 3rd quadrant
    el_list_4th = [] # list for the elements in the 4th quadrant
    for el in elements:
        # presumably sets el.pos for the quadrant-1 element; the mirrored
        # copies below reuse that position string — TODO confirm
        check_position(el,nR,nSq)
        # First, create elements in 2nd quadrant
        # Define the mirrored element
        mirr_el = elementclass.Element()
        # Element number
        mirr_el.number = el.number+nel_quarter
        # Vertices
        # Note that vertex numbering needs to be adjusted so that "right-handed" elements are created
        mirr_el.x = el.x*(-1)
        # swap values to get right handed element
        mirr_el.x = np.array([mirr_el.x[1], mirr_el.x[0], mirr_el.x[3], mirr_el.x[2],\
                mirr_el.x[5], mirr_el.x[4], mirr_el.x[7], mirr_el.x[6]])
        mirr_el.y = el.y
        mirr_el.y = np.array([mirr_el.y[1], mirr_el.y[0], mirr_el.y[3], mirr_el.y[2],\
                mirr_el.y[5], mirr_el.y[4], mirr_el.y[7], mirr_el.y[6]])
        mirr_el.z = el.z
        # Position
        mirr_el.pos = el.pos
        # Curvature
        # NOTE(review): the curvature permutation differs from the vertex
        # permutation — c appears to be stored per side, not per vertex;
        # confirm against the element definition.
        mirr_el.c = np.array([el.c[0],el.c[3],el.c[2],el.c[1],\
                el.c[4],el.c[7],el.c[6],el.c[5]])
        # Add mirrored element to the list of elements
        el_list_2nd.append(mirr_el)
    for el in elements:
        # Second, create elements in third quadrant
        # (point reflection through the origin: both x and y are negated)
        # Define the mirrored element
        mirr_el = elementclass.Element()
        # Element number
        mirr_el.number = el.number+nel_quarter*2
        # Vertices
        # Note that vertex numbering needs to be adjusted so that "right-handed" elements are created
        mirr_el.x = el.x*(-1)
        # swap values to get right handed element
        mirr_el.x = np.array([mirr_el.x[2], mirr_el.x[3], mirr_el.x[0], mirr_el.x[1],\
                mirr_el.x[6], mirr_el.x[7], mirr_el.x[4], mirr_el.x[5]])
        mirr_el.y = el.y*(-1)
        mirr_el.y = np.array([mirr_el.y[2], mirr_el.y[3], mirr_el.y[0], mirr_el.y[1],\
                mirr_el.y[6], mirr_el.y[7], mirr_el.y[4], mirr_el.y[5]])
        mirr_el.z = el.z
        # Position
        mirr_el.pos = el.pos
        # Curvature
        mirr_el.c = np.array([el.c[2], el.c[3], el.c[0], el.c[1],\
                el.c[6], el.c[7], el.c[4], el.c[5]])
        # Add mirrored element to the list of elements
        el_list_3rd.append(mirr_el)
    for el in elements:
        # Third, create elements in fourth quadrant
        # (mirror along the x-axis: only y is negated)
        # Define the mirrored element
        mirr_el = elementclass.Element()
        # Element number
        mirr_el.number = el.number+nel_quarter*3
        # Vertices
        # Note that vertex numbering needs to be adjusted so that "right-handed" elements are created
        mirr_el.x = el.x
        # swap values to get right handed element
        mirr_el.x = np.array([mirr_el.x[3], mirr_el.x[2], mirr_el.x[1], mirr_el.x[0],\
                mirr_el.x[7], mirr_el.x[6], mirr_el.x[5], mirr_el.x[4]])
        mirr_el.y = el.y*(-1)
        mirr_el.y = np.array([mirr_el.y[3], mirr_el.y[2], mirr_el.y[1], mirr_el.y[0],\
                mirr_el.y[7], mirr_el.y[6], mirr_el.y[5], mirr_el.y[4]])
        mirr_el.z = el.z
        # Position
        mirr_el.pos = el.pos
        # Curvature
        mirr_el.c = np.array([el.c[2], el.c[1], el.c[0], el.c[3],\
                el.c[6], el.c[5], el.c[4], el.c[7]])
        # Add mirrored element to the list of elements
        el_list_4th.append(mirr_el)
    # Add all new elements to the element list
    elements.extend(el_list_2nd)
    elements.extend(el_list_3rd)
    elements.extend(el_list_4th)
def extrude(elements,nR,nSq,nZ,dz):
    """ Extrude the first cross section in z to build the 3D mesh.

    For each of the nZ-1 additional cross sections, every element of the
    base cross section is copied, shifted by a multiple of dz in z, and
    given a correspondingly offset element number.  All new elements are
    appended to `elements` in place.
    """
    nel_cross_section = (nSq**2+(nR-nSq)*nSq*2)*4
    copies = []  # collects the newly created elements of all extra sections
    for layer in range(1, nZ):
        number_offset = nel_cross_section*layer
        z_shift = dz*layer
        # Copy every element of the base (first) cross section
        for base in elements:
            new_el = elementclass.Element()
            # Number: offset by one full cross section per layer
            new_el.number = base.number + number_offset
            # Vertices: x and y unchanged, z shifted by the layer offset
            new_el.x = base.x
            new_el.y = base.y
            new_el.z = base.z + z_shift
            # Position and curvature carry over unchanged
            new_el.pos = base.pos
            new_el.c = base.c
            copies.append(new_el)
    elements.extend(copies)
def set_per_bc(elements, n, cross_sec, n_in_cross, nel_cross_section):
    """ Set periodic boundary conditions at front and back sides and their
    corresponding connected elements and faces.
    elements : list of elements
    n : current element number
    cross_sec : current cross section number
    n_in_cross : current number of element within cross section
    nel_cross_section : total number of elements in each cross section

    Returns a tuple (bc_front, bc_back, el_front, el_back, f_front, f_back):
    BC codes ('E ' internal, 'P ' periodic), connected element numbers and
    connected faces for faces 5 (front) and 6 (back).
    """
    # The connected face is always the opposite side of the neighbour:
    # our face 5 (front) meets its face 6, our face 6 (back) meets its face 5.
    f_front = 6
    f_back = 5
    last_sec = int((elements[-1].number-1)/nel_cross_section)
    if cross_sec == 0:
        # First cross section: the front face wraps periodically onto the
        # matching element of the last cross section.
        bc_front, bc_back = 'P ', 'E '
        el_front = elements[-1].number - nel_cross_section + n_in_cross
        el_back = n_in_cross + nel_cross_section
    elif cross_sec == last_sec:
        # Final cross section: the back face wraps onto the first section.
        bc_front, bc_back = 'E ', 'P '
        el_front = n - nel_cross_section
        el_back = n_in_cross
    else:
        # Interior cross section: plain element-to-element connectivity.
        bc_front, bc_back = 'E ', 'E '
        el_front = n - nel_cross_section
        el_back = n + nel_cross_section
    return (bc_front, bc_back, el_front, el_back, f_front, f_back)
# Set the boundary conditions for each quadrant separately by checking the element's
# position and writing the corresponding BCs into the element's attributes.
# (NOTE: this is a likely source of bugs, since each BC is written manually
# depending on the element's location; despite careful checking, the approach is error-prone.)
def set_bc_q1(elements,nR,nSq,th_bc_type):
    """ Set boundary conditions for each face.
    This is for quadrant 1.

    elements   : list of all elements (modified in place)
    nR         : number of element layers in radial direction
    nSq        : number of elements along the inner square section
    th_bc_type : BC code used for the thermal field at the pipe wall
                 (the fluid field always gets 'W ' there)

    For every element in the first quadrant the following attributes are set,
    one entry per face (6 faces; faces 5/6 are front/back from set_per_bc):
      el.fl_bc     : fluid BC code  ('E ' internal, 'W ' wall)
      el.th_bc     : thermal BC code ('E ' internal, th_bc_type at the wall)
      el.bc_con_el : number of the connected element (0 at walls)
      el.bc_con_f  : face of the connected element   (0 at walls)
    """
    nel_quarter = nSq**2 + (nR-nSq)*2*nSq # number of elements in one quarter
    for el in elements:
        # Check in which cross section we are
        nel_cross_section = (nSq**2+(nR-nSq)*nSq*2)*4
        cross_sec = int((el.number-1)/nel_cross_section)
        # Reduce current element number to be within the first cross section
        n_in_cross = el.number - cross_sec*nel_cross_section
        n = el.number
        # Front/back (z-direction) connectivity is common to all positions
        (bc_front, bc_back, el_front, el_back, f_front, f_back) = \
                set_per_bc(elements, n, cross_sec, n_in_cross, nel_cross_section)
        # only consider elements in the first quadrant
        if (n_in_cross<=nel_quarter):
            position = el.pos
            if (n_in_cross <= nSq**2): # we are in the square section
                i = (n_in_cross-1)%nSq # column number
                j = int((n_in_cross-1)/nSq) # row number
                if (position == 'sq_low_left'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nel_quarter*3, n+1, n+nSq, n+nel_quarter,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_low_right'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nel_quarter*3, n+(nSq**2-(j+1)*nSq)+(2*nSq-j), \
                            n+nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_up_left'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n+1, n+nSq, n+nel_quarter,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_up_right'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n+(nSq**2-(j+1)*nSq)+(2*nSq-j), \
                            n+nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_north_row'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n+1, n+nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_east_col'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n+(nSq**2-(j+1)*nSq)+(2*nSq-j), \
                            n+nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_south_row'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nel_quarter*3, n+1, n+nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_west_col'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n+1, n+nSq, n+nel_quarter,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_internal'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n+1, n+nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                else:
                    print('position of element not found!')
                    sys.exit(1)
            else: # we are in the outer onion like region :-)
                i = ((n_in_cross-1)-nSq**2)%(nSq*2) # position in clockwise manner through each layer
                k = abs(i-((nSq*2)-1)) # position in anticlockwise manner
                j = int(((n_in_cross-1)-nSq**2)/(nSq*2)) # onion like layer number, inner one is first
                if ('on_up' in position): # we are in the upper onion part
                    if (position == 'on_up_south_sq_west_y'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-nSq, n+1, n+2*nSq, n+nel_quarter,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_south_sq_east_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-nSq, n+1, n+2*nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 3, 1, 2, f_front, f_back])
                    elif (position == 'on_up_east_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n+1, n+2*nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 3, 1, 2, f_front, f_back])
                    elif (position == 'on_up_south_sq'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-nSq, n+1, n+2*nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    # Wall positions: face 3 lies on the pipe wall, so there is
                    # no neighbour (element 0, face 0) and the fluid BC is 'W '.
                    elif (position == 'on_up_west_y_north_wall'):
                        el.fl_bc = ['E ','E ','W ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ',th_bc_type,'E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n+1, 0, n+nel_quarter,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 0, 2, f_front, f_back])
                    elif (position == 'on_up_west_y'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n+1, n+2*nSq, n+nel_quarter,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_north_wall_east_edge'):
                        el.fl_bc = ['E ','E ','W ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ',th_bc_type,'E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n+1, 0, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 3, 0, 2, f_front, f_back])
                    elif (position == 'on_up_north_wall'):
                        el.fl_bc = ['E ','E ','W ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ',th_bc_type,'E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n+1, 0, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 0, 2, f_front, f_back])
                    elif (position == 'on_up_intern'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n+1, n+2*nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif ('on_low' in position): # we are in the lower onion part
                    if (position == 'on_low_west_sq_south_x'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+nel_quarter*3, n+nSq*2, n-1, n-(i+1)-(nSq-(k+1))*nSq, \
                                el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_west_sq_north_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n+nSq*2, n-1, n-(i+1)-(nSq-(k+1))*nSq, \
                                el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 2, 2, f_front, f_back])
                    elif (position == 'on_low_west_sq'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n+nSq*2, n-1, n-(i+1)-(nSq-(k+1))*nSq, \
                                el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    # Wall positions in the lower onion part: face 2 is on the wall.
                    elif (position == 'on_low_south_x_east_wall'):
                        el.fl_bc = ['E ','W ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ',th_bc_type,'E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+nel_quarter*3, 0, n-1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 0, 1, 2, f_front, f_back])
                    elif (position == 'on_low_south_x'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+nel_quarter*3, n+nSq*2, n-1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_east_wall_north_edge'):
                        el.fl_bc = ['E ','W ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ',th_bc_type,'E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, 0, n-1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 0, 2, 2, f_front, f_back])
                    elif (position == 'on_low_east_wall'):
                        el.fl_bc = ['E ','W ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ',th_bc_type,'E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, 0, n-1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 0, 1, 2, f_front, f_back])
                    elif (position == 'on_low_north_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n+nSq*2, n-1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 2, 2, f_front, f_back])
                    elif (position == 'on_low_intern'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n+nSq*2, n-1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                else:
                    print('position assignment was not correct!')
                    sys.exit(3)
def set_bc_q2(elements,nR,nSq,th_bc_type):
    """ Set boundary conditions for each face.
    This is for quadrant 2.

    elements   : list of all elements (modified in place)
    nR         : number of element layers in radial direction
    nSq        : number of elements along the inner square section
    th_bc_type : BC code used for the thermal field at the pipe wall
                 (the fluid field always gets 'W ' there)

    For every element in the second quadrant the following attributes are set,
    one entry per face (6 faces; faces 5/6 are front/back from set_per_bc):
      el.fl_bc     : fluid BC code  ('E ' internal, 'W ' wall)
      el.th_bc     : thermal BC code ('E ' internal, th_bc_type at the wall)
      el.bc_con_el : number of the connected element (0 at walls)
      el.bc_con_f  : face of the connected element   (0 at walls)

    The connectivity mirrors set_bc_q1; neighbour indices and faces differ
    because the quadrant-2 elements were mirrored along the y-axis.
    """
    nel_quarter = nSq**2 + (nR-nSq)*2*nSq # number of elements in one quarter
    for el in elements:
        # Check in which cross section we are
        nel_cross_section = (nSq**2+(nR-nSq)*nSq*2)*4
        cross_sec = int((el.number-1)/nel_cross_section)
        # Reduce current element number to be within the first cross section
        n_in_cross = el.number - cross_sec*nel_cross_section
        n = el.number
        # Front/back (z-direction) connectivity is common to all positions
        (bc_front, bc_back, el_front, el_back, f_front, f_back) = \
                set_per_bc(elements, n, cross_sec, n_in_cross, nel_cross_section)
        # only consider elements in the second quadrant
        if (n_in_cross>nel_quarter and n_in_cross<=2*nel_quarter):
            position = el.pos
            if (n_in_cross-nel_quarter <= nSq**2): # we are in the square section
                i = (n_in_cross-nel_quarter-1)%nSq # column number
                j = int((n_in_cross-nel_quarter-1)/nSq) # row number
                if (position == 'sq_low_left'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nel_quarter, n-nel_quarter, n+nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_low_right'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nel_quarter, n-1, n+nSq, \
                            n+(nSq**2-(j+1)*nSq)+(2*nSq-j),el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_up_left'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n-nel_quarter, n+nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_up_right'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n-1, n+nSq, \
                            n+(nSq**2-(j+1)*nSq)+(2*nSq-j),el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_north_row'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n-1, n+nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_east_col'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n-1, n+nSq, \
                            n+(nSq**2-(j+1)*nSq)+(2*nSq-j),el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_south_row'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nel_quarter, n-1, n+nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_west_col'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n-nel_quarter, n+nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_internal'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n-nSq, n-1, n+nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                else:
                    print('position of element not found!')
                    sys.exit(1)
            else: # we are in the outer onion like region :-)
                i = ((n_in_cross-nel_quarter-1)-nSq**2)%(nSq*2) # position in clockwise manner through each layer
                k = abs(i-((nSq*2)-1)) # position in anticlockwise manner
                j = int(((n_in_cross-nel_quarter-1)-nSq**2)/(nSq*2)) # onion like layer number, inner one is first
                if ('on_up' in position): # we are in the upper onion part
                    if (position == 'on_up_south_sq_west_y'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-nSq, n-nel_quarter, n+2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_south_sq_east_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-nSq, n-1, n+2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 3, f_front, f_back])
                    elif (position == 'on_up_east_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n-1, n+2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 3, f_front, f_back])
                    elif (position == 'on_up_south_sq'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-nSq, n-1, n+2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    # Wall positions: face 3 lies on the pipe wall, so there is
                    # no neighbour (element 0, face 0) and the fluid BC is 'W '.
                    elif (position == 'on_up_west_y_north_wall'):
                        el.fl_bc = ['E ','E ','W ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ',th_bc_type,'E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n-nel_quarter, 0, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 0, 2, f_front, f_back])
                    elif (position == 'on_up_west_y'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n-nel_quarter, n+2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_north_wall_east_edge'):
                        el.fl_bc = ['E ','E ','W ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ',th_bc_type,'E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n-1, 0, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 0, 3, f_front, f_back])
                    elif (position == 'on_up_north_wall'):
                        el.fl_bc = ['E ','E ','W ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ',th_bc_type,'E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n-1, 0, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 0, 2, f_front, f_back])
                    elif (position == 'on_up_intern'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-2*nSq, n-1, n+2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif ('on_low' in position): # we are in the lower onion part
                    if (position == 'on_low_west_sq_south_x'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+nel_quarter, n-(i+1)-(nSq-(k+1))*nSq, \
                                n-1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_west_sq_north_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n-(i+1)-(nSq-(k+1))*nSq, \
                                n-1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 4, 2, f_front, f_back])
                    elif (position == 'on_low_west_sq'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n-(i+1)-(nSq-(k+1))*nSq, \
                                n-1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    # Wall positions in the lower onion part: face 4 is on the wall.
                    elif (position == 'on_low_south_x_east_wall'):
                        el.fl_bc = ['E ','E ','E ','W ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ',th_bc_type,bc_front,bc_back]
                        el.bc_con_el = np.array([n+nel_quarter, n-nSq*2, n-1, 0,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 0, f_front, f_back])
                    elif (position == 'on_low_south_x'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+nel_quarter, n-nSq*2, n-1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_east_wall_north_edge'):
                        el.fl_bc = ['E ','E ','E ','W ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ',th_bc_type,bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n-nSq*2, n-1, 0,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 4, 0, f_front, f_back])
                    elif (position == 'on_low_east_wall'):
                        el.fl_bc = ['E ','E ','E ','W ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ',th_bc_type,bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n-nSq*2, n-1, 0,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 0, f_front, f_back])
                    elif (position == 'on_low_north_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n-nSq*2, n-1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 4, 2, f_front, f_back])
                    elif (position == 'on_low_intern'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+1, n-nSq*2, n-1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                else:
                    print('position assignment was not correct!')
                    sys.exit(3)
def set_bc_q3(elements,nR,nSq,th_bc_type):
    """ Set boundary conditions for each face.
    This is for quadrant 3.

    elements   : list of all elements (modified in place)
    nR         : number of element layers in radial direction
    nSq        : number of elements along the inner square section
    th_bc_type : BC code used for the thermal field at the pipe wall
                 (the fluid field always gets 'W ' there)

    For every element in the third quadrant the following attributes are set,
    one entry per face (6 faces; faces 5/6 are front/back from set_per_bc):
      el.fl_bc     : fluid BC code  ('E ' internal, 'W ' wall)
      el.th_bc     : thermal BC code ('E ' internal, th_bc_type at the wall)
      el.bc_con_el : number of the connected element (0 at walls)
      el.bc_con_f  : face of the connected element   (0 at walls)

    The connectivity mirrors set_bc_q1; neighbour indices and faces differ
    because the quadrant-3 elements were reflected through the origin.
    """
    nel_quarter = nSq**2 + (nR-nSq)*2*nSq # number of elements in one quarter
    for el in elements:
        # Check in which cross section we are
        nel_cross_section = (nSq**2+(nR-nSq)*nSq*2)*4
        cross_sec = int((el.number-1)/nel_cross_section)
        # Reduce current element number to be within the first cross section
        n_in_cross = el.number - cross_sec*nel_cross_section
        n = el.number
        # Front/back (z-direction) connectivity is common to all positions
        (bc_front, bc_back, el_front, el_back, f_front, f_back) = \
                set_per_bc(elements, n, cross_sec, n_in_cross, nel_cross_section)
        # only consider elements in the third quadrant
        if (n_in_cross>2*nel_quarter and n_in_cross<=3*nel_quarter):
            position = el.pos
            if (n_in_cross-nel_quarter*2 <= nSq**2): # we are in the square section
                i = (n_in_cross-nel_quarter*2-1)%nSq # column number
                j = int((n_in_cross-nel_quarter*2-1)/nSq) # row number
                if (position == 'sq_low_left'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+nel_quarter, n-nel_quarter, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_low_right'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n-1, n-nel_quarter, \
                            n+(nSq**2-(j+1)*nSq)+(2*nSq-j),el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_up_left'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+nel_quarter, n-nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_up_right'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n-1, n-nSq, \
                            n+(nSq**2-(j+1)*nSq)+(2*nSq-j),el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_north_row'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n-1, n-nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_east_col'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n-1, n-nSq, \
                            n+(nSq**2-(j+1)*nSq)+(2*nSq-j),el_front,el_back])
                    # (a redundant duplicate of the following assignment was removed)
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_south_row'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n-1, n-nel_quarter, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_west_col'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+nel_quarter, n-nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_internal'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n-1, n-nSq, n+1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                else:
                    print('position of element not found!')
                    sys.exit(1)
            else: # we are in the outer onion like region :-)
                i = ((n_in_cross-nel_quarter*2-1)-nSq**2)%(nSq*2) # position in clockwise manner through each layer
                k = abs(i-((nSq*2)-1)) # position in anticlockwise manner
                j = int(((n_in_cross-nel_quarter*2-1)-nSq**2)/(nSq*2)) # onion like layer number, inner one is first
                if ('on_up' in position): # we are in the upper onion part
                    if (position == 'on_up_south_sq_west_y'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n+nel_quarter, n-nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_south_sq_east_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n-1, n-nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 1, f_front, f_back])
                    elif (position == 'on_up_east_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n-1, n-2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 1, f_front, f_back])
                    elif (position == 'on_up_south_sq'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n-1, n-nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    # Wall positions: face 1 lies on the pipe wall, so there is
                    # no neighbour (element 0, face 0) and the fluid BC is 'W '.
                    elif (position == 'on_up_west_y_north_wall'):
                        el.fl_bc = ['W ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = [th_bc_type,'E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([0, n+nel_quarter, n-2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([0, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_west_y'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n+nel_quarter, n-2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_north_wall_east_edge'):
                        el.fl_bc = ['W ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = [th_bc_type,'E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([0, n-1, n-2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([0, 4, 1, 1, f_front, f_back])
                    elif (position == 'on_up_north_wall'):
                        el.fl_bc = ['W ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = [th_bc_type,'E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([0, n-1, n-2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([0, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_intern'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n-1, n-2*nSq, n+1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif ('on_low' in position): # we are in the lower onion part
                    if (position == 'on_low_west_sq_south_x'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-(i+1)-(nSq-(k+1))*nSq, \
                                n-nel_quarter, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_west_sq_north_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-(i+1)-(nSq-(k+1))*nSq, \
                                n+1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([4, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_west_sq'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-(i+1)-(nSq-(k+1))*nSq, \
                                n+1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    # Wall positions in the lower onion part: face 4 is on the wall.
                    elif (position == 'on_low_south_x_east_wall'):
                        el.fl_bc = ['E ','E ','E ','W ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ',th_bc_type,bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-nSq*2, n-nel_quarter, 0,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 0, f_front, f_back])
                    elif (position == 'on_low_south_x'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-nSq*2, n-nel_quarter, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_east_wall_north_edge'):
                        el.fl_bc = ['E ','E ','E ','W ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ',th_bc_type,bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-nSq*2, n+1, 0,el_front,el_back])
                        el.bc_con_f = np.array([4, 4, 1, 0, f_front, f_back])
                    elif (position == 'on_low_east_wall'):
                        el.fl_bc = ['E ','E ','E ','W ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ',th_bc_type,bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-nSq*2, n+1, 0,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 0, f_front, f_back])
                    elif (position == 'on_low_north_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-nSq*2, n+1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([4, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_intern'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n-nSq*2, n+1, n+nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                else:
                    print('position assignment was not correct!')
                    sys.exit(3)
def set_bc_q4(elements,nR,nSq,th_bc_type):
    """ Set boundary conditions for each face.
    This is for quadrant 4.

    For every element belonging to quadrant 4 this fills in:
      * el.fl_bc / el.th_bc : per-face BC codes for the fluid / thermal
        field ('E ' = element-element connection, 'W ' = wall; the
        thermal wall code is the caller-supplied th_bc_type),
      * el.bc_con_el : connected element number per face (0 on a wall),
      * el.bc_con_f  : face number of the connected element (0 on a wall).

    Which neighbour formula applies is selected via el.pos, which must
    have been assigned beforehand (see check_position).  Faces 5 and 6
    (front/back, z direction) come from set_per_bc.

    :param elements: all element objects (el.number is 1-based)
    :param nR: number of element layers in radial direction
    :param nSq: number of elements along the inner square section
    :param th_bc_type: thermal BC code used on wall faces
    """
    nel_quarter = nSq**2 + (nR-nSq)*2*nSq # number of elements in one quarter
    for el in elements:
        # Check in which cross section we are
        nel_cross_section = (nSq**2+(nR-nSq)*nSq*2)*4
        cross_sec = int((el.number-1)/nel_cross_section)
        # Reduce current element number to be within the first cross section
        n_in_cross = el.number - cross_sec*nel_cross_section
        n = el.number
        # Front/back (z-direction) codes and connectivity for faces 5/6.
        (bc_front, bc_back, el_front, el_back, f_front, f_back) = \
            set_per_bc(elements, n, cross_sec, n_in_cross, nel_cross_section)
        # only consider elements in the fourth quadrant
        if (n_in_cross>3*nel_quarter and n_in_cross<=4*nel_quarter):
            position = el.pos
            if (n_in_cross-nel_quarter*3 <= nSq**2): # we are in the square section
                i = (n_in_cross-nel_quarter*3-1)%nSq # column number
                j = int((n_in_cross-nel_quarter*3-1)/nSq) # row number
                if (position == 'sq_low_left'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+1, n-nel_quarter*3, n-nel_quarter,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_low_right'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+(nSq**2-(j+1)*nSq)+(2*nSq-j), \
                        n-nel_quarter*3, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_up_left'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+1, n-nSq, n-nel_quarter,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_up_right'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+(nSq**2-(j+1)*nSq)+(2*nSq-j), \
                        n-nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_north_row'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+1, n-nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_east_col'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+(nSq**2-(j+1)*nSq)+(2*nSq-j), \
                        n-nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_south_row'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+1, n-nel_quarter*3, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_west_col'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+1, n-nSq, n-nel_quarter,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif (position == 'sq_internal'):
                    el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                    el.bc_con_el = np.array([n+nSq, n+1, n-nSq, n-1,el_front,el_back])
                    el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                else:
                    print('position of element not found!')
                    sys.exit(1)
            else: # we are in the outer onion like region :-)
                i = ((n_in_cross-nel_quarter*3-1)-nSq**2)%(nSq*2) # position in clockwise manner through each layer
                k = abs(i-((nSq*2)-1)) # position in anticlockwise manner
                j = int(((n_in_cross-nel_quarter*3-1)-nSq**2)/(nSq*2)) # onion like layer number, inner one is first
                if ('on_up' in position): # we are in the upper onion part
                    if (position == 'on_up_south_sq_west_y'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n+1, n-nSq, n-nel_quarter,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_south_sq_east_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n+1, n-nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 1, 1, 4, f_front, f_back])
                    elif (position == 'on_up_east_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n+1, n-2*nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 1, 1, 4, f_front, f_back])
                    elif (position == 'on_up_south_sq'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n+1, n-nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_west_y_north_wall'):
                        # Face 1 lies on the pipe wall: 'W ' for flow, th_bc_type for heat.
                        el.fl_bc = ['W ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = [th_bc_type,'E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([0, n+1, n-2*nSq, n-nel_quarter,el_front,el_back])
                        el.bc_con_f = np.array([0, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_west_y'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n+1, n-2*nSq, n-nel_quarter,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_north_wall_east_edge'):
                        el.fl_bc = ['W ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = [th_bc_type,'E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([0, n+1, n-2*nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([0, 1, 1, 4, f_front, f_back])
                    elif (position == 'on_up_north_wall'):
                        el.fl_bc = ['W ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = [th_bc_type,'E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([0, n+1, n-2*nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([0, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_up_intern'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n+2*nSq, n+1, n-2*nSq, n-1,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                elif ('on_low' in position): # we are in the lower onion part
                    if (position == 'on_low_west_sq_south_x'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n+nSq*2, n-nel_quarter*3, n-(i+1)-(nSq-(k+1))*nSq, \
                            el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_west_sq_north_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n+nSq*2, n+1, n-(i+1)-(nSq-(k+1))*nSq, \
                            el_front,el_back])
                        el.bc_con_f = np.array([2, 4, 1, 4, f_front, f_back])
                    elif (position == 'on_low_west_sq'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n+nSq*2, n+1, n-(i+1)-(nSq-(k+1))*nSq, \
                            el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_south_x_east_wall'):
                        # Face 2 lies on the pipe wall in the lower onion part.
                        el.fl_bc = ['E ','W ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ',th_bc_type,'E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, 0, n-nel_quarter*3, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 0, 1, 4, f_front, f_back])
                    elif (position == 'on_low_south_x'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n+nSq*2, n-nel_quarter*3, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_east_wall_north_edge'):
                        el.fl_bc = ['E ','W ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ',th_bc_type,'E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, 0, n+1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([2, 0, 1, 1, f_front, f_back])
                    elif (position == 'on_low_east_wall'):
                        el.fl_bc = ['E ','W ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ',th_bc_type,'E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, 0, n+1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 0, 1, 4, f_front, f_back])
                    elif (position == 'on_low_north_edge'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n+nSq*2, n+1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([2, 4, 1, 2, f_front, f_back])
                    elif (position == 'on_low_intern'):
                        el.fl_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.th_bc = ['E ','E ','E ','E ',bc_front,bc_back]
                        el.bc_con_el = np.array([n-1, n+nSq*2, n+1, n-nSq*2,el_front,el_back])
                        el.bc_con_f = np.array([3, 4, 1, 2, f_front, f_back])
                else:
                    print('position assignment was not correct!')
                    sys.exit(3)
def check_position(element, nR, nSq):
    """ Check position of the given element within the quarter region
    and save it as an element attribute.
    This is needed for choosing the right boundary conditions.

    Sets element.pos to one of the 'sq_*' labels (inner square section)
    or 'on_up_*'/'on_low_*' labels (outer onion region); the set_bc_q*
    functions dispatch on these labels.

    :param element: element object with a 1-based ``number`` attribute
    :param nR: number of element layers in radial direction
    :param nSq: number of elements along the inner square section
    """
    el = element
    # Check in which cross section we are
    nel_cross_section = (nSq**2+(nR-nSq)*nSq*2)*4
    cross_sec = int((el.number-1)/nel_cross_section)
    # Reduce current element number to be within the first cross section
    n = el.number - cross_sec*nel_cross_section
    # n = el.number
    if (n <= nSq**2): # we are in the square section
        if (n == 1 or n == nSq or n == nSq**2-nSq+1 or n == nSq**2): # corners
            if (n == 1): # we are on the first element on the lower left
                el.pos = 'sq_low_left'
            elif (n == nSq): # we are on the lower right corner
                el.pos = 'sq_low_right'
            elif (n == nSq**2-nSq+1): # we are on the upper left corner
                el.pos = 'sq_up_left'
            elif (n == nSq**2): # last element on the upper right
                el.pos = 'sq_up_right'
            return
        # NOTE(review): for a row-major numbering starting at 1 the western
        # column satisfies n%nSq == 1, not n%(nSq+1) == 0.  The latter only
        # matches a diagonal (e.g. n=5 but not n=9 for nSq=4), so western
        # column elements may fall through to 'sq_internal' -- confirm the
        # intended numbering scheme before relying on 'sq_west_col'.
        elif (n > nSq**2-nSq or n%nSq == 0 or n < nSq or n%(nSq+1) == 0): # edges
            if (n > nSq**2-nSq): # northern row
                el.pos = 'sq_north_row'
            elif ((n%nSq) == 0): # eastern column
                el.pos = 'sq_east_col'
            elif (n < nSq): # southern row
                el.pos = 'sq_south_row'
            elif ((n%(nSq+1)) == 0): # western column
                el.pos = 'sq_west_col'
            return
        else: # interior
            el.pos = 'sq_internal'
            return
    else: # we are in the onion region
        i = ((n-1)-nSq**2)%(nSq*2) # position in clockwise manner through each layer
        k = abs(i-((nSq*2)-1)) # position in anticlockwise manner
        j = int(((n-1)-nSq**2)/(nSq*2)) # onion like layer number, inner one is first
        if (i<nSq): ## Upper part
            ## 1-sided special treatment
            # southern faces with square section
            if (j==0):
                if (i==0): # south square and west y=0
                    el.pos = 'on_up_south_sq_west_y'
                    return
                elif (i==nSq-1): # south square and east edge
                    el.pos = 'on_up_south_sq_east_edge'
                    return
                else:
                    el.pos = 'on_up_south_sq'
                    return
            # western faces with y=0
            elif (i==0):
                if (j==nR-nSq-1): # western face y=0 and northern wall
                    el.pos = 'on_up_west_y_north_wall'
                    return
                else:
                    el.pos = 'on_up_west_y'
                    return
            # northern faces at wall
            elif (j==(nR-nSq)-1):
                if (i==nSq-1): # north wall east edge
                    el.pos = 'on_up_north_wall_east_edge'
                    return
                else:
                    el.pos = 'on_up_north_wall'
                    return
            elif (i==nSq-1): # east edge
                el.pos = 'on_up_east_edge'
                return
            ## internal upper part
            else:
                el.pos = 'on_up_intern'
                return
        elif (k<nSq): ## Lower part
            # western faces with square section
            if (j==0):
                if (k==0): # western face square and south x=0
                    el.pos = 'on_low_west_sq_south_x'
                    return
                elif (k==nSq-1): # western face square and northern edge
                    el.pos = 'on_low_west_sq_north_edge'
                    return
                else:
                    el.pos = 'on_low_west_sq'
                    return
            # southern faces with x=0
            elif (k==0):
                if (j==nR-nSq-1): # south x=0 east wall
                    el.pos = 'on_low_south_x_east_wall'
                    return
                else:
                    el.pos = 'on_low_south_x'
                    return
            # eastern faces at wall
            elif (j==(nR-nSq)-1):
                if (k==nSq-1): # east wall north edge
                    el.pos = 'on_low_east_wall_north_edge'
                    return
                else:
                    el.pos = 'on_low_east_wall'
                    return
            elif (k==nSq-1): # north edge
                el.pos = 'on_low_north_edge'
                return
            ## interal lower part
            else:
                el.pos = 'on_low_intern'
                return
        else:
            print('Error in Position onion region.')
            sys.exit(2)
def write_mesh(elements, nR, nSq, dimension, fname):
    """ Write vertex locations to the rea file.

    The mesh block is inserted right after the 'MESH DATA' marker line
    already present in ``fname``.

    :param elements: element objects with ``x``, ``y``, ``z`` corner
                     coordinate arrays and a 1-based ``number`` attribute
    :param nR: number of element layers in radial direction
    :param nSq: number of elements along the inner square section
    :param dimension: 2 or 3; for 2D only one cross section is written
    :param fname: rea file to update (must contain a 'MESH DATA' line)
    """
    mesh = []
    if dimension > 2:
        n_tot = len(elements)
    else:
        # 2D: restrict to the elements of a single cross section.
        n_tot = (nSq**2 + (nR-nSq)*2*nSq)*4
    elements = elements[0:n_tot]
    mesh.append('{0:10d} {1:10d} {2:10d} NEL,NDIM,NELV\n'.format(n_tot, dimension, n_tot))
    for el in elements:  # loop through all elements
        x = el.x
        y = el.y
        z = el.z
        n = el.number
        mesh.append('{0:>19s} {1:10d} {2:6s}{3:1s}{4:12s}'.format(
            'ELEMENT', n, '[ 1', 'a', '] GROUP 0\n'))
        # Corners 1-4: x row then y row (and z for 3D).
        mesh.append('{0: 10.6E}{1: 14.6E}{2: 14.6E}{3: 14.6E} {4:s}'.format(
            x[0], x[1], x[2], x[3], '\n'))  # x coordinates
        mesh.append('{0: 10.6E}{1: 14.6E}{2: 14.6E}{3: 14.6E} {4:s}'.format(
            y[0], y[1], y[2], y[3], '\n'))  # y coordinates
        if dimension > 2:
            # 3D: z row for corners 1-4, then x/y/z rows for corners 5-8.
            mesh.append('{0: 10.6E}{1: 14.6E}{2: 14.6E}{3: 14.6E} {4:s}'.format(
                z[0], z[1], z[2], z[3], '\n'))  # z coordinates
            mesh.append('{0: 10.6E}{1: 14.6E}{2: 14.6E}{3: 14.6E} {4:s}'.format(
                x[4], x[5], x[6], x[7], '\n'))  # x coordinates
            mesh.append('{0: 10.6E}{1: 14.6E}{2: 14.6E}{3: 14.6E} {4:s}'.format(
                y[4], y[5], y[6], y[7], '\n'))  # y coordinates
            mesh.append('{0: 10.6E}{1: 14.6E}{2: 14.6E}{3: 14.6E} {4:s}'.format(
                z[4], z[5], z[6], z[7], '\n'))  # z coordinates
    # Splice the block in after the marker line.  Context managers close
    # the file handles even on error (the original open/close pair could
    # leak a handle if an exception occurred in between).
    with open(fname, 'r') as fh:
        contents = fh.readlines()
    # find row for mesh data
    line_mesh = 0
    while 'MESH DATA' not in contents[line_mesh]:
        line_mesh += 1
    # and write it to rea file
    contents[line_mesh+1:line_mesh+1] = mesh
    with open(fname, 'w') as fh:
        fh.write("".join(contents))
def write_curv(elements, nR, nSq, dimension, fname):
    """ Write curvature information to the rea file.
    Note that only curved sides are allowed to be printed here.

    The block is inserted right after the 'CURVED SIDE DATA' marker line
    already present in ``fname``; edges whose ``el.c`` entry is (numerically)
    zero are skipped.

    :param elements: element objects with per-edge curvature array ``c``
                     and a 1-based ``number`` attribute
    :param nR: number of element layers in radial direction
    :param nSq: number of elements along the inner square section
    :param dimension: 2 or 3; selects 4 or 8 edges per element
    :param fname: rea file to update (must contain 'CURVED SIDE DATA')
    """
    if dimension > 2:
        n_tot = len(elements)
        n_edges = 8
    else:
        # 2D: restrict to the elements of a single cross section.
        n_tot = (nSq**2 + (nR-nSq)*2*nSq)*4
        n_edges = 4
    elements = elements[0:n_tot]
    # Count the curved edges first: the header line needs the total.
    num_curv = sum(1 for el in elements
                   for edge in range(n_edges) if abs(el.c[edge]) > 1e-15)
    curv = ['{0:10d} Curved sides follow IEDGE,IEL,CURVE(I),I=1,5, CCURVE\n'
            .format(num_curv)]
    # Check number of curved sides for correct formatting of curved side data
    # (see Nek5000 user doc. p. 20)
    if n_tot < 1e3:
        format_str = ('{iedge:3d}{current_el:3d}'
                      '{curve1:14.6f}{curve2:14.6f}{curve3:14.6f}'
                      '{curve4:14.6f}{curve5:14.6f} {ccurve:s}{newline:s}')
    elif n_tot < 1e6:
        format_str = ('{iedge:2d}{current_el:6d}'
                      '{curve1:14.6f}{curve2:14.6f}{curve3:14.6f}'
                      '{curve4:14.6f}{curve5:14.6f} {ccurve:s}{newline:s}')
    else:
        format_str = ('{iedge:2d}{current_el:12}'
                      '{curve1:14.6f}{curve2:14.6f}{curve3:14.6f}'
                      '{curve4:14.6f}{curve5:14.6f} {ccurve:s}{newline:s}')
    for el in elements:
        for edge in range(n_edges):
            if abs(el.c[edge]) > 1e-15:
                curv.append(format_str.format(
                    iedge=edge+1, current_el=el.number,
                    curve1=el.c[edge], curve2=0.0, curve3=0.0, curve4=0.0,
                    curve5=0.0, ccurve='C', newline='\n'))
    # Splice the block in after the marker line.  Context managers close
    # the file handles even on error (the original open/close pair could
    # leak a handle if an exception occurred in between).
    with open(fname, 'r') as fh:
        contents = fh.readlines()
    # find row for curv data
    line_curv = 0
    while 'CURVED SIDE DATA' not in contents[line_curv]:
        line_curv += 1
    # and write it to rea file
    contents[line_curv+1:line_curv+1] = curv
    with open(fname, 'w') as fh:
        fh.write("".join(contents))
def write_fl_bc(elements, nR, nSq, dimension, fname):
    """ Write fluid boundary conditions to the rea file.

    One line per element face is inserted right after the
    'FLUID BOUNDARY CONDITIONS' marker line already present in ``fname``.

    :param elements: element objects with ``fl_bc`` (per-face BC code,
                     e.g. 'E '/'W '), ``bc_con_el`` (connected element
                     number), ``bc_con_f`` (connected face number) and
                     a 1-based ``number`` attribute
    :param nR: number of element layers in radial direction
    :param nSq: number of elements along the inner square section
    :param dimension: 2 or 3; selects 4 or 6 faces per element
    :param fname: rea file to update
    """
    if dimension > 2:
        n_tot = len(elements)
        n_faces = 6
    else:
        # 2D: restrict to the elements of a single cross section.
        n_tot = (nSq**2 + (nR-nSq)*2*nSq)*4
        n_faces = 4
    elements = elements[0:n_tot]
    # Use different format depending on number of elements (see genmap.f).
    if n_tot < 1e3:
        format_str = (' {boundary:3s}{current_el:3d}{face:3d}{con_el:14.6f}'
                      '{con_f:14.6f}{zero1:14.6f}{zero2:14.6f}{zero3:14.6f}'
                      '{newline:s}')
    elif n_tot < 1e6:
        # The original code had two byte-identical branches for
        # n_tot < 1e5 and n_tot < 1e6; they are merged here.
        format_str = (' {boundary:3s}{current_el:5d}{face:1d}{con_el:14.6f}'
                      '{con_f:14.6f}{zero1:14.6f}{zero2:14.6f}{zero3:14.6f}'
                      '{newline:s}')
    else:
        format_str = (' {boundary:3s}{current_el:11}{face:1d}{con_el:18.11f}'
                      '{con_f:18.11f}{zero1:18.11f}{zero2:18.11f}{zero3:18.11f}'
                      '{newline:s}')
    bc = []
    for el in elements:
        for face in range(n_faces):
            bc.append(format_str.format(
                boundary=el.fl_bc[face], current_el=el.number, face=face+1,
                con_el=el.bc_con_el[face], con_f=el.bc_con_f[face],
                zero1=0.0, zero2=0.0, zero3=0.0, newline='\n'))
    # Splice the block in after the marker line; context managers close
    # the handles even on error (the original open/close pair could leak).
    with open(fname, 'r') as fh:
        contents = fh.readlines()
    # find row for bc data
    line_bc = 0
    while 'FLUID BOUNDARY CONDITIONS' not in contents[line_bc]:
        line_bc += 1
    # and write it to rea file
    contents[line_bc+1:line_bc+1] = bc
    with open(fname, 'w') as fh:
        fh.write("".join(contents))
def write_th_bc(elements, nR, nSq, dimension, fname):
    """ Write thermal boundary conditions to the rea file.

    (The original docstring said "fluid"; this writer emits ``el.th_bc``
    after the 'THERMAL BOUNDARY CONDITIONS' marker line.)

    One line per element face is inserted right after the marker line
    already present in ``fname``.

    :param elements: element objects with ``th_bc`` (per-face BC code),
                     ``bc_con_el`` (connected element number),
                     ``bc_con_f`` (connected face number) and a 1-based
                     ``number`` attribute
    :param nR: number of element layers in radial direction
    :param nSq: number of elements along the inner square section
    :param dimension: 2 or 3; selects 4 or 6 faces per element
    :param fname: rea file to update
    """
    if dimension > 2:
        n_tot = len(elements)
        n_faces = 6
    else:
        # 2D: restrict to the elements of a single cross section.
        n_tot = (nSq**2 + (nR-nSq)*2*nSq)*4
        n_faces = 4
    elements = elements[0:n_tot]
    # Use different format depending on number of elements (see genmap.f).
    if n_tot < 1e3:
        format_str = (' {boundary:3s}{current_el:3d}{face:3d}{con_el:14.6f}'
                      '{con_f:14.6f}{zero1:14.6f}{zero2:14.6f}{zero3:14.6f}'
                      '{newline:s}')
    elif n_tot < 1e6:
        # The original code had two byte-identical branches for
        # n_tot < 1e5 and n_tot < 1e6; they are merged here.
        format_str = (' {boundary:3s}{current_el:5d}{face:1d}{con_el:14.6f}'
                      '{con_f:14.6f}{zero1:14.6f}{zero2:14.6f}{zero3:14.6f}'
                      '{newline:s}')
    else:
        format_str = (' {boundary:3s}{current_el:11}{face:1d}{con_el:18.11f}'
                      '{con_f:18.11f}{zero1:18.11f}{zero2:18.11f}{zero3:18.11f}'
                      '{newline:s}')
    bc = []
    for el in elements:
        for face in range(n_faces):
            bc.append(format_str.format(
                boundary=el.th_bc[face], current_el=el.number, face=face+1,
                con_el=el.bc_con_el[face], con_f=el.bc_con_f[face],
                zero1=0.0, zero2=0.0, zero3=0.0, newline='\n'))
    # Splice the block in after the marker line; context managers close
    # the handles even on error (the original open/close pair could leak).
    with open(fname, 'r') as fh:
        contents = fh.readlines()
    # find row for bc data
    line_bc = 0
    while 'THERMAL BOUNDARY CONDITIONS' not in contents[line_bc]:
        line_bc += 1
    # and write it to rea file
    contents[line_bc+1:line_bc+1] = bc
    with open(fname, 'w') as fh:
        fh.write("".join(contents))
def rea_skel(dimension, if_therm, fname):
    """ Create a skeleton base.rea file.

    Writes a complete Nek5000 rea file scaffold with default parameters;
    the MESH DATA / CURVED SIDE DATA / BOUNDARY CONDITIONS sections are
    left empty so that write_mesh, write_curv, write_fl_bc and
    write_th_bc can splice their blocks in afterwards.

    :param dimension: 2 or 3, written into the 'DIMENSIONAL RUN' line
    :param if_therm: if truthy, enables IFHEAT and the thermal BC section
    :param fname: path of the rea file to create (overwritten)
    """
    reafile = fname
    f = open(reafile, 'w')
    # write some default parameters
    f.write('****** PARAMETERS ******\n')
    f.write(' 2.6100 NEKTON VERSION\n')
    f.write(' {:d} DIMENSIONAL RUN\n'.format(dimension))
    f.write(' 118 PARAMETERS FOLLOW\n')
    f.write(' 1.00000 P001: DENSITY\n')
    f.write(' -5300.00 P002: VISCOSITY\n')
    f.write(' 0.00000 P003: BETAG\n')
    f.write(' 0.00000 P004: GTHETA\n')
    f.write(' 0.00000 P005: PGRADX\n')
    f.write(' 0.00000 P006: \n')
    f.write(' 1.00000 P007: RHOCP\n')
    f.write(' 1.00000 P008: CONDUCT\n')
    f.write(' 0.00000 P009: \n')
    f.write(' 0.00000 P010: FINTIME\n')
    f.write(' 103.000 P011: NSTEPS\n')
    f.write(' -1.00000E-03 P012: DT\n')
    f.write(' 20.0000 P013: IOCOMM\n')
    f.write(' 0.00000 P014: IOTIME\n')
    f.write(' 50.0000 P015: IOSTEP\n')
    f.write(' 0.00000 P016: PSSOLVER: 0=default\n')
    f.write(' 0.00000 P017: \n')
    f.write(' 0.00000 P018: GRID <0 --> # cells on screen\n')
    f.write(' 0.00000 P019: INTYPE\n')
    f.write(' 0.00000 P020: NORDER\n')
    f.write(' 1.00000E-08 P021: DIVERGENCE\n')
    f.write(' 1.00000E-08 P022: HELMHOLTZ\n')
    f.write(' 0.00000 P023: NPSCAL\n')
    f.write(' 1.00000E-02 P024: TOLREL\n')
    f.write(' 1.00000E-02 P025: TOLABS\n')
    f.write(' 0.50000 P026: COURANT/NTAU\n')
    f.write(' 3.00000 P027: TORDER\n')
    f.write(' 0.00000 P028: TORDER: mesh velocity (0: p28=p27)\n')
    f.write(' 0.00000 P029: = magnetic visc if > 0, = -1/Rm if < 0\n')
    f.write(' 0.00000 P030: > 0 ==> properties set in uservp()\n')
    f.write(' 0.00000 P031: NPERT: #perturbation modes\n')
    f.write(' 0.00000 P032: #BCs in re2 file, if > 0\n')
    f.write(' 0.00000 P033: \n')
    f.write(' 0.00000 P034: \n')
    f.write(' 0.00000 P035: \n')
    f.write(' 0.00000 P036: XMAGNET\n')
    f.write(' 0.00000 P037: NGRIDS\n')
    f.write(' 0.00000 P038: NORDER2\n')
    f.write(' 0.00000 P039: NORDER3\n')
    f.write(' 0.00000 P040: \n')
    f.write(' 0.00000 P041: 1-->multiplicattive SEMG\n')
    f.write(' 0.00000 P042: 0=gmres/1=pcg\n')
    f.write(' 0.00000 P043: 0=semg/1=schwarz\n')
    f.write(' 0.00000 P044: 0=E-based/1=A-based prec.\n')
    f.write(' 0.00000 P045: Relaxation factor for DTFS\n')
    f.write(' 0.00000 P046: reserved\n')
    f.write(' 0.00000 P047: vnu: mesh material prop.\n')
    f.write(' 0.00000 P048: \n')
    f.write(' 0.00000 P049: \n')
    f.write(' 0.00000 P050: \n')
    f.write(' 0.00000 P051: \n')
    f.write(' 0.00000 P052: IOHIS\n')
    f.write(' 0.00000 P053: \n')
    f.write(' -3.00000 P054: fixed flow rate dir: |p54|=1,2,3=x,y,z\n')
    f.write(' 1.00000 P055: vol.flow rate (p54>0) or Ubar (p54<0)\n')
    f.write(' 0.00000 P056: \n')
    f.write(' 0.00000 P057: \n')
    f.write(' 0.00000 P058: \n')
    f.write(' 0.00000 P059: !=0 --> full Jac. eval. for each el.\n')
    f.write(' 0.00000 P060: !=0 --> init. velocity to small nonzero\n')
    f.write(' 0.00000 P061: \n')
    f.write(' 0.00000 P062: >0 --> force byte_swap for output\n')
    f.write(' 0.00000 P063: =8 --> force 8-byte output\n')
    f.write(' 0.00000 P064: =1 --> perturbation restart\n')
    f.write(' 1.00000 P065: #iofiles (eg, 0 or 64); <0 --> sep. dirs\n')
    f.write(' 6.00000 P066: output : <0=ascii, else binary\n')
    f.write(' 6.00000 P067: restart: <0=ascii, else binary\n')
    f.write(' 10.0000 P068: STAT_COMP: how often you compute stats\n')
    f.write(' 50.0000 P069: STAT_OUTP: how often you write stats\n')
    f.write(' 50.0000 P070: CHKPTSTP: how often you write restart files (rs8)\n')
    f.write(' 0.00000 P071: IFCHKPTRST: restart (1) or start from inital cond.\n')
    f.write(' 0.00000 P072: \n')
    f.write(' 0.00000 P073: \n')
    f.write(' 0.00000 P074: \n')
    f.write(' 0.00000 P075: \n')
    f.write(' 0.00000 P076: \n')
    f.write(' 0.00000 P077: \n')
    f.write(' 0.00000 P078: \n')
    f.write(' 0.00000 P079: \n')
    f.write(' 0.00000 P080: \n')
    f.write(' 0.00000 P081: \n')
    f.write(' 0.00000 P082: \n')
    f.write(' 0.00000 P083: \n')
    f.write(' 0.00000 P084: != 0 --> sets initial timestep if p12>0\n')
    f.write(' 0.00000 P085: dt retio of p84 !=0, for timesteps>0\n')
    f.write(' 0.00000 P086: reserved\n')
    f.write(' 0.00000 P087: \n')
    f.write(' 0.00000 P088: \n')
    f.write(' 0.00000 P089: \n')
    f.write(' 0.00000 P090: \n')
    f.write(' 0.00000 P091: \n')
    f.write(' 0.00000 P092: \n')
    f.write(' 20.0000 P093: Number of previous pressure solns saved\n')
    f.write(' 9.00000 P094: start projecting velocity after p94 step\n')
    f.write(' 9.00000 P095: start projecting pressure after p95 step\n')
    f.write(' 0.00000 P096: \n')
    f.write(' 0.00000 P097: \n')
    f.write(' 0.00000 P098: \n')
    f.write(' 4.00000 P099: dealiasing: <0--> off /3--> old /4-->new\n')
    f.write(' 0.00000 P100: \n')
    f.write(' 0.00000 P101: Number of additional modes to filter\n')
    f.write(' 1.00000 P102: Dump out divergence at each time step\n')
    f.write(' 0.01000 P103: weight of stabilizing filter\n')
    f.write(' 0.00000 P104: \n')
    f.write(' 0.00000 P105: \n')
    f.write(' 0.00000 P106: \n')
    f.write(' 0.00000 P107: !=0 --> add h2 array in hmholtz eqn\n')
    f.write(' 0.00000 P108: \n')
    f.write(' 0.00000 P109: \n')
    f.write(' 0.00000 P110: \n')
    f.write(' 0.00000 P111: \n')
    f.write(' 0.00000 P112: \n')
    f.write(' 0.00000 P113: \n')
    f.write(' 0.00000 P114: \n')
    f.write(' 0.00000 P115: \n')
    f.write(' 0.00000 P116: =nelx for gtp solver\n')
    f.write(' 0.00000 P117: =nely for gtp solver\n')
    f.write(' 0.00000 P118: =nelz for gtp solver\n')
    f.write(' 4 Lines of passive scalar data follows2 CONDUCT, 2RHOCP\n')
    f.write(' 1.00000 1.00000 1.00000 1.00000 1.00000\n')
    f.write(' 1.00000 1.00000 1.00000 1.00000\n')
    f.write(' 1.00000 1.00000 1.00000 1.00000 1.00000\n')
    f.write(' 1.00000 1.00000 1.00000 1.00000\n')
    f.write(' 13 LOGICAL SWITCHES FOLLOW\n')
    f.write(' T IFFLOW\n')
    if (if_therm):
        # NOTE(review): this banner line is emitted inside the LOGICAL
        # SWITCHES section (which announced 13 switch lines above), so the
        # thermal run produces an extra non-switch line here.  It looks like
        # a copy-paste of the BC-section banner further down -- confirm that
        # the rea reader tolerates it before changing anything.
        f.write(' ***** THERMAL BOUNDARY CONDITIONS *****\n')
        f.write(' T IFHEAT\n')
    else:
        f.write(' F IFHEAT\n')
    f.write(' T IFTRAN\n')
    f.write(' T F F F F F F F F F F IFNAV & IFADVC (convection in P.S. fields)\n')
    f.write(' F F T T T T T T T T T T IFTMSH (IF mesh for this field is T mesh)\n')
    f.write(' F IFAXIS\n')
    f.write(' F IFSTRS\n')
    f.write(' F IFSPLIT\n')
    f.write(' F IFMGRID\n')
    f.write(' F IFMODEL\n')
    f.write(' F IFKEPS\n')
    f.write(' F IFMVBD\n')
    f.write(' F IFCHAR\n')
    f.write(' 2.00000 2.00000 -1.00000 -1.00000 XFAC,YFAC,XZERO,YZERO\n')
    # Section markers below are the anchors the write_* functions search for.
    f.write(' ***** MESH DATA ***** 6 lines are X,Y,Z;X,Y,Z. Columns corners 1-4;5-8\n')
    f.write(' ***** CURVED SIDE DATA *****\n')
    # f.write(' 0 Curved sides follow IEDGE,IEL,CURVE(I),I=1,5, CCURVE\n')
    f.write(' ***** BOUNDARY CONDITIONS *****\n')
    f.write(' ***** FLUID BOUNDARY CONDITIONS *****\n')
    if (if_therm):
        f.write(' ***** THERMAL BOUNDARY CONDITIONS *****\n')
    else:
        f.write(' ***** NO THERMAL BOUNDARY CONDITIONS *****\n')
    f.write(' 0 PRESOLVE/RESTART OPTIONS *****\n')
    f.write(' 7 INITIAL CONDITIONS *****\n')
    f.write(' C Default\n')
    f.write(' C Default\n')
    f.write(' C Default\n')
    f.write(' C Default\n')
    f.write(' C Default\n')
    f.write(' C Default\n')
    f.write(' C Default\n')
    f.write(' ***** DRIVE FORCE DATA ***** BODY FORCE, FLOW, Q\n')
    f.write(' 4 Lines of Drive force data follow\n')
    f.write(' C\n')
    f.write(' C\n')
    f.write(' C\n')
    f.write(' C\n')
    f.write(' ***** Variable Property Data ***** Overrrides Parameter data.\n')
    f.write(' 1 Lines follow.\n')
    f.write(' 0 PACKETS OF DATA FOLLOW\n')
    f.write(' ***** HISTORY AND INTEGRAL DATA *****\n')
    f.write(' 0 POINTS. Hcode, I,J,H,IEL\n')
    f.write(' ***** OUTPUT FIELD SPECIFICATION *****\n')
    f.write(' 6 SPECIFICATIONS FOLLOW\n')
    f.write(' T COORDINATES\n')
    f.write(' T VELOCITY\n')
    f.write(' T PRESSURE\n')
    f.write(' T TEMPERATURE\n')
    f.write(' F TEMPERATURE GRADIENT\n')
    f.write(' 0 PASSIVE SCALARS\n')
    f.write(' ***** OBJECT SPECIFICATION *****\n')
    f.write(' 0 Surface Objects\n')
    f.write(' 0 Volume Objects\n')
    f.write(' 0 Edge Objects\n')
    f.write(' 0 Point Objects\n')
    f.close()
def dump_input_vars(R, nR, nSq, nZ, L_z, N, Re_t, stretch_sq, dr_sq_ratio,\
        dr_sq_int_ratio, distri_on, a_interf,\
        tog_r_out_const, tog_a_on_dist):
    """ Print all the input variables so the output can be
    saved and used to correctly recreate the mesh.
    """
    print('INPUT VARIABLES:')
    print('----------------')
    # The report order is fixed and deliberately differs from the
    # argument order (nZ and L_z come before nSq).
    report = (
        ('R', R), ('nR', nR), ('nZ', nZ), ('L_z', L_z), ('nSq', nSq),
        ('N', N), ('Re_t', Re_t), ('stretch_sq', stretch_sq),
        ('dr_sq_ratio', dr_sq_ratio), ('dr_sq_int_ratio', dr_sq_int_ratio),
        ('distri_on', distri_on), ('a_interf', a_interf),
        ('tog_r_out_const', tog_r_out_const), ('tog_a_on_dist', tog_a_on_dist),
    )
    for name, value in report:
        print('{0} = {1:10.5f}'.format(name, value))
def check_mesh_quality(elements, nR, nSq, nZ, R, L_z, N , Re_t):
    """ Find minimum and maximum radial and circumferential
    element lengths and element angles (distortion from 90°).
    Find resolution of the generated mesh considering
    Gauss-Lobatto-Legendre distribution of grid points
    within each element.

    Only the first quadrant of the first cross section is inspected
    (symmetry).  Prints a summary report to stdout; nothing is returned
    and the elements are not modified.

    :param elements: element objects with x, y corner arrays and number
    :param nR: number of element layers in radial direction
    :param nSq: number of elements along the inner square section
    :param nZ: number of element layers in streamwise (z) direction
    :param R: pipe radius
    :param L_z: streamwise length of the pipe
    :param N: polynomial order (N+1 GLL points per element edge)
    :param Re_t: friction Reynolds number used to scale to plus units
    """
    # only check first quadrant
    nel_quarter = nSq**2 + (nR-nSq)*2*nSq # number of elements in one quarter
    nPhi = 8*nSq
    elements = elements[0:nel_quarter]
    # Running extrema; 1e5 is "larger than any expected length/angle".
    l_r_max = 0
    l_r_min = 1e5
    l_p_max = 0
    l_p_min = 1e5
    alph_max = 0
    alph_min = 1e5
    x_gll = np.zeros(N+1) # Distribution of GLL points in reference element [-1,1]
    d_x_gll = np.zeros(N+1) # Length between two adjacent GLL points
    el_wall_ind = nSq**2+nSq*2*(nR-nSq-1) # index of element at wall
    w_ind = el_wall_ind # copy
    cum_el = 0 # radial length of cumulative elements before 10th pt from the wall
    dz_rec = 0 # recommended size of streamwise elements
    # Get size of elements themselves
    for el in elements:
        n = el.number
        i = ((n-1)-nSq**2)%(nSq*2) # position in clockwise manner through each layer
        if (n <= nSq**2 or i < nSq): # either in "square" section or upper onion part
            # Edge vectors of the quadrilateral, one per face 1..4.
            vec1 = np.array([el.x[1]-el.x[0], el.y[1]-el.y[0]]) # corresponds to face 1
            vec1_norm = np.linalg.norm(vec1)
            vec2 = np.array([el.x[2]-el.x[1], el.y[2]-el.y[1]]) # corresponds to face 2
            vec2_norm = np.linalg.norm(vec2)
            vec3 = np.array([el.x[3]-el.x[2], el.y[3]-el.y[2]]) # corresponds to face 3
            vec3_norm = np.linalg.norm(vec3)
            vec4 = np.array([el.x[0]-el.x[3], el.y[0]-el.y[3]]) # corresponds to face 4
            vec4_norm = np.linalg.norm(vec4)
            l_rad = np.array([ vec4_norm, vec2_norm ])
            l_phi = np.array([ vec1_norm, vec3_norm ])
            # Interior corner angles (radians) from adjacent edge vectors.
            alpha_12 = my_math.vec_angle(-vec1, vec2)
            alpha_23 = my_math.vec_angle(vec2, -vec3)
            alpha_34 = my_math.vec_angle(-vec3, vec4)
            alpha_41 = my_math.vec_angle(-vec4, vec1)
            alpha = np.array([ alpha_12, alpha_23, alpha_34, alpha_41 ])
            l_rad_max = max(l_rad)
            l_rad_min = min(l_rad)
            l_phi_max = max(l_phi)
            l_phi_min = min(l_phi)
            alpha_max = max(alpha)
            alpha_min = min(alpha)
            # update previous values if necessary
            if (l_rad_max > l_r_max):
                l_r_max = l_rad_max
                el_r_max = n
            if (l_rad_min < l_r_min):
                l_r_min = l_rad_min
                el_r_min = n
            if (l_phi_max > l_p_max):
                l_p_max = l_phi_max
                el_p_max = n
            if (l_phi_min < l_p_min):
                l_p_min = l_phi_min
                el_p_min = n
            if (alpha_max > alph_max):
                alph_max = alpha_max
                el_alph_max = n
            if (alpha_min < alph_min):
                alph_min = alpha_min
                el_alph_min = n
    # Get size of one element in streamwise direction
    z_el = L_z/nZ
    # Get size of the actual grid by considering GLL distribution of grid points
    # GLL distribution on reference element x in [-1, 1]
    x_gll = my_math.get_gll(N)
    # Distance between two points on ref. element
    d_x_gll = x_gll[0:-1] - x_gll[1:]
    r_plus_min = l_r_min*min(d_x_gll)*0.5*Re_t
    r_plus_max = l_r_max*max(d_x_gll)*0.5*Re_t
    # NOTE(review): w_ind has not been modified since it was copied from
    # el_wall_ind above, so this restore is currently a no-op.
    el_wall_ind = w_ind
    # Resolution in radial direction calculated at y=0
    r_1_plus = ((elements[w_ind].y[3] - elements[w_ind].y[0]))\
        *min(d_x_gll)*0.5*Re_t
    # First 10 point away from the wall is in which element?
    away_from_wall = int(m.ceil(10 / (N+1)))
    # Remaining pts up to 10th point x-th element
    # NOTE(review): precedence makes this (10 % (N+1)) * away_from_wall;
    # confirm that the multiplication (rather than e.g. plain 10 % (N+1))
    # is intended.
    rem_pts = 10 % (N+1)*away_from_wall
    # Reset rem_pts in case the element is full
    if (rem_pts == 0):
        rem_pts = (N+1)
    while away_from_wall > 1:
        # Cumulative elements' radial length at the wall closer than 10th pt
        cum_el = cum_el + elements[w_ind].y[3] - elements[w_ind].y[0]
        # Update
        away_from_wall = away_from_wall -1
        w_ind = w_ind - 2*nSq
    r_10_plus = (cum_el + \
        (elements[w_ind].y[3] - elements[w_ind].y[0])* \
        np.sum(d_x_gll[:rem_pts])*0.5)*Re_t
    # Resolution in circumferential direction
    # First, we have to find the angle theta spanned by one element
    theta_el = m.pi/2/(2*nSq)
    # Only a portion of that is spanned between two adjacent grid points
    theta_max_gp = theta_el*(max(d_x_gll)*0.5)
    theta_min_gp = theta_el*(min(d_x_gll)*0.5)
    r_theta_max = R*theta_max_gp*Re_t
    r_theta_min = R*theta_min_gp*Re_t
    # Resolution in streamwise direction z
    z_max_plus = z_el*max(d_x_gll)*0.5*Re_t
    z_min_plus = z_el*min(d_x_gll)*0.5*Re_t
    # dz_rec = 10/(max(d_x_gll)*0.5*Re_t)
    # Write a little output to stdout
    print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
    print('Some information about the mesh size:')
    print('-------------------------------------')
    print('Total number of elements in plane: {0:d}'.format(len(elements)*4))
    print('Delta R max = {0:10.5f} at {1:d}'.format(l_r_max, el_r_max))
    print('Delta R min = {0:10.5f} at {1:d}'.format(l_r_min, el_r_min))
    print('Delta phi max = {0:10.5f} at {1:d}'.format(l_p_max, el_p_max))
    print('Delta phi min = {0:10.5f} at {1:d}'.format(l_p_min, el_p_min))
    print('R*phi max = {0:10.5f}'.format(2*m.pi/nPhi*R))
    print('alpha max = {0:10.5f}° at {1:d}'.format(alph_max*(180/m.pi), el_alph_max))
    print('alpha min = {0:10.5f}° at {1:d}'.format(alph_min*(180/m.pi), el_alph_min))
    print('Note that curvature is not considered here!')
    print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
    print('RESOLUTION')
    print('----------')
    print('r+ min = {0:10.5f} (< 1 )'.format(r_plus_min))
    print('r+ max = {0:10.5f} (< 5 )'.format(r_plus_max))
    print('r1+ = {0:10.5f} (< 1 )'.format(r_1_plus))
    print('r10+ = {0:10.5f} (<10 )'.format(r_10_plus))
    print('R theta plus max = {0:10.5f}° (< 5°)'.format(r_theta_max))
    print('R theta plus min = {0:10.5f}° (~1.5°)'.format(r_theta_min))
    print('z+ max = {0:10.5f} (<10 )'.format(z_max_plus))
    print('z+ min = {0:10.5f} (~3 )'.format(z_min_plus))
    print('Radial resolution is evaluated at vertical axis.')
    print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
def check_input(nR, nSq, nZ, R, L_z, th_bc_type, N, Re_t,\
        if_therm):
    """Perform some initial sanity checks on the mesh input variables.

    Prints an error message and exits the process with status 1 as soon
    as a check fails.

    :param nR: number of elements in radial direction (int, >= 2, > nSq)
    :param nSq: number of elements along the inner square region (int)
    :param nZ: number of elements in streamwise direction (int)
    :param R: pipe radius
    :param L_z: streamwise length of the pipe
    :param th_bc_type: thermal boundary condition type
    :param N: polynomial order (int)
    :param Re_t: friction Reynolds number
    :param if_therm: flag indicating a thermal simulation
    """
    # Type checks: element counts and polynomial order must be integers.
    if (not isinstance(nR, int)):
        print('Error: Use integer value for nR.')
        sys.exit(1)
    if (not isinstance(nSq, int)):
        print('Error: Use integer value for nSq.')
        sys.exit(1)
    if (not isinstance(nZ, int)):
        print('Error: Use integer value for nZ.')
        sys.exit(1)
    if (not isinstance(N, int)):
        print('Error: Use integer value for N.')
        sys.exit(1)
    # Value checks on the radial element count.
    if (nR < 2):
        print('Error: Use at least 2 elements in radial directin (nR).')
        sys.exit(1)
    if (nR <= nSq):
        print('Error: nR has to be larger than nSq.')
        sys.exit(1)
| {
"repo_name": "Steffen1989/GenPipeMesh",
"path": "nek_utils.py",
"copies": "1",
"size": "116398",
"license": "mit",
"hash": 3198261230777750000,
"line_mean": 49.0176192523,
"line_max": 116,
"alpha_frac": 0.4593310479,
"autogenerated": false,
"ratio": 2.880608835540156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3839939883440156,
"avg_score": null,
"num_lines": null
} |
"A collection of individuals with fixed relationships"
import numpy as np
from pydigree.paths import fraternity
from pydigree.common import table
from pydigree.population import Population
class Pedigree(Population):

    "A collection of individuals with fixed relationships"

    def __init__(self, label=None):
        """
        Create a pedigree.

        :param label: pedigree label
        """
        Population.__init__(self)
        self.label = label
        # Memoization caches keyed by frozenset({label1, label2}).
        self.kinmat = {}
        self.fratmat = {}

    def __prepare_nonfounder_contraint(self, con):
        # NOTE(review): despite the name, the returned predicate selects
        # founders (x.is_founder() is True); presumably nonfounders
        # (`not x.is_founder()`) were intended -- confirm before relying
        # on this private helper.
        if not con:
            return lambda x: x.is_founder()
        else:
            return lambda x: x.is_founder() and con(x)

    def bit_size(self):
        """
        Returns the bit size of the pedigree. The bitsize is defined as 2*n-f
        where n is the number of nonfounders and f is the number of founders.
        This represents the number of bits it takes to represent the
        inheritance vector in the Lander-Green algorithm.

        :returns: bit size
        :rtype: int
        """
        # table() counts individuals by founder status:
        # t[False] = nonfounders, t[True] = founders.
        t = table([x.is_founder() for x in self.individuals])
        return 2 * t[False] - t[True]

    # Relationships
    #

    def kinship(self, id1, id2):
        """
        Get the Malecot coefficient of coancestry for two individuals in
        the pedigree. These are calculated recursively.

        For pedigree objects, results are stored to reduce the calculation
        time for kinship matrices.

        :param id1: the label of a individual to be evaluated
        :param id2: the label of a individual to be evaluated

        :returns: Malecot's coefficient of coancestry
        :rtype: float

        Reference:
        Lange. Mathematical and Statistical Methods for Genetic Analysis.
        1997. Springer.
        """
        pair = frozenset([id1, id2])
        if pair in self.kinmat:
            return self.kinmat[pair]

        # The recursion bottoms out at unknown (founder) parents.
        if id1 is None or id2 is None:
            return 0

        # Since with pedigree objects we're typically working with IDs,
        # I define these functions to get parents for IDs by looking them
        # up in the pedigree
        def fa(lab):
            return (self[lab].father.label
                    if self[lab].father is not None else None)

        def mo(lab):
            return (self[lab].mother.label
                    if self[lab].mother is not None else None)

        # Use tuples here to take advantage of the implicit tuple ordering
        # With depth as the first item, it assures that descendants aren't
        # listed before their ancestors.
        t1 = self[id1].depth, id1
        t2 = self[id2].depth, id2
        if id1 == id2:
            k = (1 + self.kinship(fa(id1), mo(id1))) / 2.0
        elif t1 < t2:
            k = (self.kinship(id1, fa(id2)) + self.kinship(id1, mo(id2))) / 2.0
        else:
            k = (self.kinship(id2, fa(id1)) + self.kinship(id2, mo(id1))) / 2.0

        self.kinmat[pair] = k
        return k

    def fraternity(self, id1, id2):
        """
        Like Pedigree.kinship, this is a convenience function for getting
        fraternity coefficients for two pedigree members by their ID label.
        This is a wrapper for paths.fraternity

        :param id1: the label of a individual to be evaluated
        :param id2: the label of a individual to be evaluated

        :returns: coefficient of fraternity
        :rtype: float
        """
        pair = frozenset([id1, id2])
        if pair not in self.fratmat:
            # Fixed: was a duplicated assignment (`f = f = ...`).
            f = fraternity(self[id1], self[id2])
            self.fratmat[pair] = f
            return f
        else:
            return self.fratmat[pair]

    def inbreeding(self, indlab):
        """
        Like Pedigree.kinship, this is a convenience function for getting
        inbreeding coefficients for individuals in pedigrees by their id
        label. As inbreeding coefficients are the kinship coefficient of
        the parents, this function calls Pedigree.kinship to check for
        stored values.

        :param indlab: the label of the individual to be evaluated

        :returns: inbreeding coefficient
        :rtype: float
        """
        ind = self[indlab]
        # Founders (and children of two founders) are not inbred.
        if ind.is_founder():
            return 0.0
        if ind.father.is_founder() or ind.mother.is_founder():
            return 0.0
        return self.kinship(ind.father.label, ind.mother.label)

    def additive_relationship_matrix(self, ids=None):
        """
        Calculates an additive relationship matrix (the A matrix) for
        quantitative genetics.

        A_ij = 2 * kinship(i,j) if i != j.
        (See the notes on function 'kinship')
        A_ij = 1 + inbreeding(i) if i == j
        (inbreeding(i) is equivalent to kinship(i.father,i.mother))

        :param ids: IDs of pedigree members to include in the matrix

        Important: if not given, the rows/columns are all individuals in the
        pedigree, sorted by id. If you're not sure about this, try
        sorted(x.label for x in ped) to see the ordering.

        :returns: additive relationship matrix
        :rtype: matrix
        """
        if not ids:
            ids = sorted(x.label for x in self.individuals)
        else:
            # Keep only (pedigree, label) pairs that belong to this pedigree.
            ids = [label for ped, label in ids if ped == self.label and
                   label in self.population.keys()]
        mat = []
        for a in ids:
            row = []
            for b in ids:
                if a == b:
                    row.append(1 + self.inbreeding(a))
                else:
                    row.append(2 * self.kinship(a, b))
            mat.append(row)
        return np.matrix(mat)

    def dominance_relationship_matrix(self, ids=None):
        """
        Calculates the dominance genetic relationship matrix (the D matrix)
        for quantitative genetics.

        D_ij = fraternity(i,j) if i != j
        D_ij = 1 if i == j

        :param ids: IDs of pedigree members to include in the matrix

        Important: if not given, the rows/columns are all individuals in the
        pedigree, sorted by id. If you're not sure about this, try
        sorted(x.label for x in ped) to see the ordering.

        :returns: dominance relationship matrix
        :rtype: matrix
        """
        if not ids:
            ids = sorted(x.label for x in self.individuals)
        else:
            ids = [label for ped, label in ids if ped == self.label and
                   label in self.population.keys()]
        mat = []
        for a in ids:
            row = []
            for b in ids:
                if a == b:
                    row.append(1)
                else:
                    row.append(self.fraternity(a, b))
            mat.append(row)
        return np.matrix(mat)

    def mitochondrial_relationship_matrix(self, ids=None):
        """
        Calculates the mitochondrial relationship matrix.

        M_ij = 1 if matriline(i) == matriline(j)

        :param ids: IDs of pedigree members to include in the matrix

        Important: if not given, the rows/columns are all individuals in the
        pedigree, sorted by id. If you're not sure about this, try
        sorted(x.label for x in ped) to see the ordering.

        Returns: A numpy matrix

        Reference:
        Liu et al. "Association Testing of the Mitochondrial Genome Using
        Pedigree Data". Genetic Epidemiology. (2013). 37,3:239-247
        """
        if not ids:
            inds = sorted((x for x in self.individuals), key=lambda x: x.label)
        else:
            inds = [self[id] for id in ids]
        mat = []
        for a in inds:
            # Fixed: the inner comprehension iterated `ids` (labels, or
            # None when no ids were given); it must iterate the resolved
            # individuals in `inds` so that `.matriline()` is available.
            row = [1 if a.matriline() == b.matriline() else 0
                   for b in inds]
            mat.append(row)
        return np.matrix(mat)

    # Gene dropping
    #

    def simulate_ibd_states(self, inds=None):
        """
        Simulate IBD patterns by gene dropping: Everyone's genotypes reflect
        the founder chromosome that they received the genotype from. You can
        then use the ibs function to determine IBD state. This effectively an
        infinite-alleles simulation.

        :param inds: individuals to genotype (default: all nonfounders)

        Returns: Nothing
        """
        self.clear_genotypes()
        # Founders get labeled chromosomes; everyone else inherits them.
        for x in self.founders():
            x.label_genotypes()
        if inds:
            for x in inds:
                x.get_genotypes()
        else:
            for x in self.nonfounders():
                x.get_genotypes()
| {
"repo_name": "jameshicks/pydigree",
"path": "pydigree/pedigree.py",
"copies": "1",
"size": "8471",
"license": "apache-2.0",
"hash": -3853741909646653000,
"line_mean": 32.6150793651,
"line_max": 79,
"alpha_frac": 0.5774997049,
"autogenerated": false,
"ratio": 3.875114364135407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4952614069035407,
"avg_score": null,
"num_lines": null
} |
""" A collection of input methods that aren't necessarily buttons """
from rcfc.server import register_post_with_state, register_post_with_input
from rcfc.groups import convertGroup
class DIRECTIONAL:
    """Direction identifiers reported by left/right arrow inputs."""
    LEFT = "left"
    RIGHT = "right"
def slider(text, getter, input_range=(0, 100), group=None):
    """Decorator factory registering a slider input.

    :param text: label shown next to the slider
    :param getter: callable returning the slider's current value
    :param input_range: (min, max) bounds of the slider
    :param group: optional group specification, passed to convertGroup
    """
    low, high = input_range

    def wrapper(setter):
        descriptor = {
            "text": text,
            "type": "input.slider",
            "min": low,
            "max": high,
            "groups": convertGroup(group)
        }
        register_post_with_state(descriptor, getter, setter)

    return wrapper
def left_right_arrows(text, group=None):
    """Decorator factory registering a left/right arrow input.

    :param text: label shown next to the control
    :param group: optional group specification, passed to convertGroup
    """
    def wrapper(setter):
        descriptor = {
            "text": text,
            "type": "input.leftright",
            "groups": convertGroup(group)
        }
        register_post_with_input(descriptor, setter)

    return wrapper
def colorpicker(text, getter, group=None):
    """Decorator factory registering a color-picker input.

    :param text: label shown next to the picker
    :param getter: callable returning the current color
    :param group: optional group specification, passed to convertGroup
    """
    def wrapper(setter):
        descriptor = {
            "text": text,
            "type": "input.colorpicker",
            "groups": convertGroup(group)
        }
        register_post_with_state(descriptor, getter, setter)

    return wrapper
| {
"repo_name": "pviafore/rcfc",
"path": "rcfc/input_methods.py",
"copies": "1",
"size": "1204",
"license": "mit",
"hash": 5777970101391696000,
"line_mean": 25.1739130435,
"line_max": 74,
"alpha_frac": 0.5863787375,
"autogenerated": false,
"ratio": 3.8222222222222224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9908600959722222,
"avg_score": 0,
"num_lines": 46
} |
"""A collection of internal helper routines.
.. note::
This module is intended for the internal use in python-icat and is
not considered to be part of the API. No effort will be made to
keep anything in here compatible between different versions.
>>> name = 'rbeck'
>>> qps = simpleqp_quote(name)
>>> qps
'rbeck'
>>> s = simpleqp_unquote(qps)
>>> s
u'rbeck'
>>> name == s
True
>>> fullName = u'Rudolph Beck-D\\xfclmen'
>>> qps = simpleqp_quote(fullName)
>>> qps
'Rudolph=20Beck=2DD=C3=BClmen'
>>> s = simpleqp_unquote(qps)
>>> s
u'Rudolph Beck-D\\xfclmen'
>>> fullName == s
True
>>> parse_attr_val("name-jdoe")
{'name': 'jdoe'}
>>> key = "facility-(name-ESNF)_name-2010=2DE2=2D0489=2D1_visitId-1"
>>> d = parse_attr_val(key)
>>> d
{'visitId': '1', 'name': '2010=2DE2=2D0489=2D1', 'facility': 'name-ESNF'}
>>> parse_attr_val(d['facility'])
{'name': 'ESNF'}
"""
from contextlib import contextmanager
import datetime
import logging
import suds.sax.date
def simpleqp_quote(obj):
    """Quote a value in a simple quoted-printable style.

    The value is converted to a string and UTF-8 encoded; every byte
    outside ``[0-9A-Za-z]`` is escaped as ``=XX`` with two uppercase
    hex digits.
    """
    digits = '0123456789ABCDEF'
    safe = ('0123456789'
            'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
            'abcdefghijklmnopqrstuvwxyz')
    text = obj if isinstance(obj, str) else str(obj)
    pieces = []
    for byte in text.encode('utf-8'):
        ch = chr(byte)
        if ch in safe:
            pieces.append(ch)
        else:
            pieces.append('=' + digits[byte // 16] + digits[byte % 16])
    return ''.join(pieces)
def simpleqp_unquote(qs):
    """Decode a string quoted in the simple quoted-printable style.

    ``=XX`` escapes are turned back into bytes and the result is
    decoded as UTF-8.

    :raises ValueError: if an escape sequence is truncated or contains
        characters that are not uppercase hex digits.
    """
    digits = '0123456789ABCDEF'
    raw = []
    pos = 0
    while pos < len(qs):
        ch = qs[pos]
        if ch == '=':
            # An escape needs two hex digits after the '='.
            if pos + 2 >= len(qs):
                raise ValueError("Invalid quoted string '%s'" % qs)
            hi = digits.index(qs[pos + 1])
            lo = digits.index(qs[pos + 2])
            raw.append(16 * hi + lo)
            pos += 3
        else:
            raw.append(ord(ch))
            pos += 1
    return bytes(raw).decode('utf-8')
def parse_attr_val(avs):
    """Parse an attribute value list string.

    The accepted grammar is::

        attrvaluestring ::= attrvalue
                        |   attrvalue '_' attrvaluestring
        attrvalue       ::= attr '-' value
        value           ::= simplevalue
                        |   '(' attrvaluestring ')'
        attr            ::= [A-Za-z]+
        simplevalue     ::= [0-9A-Za-z=]+

    Return a dict mapping attributes to values.  A parenthesized
    attrvaluestring is stored verbatim (without the parentheses) and not
    parsed further.
    """
    # It might be easier to implement this using pyparsing, but this
    # module is not in the standard library and I don't want to depend
    # on external packages for this.
    result = {}
    while avs:
        hyphen = avs.index('-')
        if hyphen == 0 or hyphen == len(avs) - 1:
            raise ValueError("malformed '%s'" % avs)
        attr = avs[:hyphen]
        # FIXME: Should check that attr matches [A-Za-z]+ here.
        if avs[hyphen + 1] == '(':
            # Parenthesized value: scan for the matching ')'.
            depth = 0
            for pos in range(hyphen + 1, len(avs)):
                if avs[pos] == '(':
                    depth += 1
                elif avs[pos] == ')':
                    depth -= 1
                    if depth == 0:
                        break
            if depth > 0:
                raise ValueError("malformed '%s'" % avs)
            value = avs[hyphen + 2:pos]
            if pos == len(avs) - 1:
                avs = ""
            elif avs[pos + 1] == '_':
                avs = avs[pos + 2:]
            else:
                raise ValueError("malformed '%s'" % avs)
        else:
            # Simple value: runs until the next '_' separator or the end.
            sep = avs.find('_', hyphen + 1)
            if sep >= 0:
                value = avs[hyphen + 1:sep]
                avs = avs[sep + 1:]
            else:
                value = avs[hyphen + 1:]
                avs = ""
        # FIXME: Should check that value matches [0-9A-Za-z=]+ here.
        result[attr] = value
    return result
def parse_attr_string(s, attrtype):
    """Parse the string representation of an entity attribute.

    Note: for Date we use the parser from :mod:`suds.sax.date`. If
    this is the original Suds version, this parser is buggy and might
    yield wrong results. But the same buggy parser is also applied by
    Suds internally for the Date values coming from the ICAT server.
    Since we are mainly interested to compare with values from the
    ICAT server, we have a fair chance that this comparision
    nevertheless yields valid results.

    :raises ValueError: for an unparsable boolean value or an unknown
        attribute type.
    """
    if s is None:
        return None
    if attrtype in ('String', 'ParameterValueType', 'StudyStatus'):
        return s
    if attrtype in ('Integer', 'Long'):
        return int(s)
    if attrtype == 'Double':
        return float(s)
    if attrtype == 'boolean':
        # This is somewhat too liberal.  Valid values according XML
        # Schema Definition are only {"0", "false", "1", "true"} (case
        # sensitive).
        lowered = s.lower()
        if lowered in ("0", "no", "n", "false", "f", "off"):
            return False
        if lowered in ("1", "yes", "y", "true", "t", "on"):
            return True
        raise ValueError("Invalid boolean value '%s'" % s)
    if attrtype == 'Date':
        parsed = suds.sax.date.DateTime(s)
        try:
            # jurko fork
            return parsed.value
        except AttributeError:
            # original Suds
            return parsed.datetime
    raise ValueError("Invalid attribute type '%s'" % attrtype)
def ms_timestamp(dt):
    """Convert :class:`datetime.datetime` or string to timestamp in
    milliseconds since epoch.

    :param dt: an aware or naive datetime, a date string, or None
    :returns: milliseconds since epoch as int, or None for None input
    """
    if dt is None:
        return None
    value = parse_attr_string(dt, "Date") if isinstance(dt, str) else dt
    if not value.tzinfo:
        # Unaware datetime values are assumed to be UTC.
        value = value.replace(tzinfo=datetime.timezone.utc)
    return int(value.timestamp() * 1000)
@contextmanager
def disable_logger(name):
    """Context manager to temporarily disable a logger.

    :param name: name of the logger to disable

    The previous ``disabled`` state is restored on exit.  Fixed: the
    restore is now done in a ``finally`` clause, so the state is also
    restored when the managed block raises an exception (previously the
    logger stayed disabled in that case).
    """
    logger = logging.getLogger(name)
    sav_state = logger.disabled
    logger.disabled = True
    try:
        yield
    finally:
        logger.disabled = sav_state
| {
"repo_name": "icatproject/python-icat",
"path": "icat/helper.py",
"copies": "1",
"size": "6371",
"license": "apache-2.0",
"hash": 776423161089985900,
"line_mean": 29.4832535885,
"line_max": 80,
"alpha_frac": 0.5499921519,
"autogenerated": false,
"ratio": 3.7432432432432434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9788905658126728,
"avg_score": 0.0008659474033030217,
"num_lines": 209
} |
"""A collection of (internal usage) utils for rewriting and propagation in the hierarchy."""
import copy
import networkx as nx
import warnings
from regraph.category_op import (compose,
compose_chain,
compose_relation_dicts,
get_unique_map_from_pushout,
get_unique_map_to_pullback,
id_of,
is_total_homomorphism,
pullback_complement,
pushout,
pushout_from_relation,
left_relation_dict,
right_relation_dict)
from regraph import primitives
from regraph.exceptions import RewritingError, TotalityWarning
from regraph.rules import Rule
from regraph.utils import keys_by_value
def _rewrite_base(hierarchy, graph_id, rule, instance,
                  lhs_typing, rhs_typing, inplace=False):
    # Apply `rule` to the graph `graph_id` of the hierarchy at the match
    # `instance` via the classical SqPO construction (pullback complement
    # followed by pushout), and recompute the typing homomorphisms from
    # the rewritten graph to its typing graphs.
    #
    # Returns a dict with:
    #   "graph": the tuple (g_m, p_g_m, g_m_g, g_prime, g_m_g_prime,
    #            r_g_prime) of intermediate graphs and homomorphisms,
    #   "homomorphisms": updated typings keyed by (graph_id, typing_graph),
    #   "relations": list of (graph_id, related_graph) pairs to refresh.
    #
    # NOTE(review): the `lhs_typing` parameter is not used in this body.
    g_m, p_g_m, g_m_g =\
        pullback_complement(rule.p, rule.lhs, hierarchy.node[graph_id].graph,
                            rule.p_lhs, instance, inplace)
    g_prime, g_m_g_prime, r_g_prime = pushout(rule.p, g_m, rule.rhs,
                                              p_g_m, rule.p_rhs, inplace)

    # All relations adjacent to the rewritten graph must be refreshed.
    relation_updates = []
    for related_g in hierarchy.adjacent_relations(graph_id):
        relation_updates.append((graph_id, related_g))

    updated_homomorphisms = dict()

    for typing_graph in hierarchy.successors(graph_id):

        new_hom = copy.deepcopy(hierarchy.edge[graph_id][typing_graph].mapping)
        removed_nodes = set()
        new_nodes = dict()

        for node in rule.lhs.nodes():
            p_keys = keys_by_value(rule.p_lhs, node)
            # nodes that were removed
            if len(p_keys) == 0:
                removed_nodes.add(instance[node])
            elif len(p_keys) == 1:
                # preserved node: drop its typing unless the rhs typing
                # explicitly re-types it
                if typing_graph not in rhs_typing.keys() or\
                   rule.p_rhs[p_keys[0]] not in rhs_typing[typing_graph].keys():
                    if r_g_prime[rule.p_rhs[p_keys[0]]] in new_hom.keys():
                        removed_nodes.add(r_g_prime[rule.p_rhs[p_keys[0]]])
            # nodes were cloned
            elif len(p_keys) > 1:
                for k in p_keys:
                    if typing_graph in rhs_typing.keys() and\
                       rule.p_rhs[k] in rhs_typing[typing_graph].keys():
                        new_nodes[r_g_prime[rule.p_rhs[k]]] =\
                            list(rhs_typing[typing_graph][rule.p_rhs[k]])[0]
                    else:
                        removed_nodes.add(r_g_prime[rule.p_rhs[k]])

        for node in rule.rhs.nodes():
            p_keys = keys_by_value(rule.p_rhs, node)

            # nodes that were added
            if len(p_keys) == 0:
                if typing_graph in rhs_typing.keys():
                    if node in rhs_typing[typing_graph].keys():
                        new_nodes[node] = list(rhs_typing[
                            typing_graph][node])[0]

            # nodes that were merged
            elif len(p_keys) > 1:
                for k in p_keys:
                    removed_nodes.add(p_g_m[k])
                # assign new type of node
                if typing_graph in rhs_typing.keys():
                    if node in rhs_typing[typing_graph].keys():
                        new_type = list(rhs_typing[typing_graph][node])
                        new_nodes[r_g_prime[node]] = new_type

        # update homomorphisms
        for n in removed_nodes:
            if n in new_hom.keys():
                del new_hom[n]

        new_hom.update(new_nodes)

        updated_homomorphisms.update({
            (graph_id, typing_graph): new_hom
        })

    return {
        "graph": (g_m, p_g_m, g_m_g, g_prime, g_m_g_prime, r_g_prime),
        "homomorphisms": updated_homomorphisms,
        "relations": relation_updates
    }
def _propagate_rule_to(graph, origin_typing, rule, instance, p_origin,
                       inplace=False):
    """Replay the restrictive part of a rule on a graph typed by the origin.

    Node/edge removals, attribute removals and node clones performed by
    `rule` (matched in the origin at `instance`) are propagated to
    `graph`, whose nodes are typed by origin nodes via `origin_typing`.

    :param graph: graph to rewrite
    :param origin_typing: dict node of `graph` -> origin node
    :param rule: the rule applied at the origin
    :param instance: dict lhs node -> origin node
    :param p_origin: dict p node -> node of the rewritten origin
    :param inplace: if True, `graph` is modified in place
    :returns: (graph_prime, graph_prime_graph, graph_prime_origin) — the
        rewritten graph plus homomorphisms to the original graph and to
        the rewritten origin
    """
    if inplace is True:
        graph_prime = graph
    else:
        graph_prime = copy.deepcopy(graph)

    lhs_removed_nodes = rule.removed_nodes()
    lhs_removed_node_attrs = rule.removed_node_attrs()
    p_removed_edges = rule.removed_edges()
    p_removed_edge_attrs = rule.removed_edge_attrs()
    lhs_cloned_nodes = rule.cloned_nodes()

    graph_prime_graph = id_of(graph.nodes())
    graph_prime_origin = dict()

    # Remove every instance of a node removed by the rule; surviving
    # nodes keep a typing into the rewritten origin.
    for lhs_node in rule.lhs.nodes():
        origin_node = instance[lhs_node]
        g_nodes = keys_by_value(
            origin_typing, origin_node)
        for node in g_nodes:
            if lhs_node in lhs_removed_nodes:
                primitives.remove_node(
                    graph_prime, node)
                del graph_prime_graph[node]
            else:
                graph_prime_origin[node] = origin_node

    # Clone every instance of a node cloned by the rule; the original
    # node stands in for the first copy.
    for lhs_node, p_nodes in lhs_cloned_nodes.items():
        nodes_to_clone = keys_by_value(origin_typing, instance[lhs_node])
        for node in nodes_to_clone:
            for i, p_node in enumerate(p_nodes):
                if i == 0:
                    graph_prime_origin[node] = p_origin[p_node]
                    graph_prime_graph[node] = node
                else:
                    new_name = primitives.clone_node(
                        graph_prime,
                        node)
                    graph_prime_origin[new_name] = p_origin[p_node]
                    graph_prime_graph[new_name] = node

    # Propagate node attribute removals.
    for lhs_node, attrs in lhs_removed_node_attrs.items():
        nodes_to_remove_attrs = keys_by_value(
            origin_typing, instance[lhs_node])
        for node in nodes_to_remove_attrs:
            primitives.remove_node_attrs(
                graph_prime,
                node, attrs)

    # Propagate edge removals.
    for p_u, p_v in p_removed_edges:
        us = keys_by_value(graph_prime_origin, p_origin[p_u])
        vs = keys_by_value(graph_prime_origin, p_origin[p_v])
        for u in us:
            for v in vs:
                if (u, v) in graph_prime.edges():
                    primitives.remove_edge(
                        graph_prime, u, v)

    # Propagate edge attribute removals.
    # NOTE(review): unlike the edge-removal loop above, the lookup here
    # goes through `origin_typing` rather than `graph_prime_origin` —
    # confirm this asymmetry is intended.
    for (p_u, p_v), attrs in p_removed_edge_attrs.items():
        us = keys_by_value(origin_typing, p_origin[p_u])
        vs = keys_by_value(origin_typing, p_origin[p_v])
        for u in us:
            for v in vs:
                # Fixed: was `primitives.removed_edge_attrs`, which does
                # not exist; the primitive is `remove_edge_attrs` (cf.
                # `remove_node_attrs` above).
                primitives.remove_edge_attrs(
                    graph_prime, u, v, attrs)

    return (graph_prime, graph_prime_graph, graph_prime_origin)
def _propagate_up(hierarchy, graph_id, rule, instance,
                  p_origin_m, origin_m_origin_prime, inplace=False):
    """Propagate the rewriting of `graph_id` up the hierarchy.

    Walks the ancestors of `graph_id` in BFS order and replays the
    restrictive part of `rule` on each ancestor graph (and on the
    lhs/p/rhs of ancestor rules), collecting the updated graphs, rules,
    homomorphisms and relations.

    :param hierarchy: the hierarchy being rewritten
    :param graph_id: id of the graph where the rule was applied
    :param rule: the applied rule
    :param instance: match of the rule's lhs in the origin graph
    :param p_origin_m: homomorphism p -> rewritten origin
    :param origin_m_origin_prime: homomorphism origin_m -> origin_prime
    :param inplace: if True, modify hierarchy graphs in place
    :returns: dict with keys "graphs", "homomorphisms", "rules",
        "rule_homomorphisms" and "relations" describing all updates
    """
    updated_graphs = dict()
    updated_homomorphisms = dict()
    updated_relations = set()
    updated_rules = dict()
    updated_rule_h = dict()

    for graph in nx.bfs_tree(hierarchy, graph_id, reverse=True):
        if graph != graph_id:
            if isinstance(hierarchy.node[graph], hierarchy.graph_node_cls):
                origin_typing = hierarchy.get_typing(graph, graph_id)
                (graph_prime, graph_prime_graph, graph_prime_origin) =\
                    _propagate_rule_to(
                        hierarchy.node[graph].graph,
                        origin_typing, rule, instance,
                        p_origin_m, inplace)
                updated_graphs[graph] =\
                    (graph_prime, graph_prime_graph, None, graph_prime_origin)

                # Recompute typings of this graph into its successors.
                for suc in hierarchy.successors(graph):
                    if suc == graph_id:
                        graph_prime_suc_prime =\
                            compose(
                                graph_prime_origin,
                                origin_m_origin_prime)
                    elif suc in updated_graphs.keys():
                        graph_prime_suc_prime =\
                            get_unique_map_to_pullback(
                                updated_graphs[suc][0].nodes(),
                                updated_graphs[suc][1],
                                updated_graphs[suc][3],
                                compose(
                                    graph_prime_graph,
                                    hierarchy.edge[graph][suc].mapping),
                                graph_prime_origin)
                    else:
                        graph_prime_suc_prime = compose(
                            graph_prime_graph, hierarchy.edge[graph][suc].mapping)
                    updated_homomorphisms[(graph, suc)] = graph_prime_suc_prime

                # Recompute typings of already-updated predecessors into
                # this graph.
                for pred in hierarchy.predecessors(graph):
                    if pred in updated_graphs.keys():
                        pred_m_graph_m = get_unique_map_to_pullback(
                            graph_prime.nodes(),
                            graph_prime_graph,
                            graph_prime_origin,
                            updated_graphs[pred][1],
                            updated_graphs[pred][3]
                        )
                        updated_homomorphisms[
                            (pred, graph)] = pred_m_graph_m

                # propagate changes to adjacent relations
                for related_g in hierarchy.adjacent_relations(graph):
                    updated_relations.add((graph, related_g))
            else:
                # NOTE(review): this branch handles rule nodes (it reads
                # `hierarchy.node[graph].rule`), yet the guard re-checks
                # `graph_node_cls`, which is False here by construction —
                # presumably `rule_node_cls` was intended; confirm before
                # relying on rule propagation.
                if isinstance(hierarchy.node[graph], hierarchy.graph_node_cls):
                    rule_to_rewrite = hierarchy.node[graph].rule

                    (lhs_origin_typing,
                     p_origin_typing,
                     rhs_origin_typing) =\
                        hierarchy.get_rule_typing(graph, graph_id)

                    (lhs_prime, lhs_prime_lhs, lhs_prime_origin) =\
                        _propagate_rule_to(
                            rule_to_rewrite.lhs,
                            lhs_origin_typing, rule, instance,
                            p_origin_m, inplace)

                    (pr_prime, pr_prime_pr, pr_prime_origin) =\
                        _propagate_rule_to(
                            rule_to_rewrite.p,
                            p_origin_typing, rule, instance,
                            p_origin_m, inplace)

                    (rhs_prime, rhs_prime_rhs, rhs_prime_origin) =\
                        _propagate_rule_to(
                            rule_to_rewrite.rhs,
                            rhs_origin_typing, rule, instance,
                            p_origin_m, inplace)

                    # find p_m -> lhs_m
                    new_p_lhs = get_unique_map_to_pullback(
                        lhs_prime.nodes(),
                        lhs_prime_lhs,
                        lhs_prime_origin,
                        compose(pr_prime_pr, rule.p_lhs),
                        pr_prime_origin
                    )

                    # find p_m -> rhs_m
                    new_p_rhs = get_unique_map_to_pullback(
                        rhs_prime.nodes(),
                        rhs_prime_rhs,
                        rhs_prime_origin,
                        compose(pr_prime_pr, rule.p_rhs),
                        pr_prime_origin
                    )

                    new_rule =\
                        Rule(pr_prime, lhs_prime, rhs_prime,
                             new_p_lhs, new_p_rhs)

                    updated_rules[graph] = new_rule

                    for suc in hierarchy.successors(graph):
                        if suc == graph_id:
                            lhs_prime_suc_prime =\
                                compose(lhs_prime_origin,
                                        origin_m_origin_prime)
                            rhs_prime_suc_prime =\
                                compose(rhs_prime_origin,
                                        origin_m_origin_prime)
                        # Fixed: this was a separate `if`, whose `else`
                        # branch clobbered the mappings computed for
                        # `suc == graph_id` just above (compare the
                        # if/elif/else chain in the graph branch).
                        elif suc in updated_graphs.keys():
                            lhs_prime_suc_prime = get_unique_map_to_pullback(
                                updated_graphs[suc][0].nodes(),
                                updated_graphs[suc][1],
                                updated_graphs[suc][3],
                                compose(
                                    lhs_prime_lhs,
                                    hierarchy.edge[graph][suc].lhs_mapping),
                                lhs_prime_origin
                            )
                            rhs_prime_suc_prime = get_unique_map_to_pullback(
                                updated_graphs[suc][0].nodes(),
                                updated_graphs[suc][1],
                                updated_graphs[suc][3],
                                compose(
                                    rhs_prime_rhs,
                                    hierarchy.edge[graph][suc].rhs_mapping
                                ),
                                rhs_prime_origin
                            )
                        else:
                            lhs_prime_suc_prime =\
                                compose(
                                    lhs_prime_lhs,
                                    hierarchy.edge[graph][suc].lhs_mapping)
                            rhs_prime_suc_prime =\
                                compose(
                                    rhs_prime_rhs,
                                    hierarchy.edge[graph][suc].rhs_mapping)

                        updated_rule_h[(graph, suc)] =\
                            (lhs_prime_suc_prime, rhs_prime_suc_prime)

    return {
        "graphs": updated_graphs,
        "homomorphisms": updated_homomorphisms,
        "rules": updated_rules,
        "rule_homomorphisms": updated_rule_h,
        "relations": updated_relations
    }
def _propagate_down(hierarchy, origin_id, origin_construct,
                    rule, instance, rhs_typing_rels, inplace=False):
    """Propagate changes down the hierarchy."""
    # Walks the descendants of the rewritten origin graph in BFS order,
    # pushes the rule's rhs into each of them via the relation given in
    # `rhs_typing_rels`, and recomputes the affected homomorphisms.
    updated_graphs = dict()
    updated_homomorphisms = dict()
    updated_relations = []

    # Unpack the rewriting construction performed at the origin graph.
    (origin_m,
     origin_m_origin,
     origin_prime,
     origin_m_origin_prime,
     rhs_origin_prime) = origin_construct

    for graph in nx.bfs_tree(hierarchy, origin_id):
        if graph != origin_id:
            # Build the (graph node, rhs node) relation from the
            # provided rhs typing of this graph.
            relation_g_rhs = set()
            for key, values in rhs_typing_rels[graph].items():
                for v in values:
                    relation_g_rhs.add((v, key))

            (g_prime, g_g_prime, rhs_g_prime) =\
                pushout_from_relation(
                    hierarchy.node[graph].graph, rule.rhs,
                    relation_g_rhs, inplace)
            updated_graphs[graph] = (g_prime, g_g_prime, rhs_g_prime)

            # Typings into successors that were already updated.
            for suc in hierarchy.successors(graph):
                if suc in updated_graphs.keys():
                    updated_homomorphisms[(graph, suc)] =\
                        get_unique_map_from_pushout(
                            g_prime.nodes(),
                            g_g_prime,
                            rhs_g_prime,
                            compose(
                                hierarchy.edge[graph][suc].mapping,
                                updated_graphs[suc][1]),
                            updated_graphs[suc][2])

            # Typings from the origin or already-updated predecessors.
            for pred in hierarchy.predecessors(graph):
                if pred == origin_id:
                    updated_homomorphisms[(pred, graph)] =\
                        get_unique_map_from_pushout(
                            origin_prime.nodes(),
                            origin_m_origin_prime,
                            rhs_origin_prime,
                            compose_chain(
                                [origin_m_origin,
                                 hierarchy.edge[pred][graph].mapping,
                                 g_g_prime]),
                            rhs_g_prime)
                elif pred in updated_graphs.keys():
                    updated_homomorphisms[(pred, graph)] =\
                        get_unique_map_from_pushout(
                            updated_graphs[pred][0].nodes(),
                            updated_graphs[pred][1],
                            updated_graphs[pred][2],
                            compose(
                                hierarchy.edge[pred][graph].mapping,
                                g_g_prime),
                            rhs_g_prime)

            # propagate changes to adjacent relations
            for related_g in hierarchy.adjacent_relations(graph):
                updated_relations.append((graph, related_g))

    return {
        "graphs": updated_graphs,
        "homomorphisms": updated_homomorphisms,
        "relations": updated_relations
    }
def _apply_changes(hierarchy, upstream_changes, downstream_changes):
    """Apply changes to the hierarchy."""
    # Commits the results of `_propagate_up` / `_propagate_down` into the
    # hierarchy: relations first, then graphs, typing homomorphisms, and
    # finally rules and rule homomorphisms.

    # update relations
    visited = set()
    rels = dict()
    for g1, g2 in upstream_changes["relations"]:
        if (g1, g2) not in visited and (g2, g1) not in visited:
            new_pairs = set()
            # upstream changes in both related graphs
            if (g2, g1) in upstream_changes["relations"]:
                # update left side
                new_left_dict = dict()
                left_dict = left_relation_dict(hierarchy.relation[g1][g2].rel)
                for node in upstream_changes["graphs"][g1][0].nodes():
                    old_node = upstream_changes["graphs"][g1][1][node]
                    if old_node in left_dict.keys():
                        new_left_dict[node] = left_dict[old_node]
                # update right side
                new_right_dict = dict()
                right_dict = right_relation_dict(hierarchy.relation[g1][g2].rel)
                for node in upstream_changes["graphs"][g2][0].nodes():
                    old_node = upstream_changes["graphs"][g2][1][node]
                    if old_node in right_dict.keys():
                        new_right_dict[node] = right_dict[old_node]
                new_pairs = compose_relation_dicts(
                    new_left_dict, new_right_dict)
            # downstream changes in one of the related graphs
            elif "relations" in downstream_changes.keys() and\
                 "graphs" in downstream_changes.keys() and\
                 (g2, g1) in downstream_changes["relations"]:
                # update left side
                left_dict = left_relation_dict(hierarchy.relation[g1][g2].rel)
                for node in upstream_changes["graphs"][g1][0].nodes():
                    old_node = upstream_changes["graphs"][g1][1][node]
                    if old_node in left_dict.keys():
                        for right_el in left_dict[old_node]:
                            new_pairs.add(
                                (node,
                                 downstream_changes[
                                     "graphs"][g2][1][right_el]))
            # updates in a single graph involved in the relation
            else:
                left_dict = left_relation_dict(hierarchy.relation[g1][g2].rel)
                for node in upstream_changes["graphs"][g1][0].nodes():
                    old_node = upstream_changes["graphs"][g1][1][node]
                    if old_node in left_dict.keys():
                        for el in left_dict[old_node]:
                            new_pairs.add(
                                (node, el))
            rels[(g1, g2)] = new_pairs
            visited.add((g1, g2))

    if "relations" in downstream_changes.keys() and\
       "graphs" in downstream_changes.keys():
        for g1, g2 in downstream_changes["relations"]:
            if (g1, g2) not in visited and (g2, g1) not in visited:
                # # downstream changes in both related graphs
                new_pairs = set()
                if (g2, g1) in downstream_changes["relations"]:
                    # NOTE(review): the upstream branches above pass
                    # `hierarchy.relation[g1][g2].rel` to
                    # left_relation_dict; here the relation object itself
                    # is passed (no `.rel`) — confirm which is correct.
                    left_dict = left_relation_dict(hierarchy.relation[g1][g2])
                    for left_el, right_els in left_dict.items():
                        # NOTE(review): `downstream_changes["graphs"][1]`
                        # looks like it is missing the graph key (compare
                        # `downstream_changes["graphs"][g2][1]` above);
                        # presumably `["graphs"][g1][1]` /
                        # `["graphs"][g2][1]` were intended — confirm.
                        new_left_node =\
                            downstream_changes["graphs"][1][left_el]
                        for right_el in right_els:
                            new_right_node =\
                                downstream_changes["graphs"][1][right_el]
                            new_pairs.add((new_left_node, new_right_node))
                else:
                    # NOTE(review): same `.rel` / missing-graph-key
                    # concerns as in the branch above — confirm.
                    left_dict = left_relation_dict(hierarchy.relation[g1][g2])
                    for left_el, right_els in left_dict.items():
                        new_left_node =\
                            downstream_changes["graphs"][1][left_el]
                        for right_el in right_els:
                            new_pairs.add((new_left_node, right_el))
                rels[(g1, g2)] = new_pairs
                visited.add((g1, g2))

    # update graphs
    for graph, (graph_m, _, graph_prime, _) in upstream_changes["graphs"].items():
        if graph_prime is not None:
            hierarchy.node[graph].graph = graph_prime
        else:
            hierarchy.node[graph].graph = graph_m
        hierarchy.graph[graph] = hierarchy.node[graph].graph

    if "graphs" in downstream_changes.keys():
        for graph, (graph_prime, _, _) in downstream_changes["graphs"].items():
            hierarchy.node[graph].graph = graph_prime
            hierarchy.graph[graph] = hierarchy.node[graph].graph

    # Re-install the recomputed relations, preserving their attributes.
    for (g1, g2), rel in rels.items():
        old_attrs = copy.deepcopy(hierarchy.relation[g1][g2].attrs)
        hierarchy.remove_relation(g1, g2)
        hierarchy.add_relation(g1, g2, rel, old_attrs)

    # update homomorphisms
    updated_homomorphisms = dict()
    updated_homomorphisms.update(upstream_changes["homomorphisms"])
    if "homomorphisms" in downstream_changes.keys():
        updated_homomorphisms.update(downstream_changes["homomorphisms"])

    for (s, t), mapping in updated_homomorphisms.items():
        # A previously total typing may have lost elements; warn instead
        # of silently keeping the `total` flag.
        total = False
        if hierarchy.edge[s][t].total:
            if not is_total_homomorphism(hierarchy.node[s].graph.nodes(), mapping):
                warnings.warn(
                    "Total typing '%s->%s' became partial after rewriting!" %
                    (s, t),
                    TotalityWarning
                )
            else:
                total = True
        hierarchy.edge[s][t] = hierarchy.graph_typing_cls(
            mapping, total, hierarchy.edge[s][t].attrs
        )
        hierarchy.typing[s][t] = hierarchy.edge[s][t].mapping

    # update rules & rule homomorphisms
    for rule, new_rule in upstream_changes["rules"].items():
        hierarchy.node[rule] = hierarchy.rule_node_cls(
            new_rule, hierarchy.node[rule].attrs
        )
        hierarchy.rule[rule] = hierarchy.node[rule].rule

    for (s, t), (lhs_h, rhs_h) in upstream_changes["rule_homomorphisms"].items():
        hierarchy.edge[s][t] = hierarchy.rule_typing_cls(
            lhs_h, rhs_h,
            hierarchy.edge[s][t].attrs
        )
        hierarchy.rule_lhs_typing[s][t] = hierarchy.edge[s][t].lhs_mapping
        hierarchy.rule_rhs_typing[s][t] = hierarchy.edge[s][t].rhs_mapping
    return
| {
"repo_name": "eugeniashurko/ReGraph",
"path": "regraph/rewriting_utils.py",
"copies": "1",
"size": "23150",
"license": "mit",
"hash": -8266582680949082000,
"line_mean": 40.9384057971,
"line_max": 92,
"alpha_frac": 0.4722678186,
"autogenerated": false,
"ratio": 4.323870003735525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015152231086029794,
"num_lines": 552
} |
"""A collection of (internal usage) utils for rewriting in the hierarchy."""
import time
import copy
import networkx as nx
import warnings
from regraph.networkx.category_utils import (compose,
compose_chain,
compose_relation_dicts,
get_unique_map_from_pushout,
get_unique_map_to_pullback,
get_unique_map_to_pullback_complement,
id_of,
is_total_homomorphism,
pullback_complement,
pushout,
pullback,
pushout_from_relation,
image_factorization)
from regraph import primitives
from regraph.exceptions import TotalityWarning
from regraph.rules import Rule, compose_rules
from regraph.utils import keys_by_value
def _rewrite_base(hierarchy, graph_id, rule, instance,
                  rhs_typing, inplace=False):
    """Rewrite the graph `graph_id` of `hierarchy` with `rule` at `instance`.

    Performs the two classical rewriting steps -- a pullback complement
    (restrictive phase: node/edge removals and clones) followed by a
    pushout (relaxing phase: additions and merges) -- and recomputes the
    homomorphisms from `graph_id` to each of its typing graphs.

    Parameters
    ----------
    hierarchy : hierarchy object containing the graph to rewrite
    graph_id : id of the graph to rewrite
    rule : regraph.rules.Rule to apply
    instance : dict, matching of `rule.lhs` in the graph
    rhs_typing : dict of dicts, partial typing of `rule.rhs` nodes by the
        typing graphs of `graph_id` (values are collections of types)
    inplace : bool, if True the graph object is modified in place

    Returns
    -------
    dict with keys:
      "graph" -- tuple (g_m, p_g_m, g_m_g, g_prime, g_m_g_prime, r_g_prime)
          of intermediate/final graphs and the morphisms between them;
      "homomorphisms" -- updated typings of `graph_id`;
      "relations" -- list of adjacent relations that need an update.
    """
    # Restrictive phase: pullback complement applies removes/clones.
    g_m, p_g_m, g_m_g =\
        pullback_complement(rule.p, rule.lhs, hierarchy.get_graph(graph_id),
                            rule.p_lhs, instance, inplace)
    # Relaxing phase: pushout applies adds/merges on top of g_m.
    g_prime, g_m_g_prime, r_g_prime = pushout(rule.p, g_m, rule.rhs,
                                              p_g_m, rule.p_rhs, inplace)
    relation_updates = []
    for related_g in hierarchy.adjacent_relations(graph_id):
        relation_updates.append((graph_id, related_g))
    updated_homomorphisms = dict()
    for typing_graph in hierarchy.successors(graph_id):
        new_hom = copy.deepcopy(hierarchy.adj[graph_id][typing_graph]["mapping"])
        removed_nodes = set()
        new_nodes = dict()
        for node in rule.lhs.nodes():
            p_keys = keys_by_value(rule.p_lhs, node)
            # nodes that were removed
            if len(p_keys) == 0:
                removed_nodes.add(instance[node])
            elif len(p_keys) == 1:
                # preserved node: it loses its type unless rhs_typing
                # explicitly retypes its rhs image
                if typing_graph not in rhs_typing.keys() or\
                   rule.p_rhs[p_keys[0]] not in rhs_typing[typing_graph].keys():
                    if r_g_prime[rule.p_rhs[p_keys[0]]] in new_hom.keys():
                        removed_nodes.add(r_g_prime[rule.p_rhs[p_keys[0]]])
            # nodes were cloned
            elif len(p_keys) > 1:
                for k in p_keys:
                    if typing_graph in rhs_typing.keys() and\
                       rule.p_rhs[k] in rhs_typing[typing_graph].keys():
                        new_nodes[r_g_prime[rule.p_rhs[k]]] =\
                            list(rhs_typing[typing_graph][rule.p_rhs[k]])[0]
                    else:
                        removed_nodes.add(r_g_prime[rule.p_rhs[k]])
        for node in rule.rhs.nodes():
            p_keys = keys_by_value(rule.p_rhs, node)
            # nodes that were added
            if len(p_keys) == 0:
                if typing_graph in rhs_typing.keys():
                    if node in rhs_typing[typing_graph].keys():
                        new_nodes[node] = list(rhs_typing[
                            typing_graph][node])[0]
            # nodes that were merged
            elif len(p_keys) > 1:
                for k in p_keys:
                    removed_nodes.add(p_g_m[k])
                # assign new type of node
                if typing_graph in rhs_typing.keys():
                    if node in rhs_typing[typing_graph].keys():
                        # NOTE(review): here a *list* is stored as the type,
                        # while the other branches store a single element --
                        # confirm this asymmetry is intended.
                        new_type = list(rhs_typing[typing_graph][node])
                        new_nodes[r_g_prime[node]] = new_type
        # update homomorphisms: drop untyped nodes, then add new typings
        for n in removed_nodes:
            if n in new_hom.keys():
                del new_hom[n]
        new_hom.update(new_nodes)
        updated_homomorphisms.update({
            (graph_id, typing_graph): new_hom
        })
    return {
        "graph": (g_m, p_g_m, g_m_g, g_prime, g_m_g_prime, r_g_prime),
        "homomorphisms": updated_homomorphisms,
        "relations": relation_updates
    }
def _propagate_rule_up(graph, origin_typing, rule, instance, p_origin,
                       p_typing, inplace=False):
    """Propagate the restrictive part of `rule` to a graph typed by the origin.

    Applies the node removals, clones, node-attribute removals, edge
    removals and edge-attribute removals induced by `rule` (which rewrites
    the origin graph) to `graph`, which is typed by the origin through
    `origin_typing`.

    Parameters
    ----------
    graph : graph object to which the changes are propagated
    origin_typing : dict, typing of `graph` by the origin graph
    rule : regraph.rules.Rule applied to the origin
    instance : dict, matching of `rule.lhs` in the origin
    p_origin : dict, mapping of `rule.p` into the rewritten origin
    p_typing : dict or None, controlling relation restricting which copies
        of a cloned node each node of `graph` should follow
    inplace : bool, if True `graph` itself is modified

    Returns
    -------
    (graph_prime, graph_prime_graph, graph_prime_origin) : the updated
    graph, its mapping to the original `graph` and its typing by the
    rewritten origin.
    """
    if inplace is True:
        graph_prime = graph
    else:
        graph_prime = copy.deepcopy(graph)
    if p_typing is None:
        p_typing = {}
    lhs_removed_nodes = rule.removed_nodes()
    lhs_removed_node_attrs = rule.removed_node_attrs()
    p_removed_edges = rule.removed_edges()
    p_removed_edge_attrs = rule.removed_edge_attrs()
    lhs_cloned_nodes = rule.cloned_nodes()
    # identity mapping on the (copied) graph and a copy of its typing
    graph_prime_graph = id_of(graph.nodes())
    graph_prime_origin = copy.deepcopy(origin_typing)
    # remove the nodes whose origin type is removed by the rule
    for lhs_node in rule.lhs.nodes():
        origin_node = instance[lhs_node]
        g_nodes = keys_by_value(
            origin_typing, origin_node)
        for node in g_nodes:
            if lhs_node in lhs_removed_nodes:
                primitives.remove_node(
                    graph_prime, node)
                del graph_prime_graph[node]
                del graph_prime_origin[node]
            else:
                graph_prime_origin[node] = origin_node
    # clone the nodes whose origin type is cloned by the rule
    for lhs_node, p_nodes in lhs_cloned_nodes.items():
        nodes_to_clone = keys_by_value(origin_typing, instance[lhs_node])
        for node in nodes_to_clone:
            if node in p_typing.keys():
                # the controlling relation selects which copies to keep
                p_nodes = p_typing[node]
            for i, p_node in enumerate(p_nodes):
                if i == 0:
                    # reuse the original node for the first copy
                    graph_prime_origin[node] = p_origin[p_node]
                    graph_prime_graph[node] = node
                else:
                    new_name = primitives.clone_node(
                        graph_prime,
                        node)
                    graph_prime_origin[new_name] = p_origin[p_node]
                    graph_prime_graph[new_name] = node
            if len(p_nodes) == 0:
                # no copy requested by the controlling relation: drop it
                primitives.remove_node(graph_prime, node)
    # remove node attributes removed by the rule
    for lhs_node, attrs in lhs_removed_node_attrs.items():
        nodes_to_remove_attrs = keys_by_value(
            origin_typing, instance[lhs_node])
        for node in nodes_to_remove_attrs:
            primitives.remove_node_attrs(
                graph_prime,
                node, attrs)
    # remove edges removed by the rule
    for p_u, p_v in p_removed_edges:
        us = keys_by_value(graph_prime_origin, p_origin[p_u])
        vs = keys_by_value(graph_prime_origin, p_origin[p_v])
        for u in us:
            for v in vs:
                if (u, v) in graph_prime.edges():
                    primitives.remove_edge(
                        graph_prime, u, v)
    # remove edge attributes removed by the rule
    # NOTE(review): these lookups use `origin_typing` while the edge-removal
    # loop above uses the updated `graph_prime_origin` -- confirm this
    # asymmetry is intended (clones are not reflected here).
    for (p_u, p_v), attrs in p_removed_edge_attrs.items():
        us = keys_by_value(origin_typing, p_origin[p_u])
        vs = keys_by_value(origin_typing, p_origin[p_v])
        for u in us:
            for v in vs:
                # Fixed defect: the original called
                # `primitives.removed_edge_attrs`, which does not exist --
                # the primitive is `remove_edge_attrs` (cf. the node case
                # above using `remove_node_attrs`). The old call raised
                # AttributeError whenever a rule removed edge attributes.
                primitives.remove_edge_attrs(
                    graph_prime, u, v, attrs)
    return (graph_prime, graph_prime_graph, graph_prime_origin)
def _propagate_up(hierarchy, graph_id, rule, instance,
                  p_origin_m, origin_m_origin_prime,
                  p_typing, inplace=False):
    """Propagate a rewriting of `graph_id` up the hierarchy.

    Walks all the ancestors of `graph_id` (both graph nodes and rule
    nodes) and applies to each of them the restrictive part
    (removes/clones) of `rule`, collecting the updated graphs, rules,
    homomorphisms and relations without committing anything to the
    hierarchy.

    Returns
    -------
    dict with keys "graphs", "homomorphisms", "rules",
    "rule_homomorphisms" and "relations" describing every upstream
    update to apply (consumed by `_apply_changes`).
    """
    updated_graphs = dict()
    updated_homomorphisms = dict()
    updated_relations = set()
    updated_rules = dict()
    updated_rule_h = dict()
    if rule.is_restrictive():
        # visit ancestors in reverse BFS order from the rewritten graph
        for graph in hierarchy.bfs_tree(graph_id, reverse=True):
            if graph != graph_id:
                if hierarchy.is_graph(graph):
                    origin_typing = hierarchy.get_typing(graph, graph_id)
                    graph_p_typing = None
                    if graph in p_typing.keys():
                        graph_p_typing = p_typing[graph]
                    (graph_prime, graph_prime_graph, graph_prime_origin) =\
                        _propagate_rule_up(
                            hierarchy.get_graph(graph),
                            origin_typing, rule, instance,
                            p_origin_m, graph_p_typing, inplace)
                    updated_graphs[graph] =\
                        (graph_prime, graph_prime_graph,
                         None, graph_prime_origin)
                    graph_successors = list(hierarchy.successors(graph))
                    if graph_id in graph_successors:
                        updated_homomorphisms[(graph, graph_id)] =\
                            compose(
                                graph_prime_origin,
                                origin_m_origin_prime)
                    if len(rule.removed_nodes()) > 0 or\
                       len(rule.cloned_nodes()) > 0:
                        # recompute the typings to all other successors
                        for suc in graph_successors:
                            if suc != graph_id:
                                if suc in updated_graphs.keys():
                                    graph_prime_suc_prime =\
                                        get_unique_map_to_pullback(
                                            updated_graphs[suc][0].nodes(),
                                            updated_graphs[suc][1],
                                            updated_graphs[suc][3],
                                            compose(
                                                graph_prime_graph,
                                                hierarchy.adj[graph][suc][
                                                    "mapping"]),
                                            graph_prime_origin)
                                else:
                                    graph_prime_suc_prime = compose(
                                        graph_prime_graph, hierarchy.adj[
                                            graph][suc]["mapping"])
                                updated_homomorphisms[(graph, suc)] =\
                                    graph_prime_suc_prime
                        # recompute the typings from already-updated
                        # predecessors into this graph
                        for pred in hierarchy.predecessors(graph):
                            if pred in updated_graphs.keys():
                                pred_m_graph_m = get_unique_map_to_pullback(
                                    graph_prime.nodes(),
                                    graph_prime_graph,
                                    graph_prime_origin,
                                    updated_graphs[pred][1],
                                    updated_graphs[pred][3]
                                )
                                updated_homomorphisms[
                                    (pred, graph)] = pred_m_graph_m
                        # propagate changes to adjacent relations
                        for related_g in hierarchy.adjacent_relations(graph):
                            updated_relations.add((graph, related_g))
                else:
                    # the ancestor is a rule node: propagate the changes
                    # to its lhs, p and rhs components separately
                    rule_to_rewrite = hierarchy.node[graph]["rule"]
                    (lhs_origin_typing,
                     p_origin_typing,
                     rhs_origin_typing) =\
                        hierarchy.get_rule_typing(graph, graph_id)
                    (lhs_prime, lhs_prime_lhs, lhs_prime_origin) =\
                        _propagate_rule_up(
                            rule_to_rewrite.lhs,
                            lhs_origin_typing, rule, instance,
                            p_origin_m, {}, inplace=False)
                    (pr_prime, pr_prime_pr, pr_prime_origin) =\
                        _propagate_rule_up(
                            rule_to_rewrite.p,
                            p_origin_typing, rule, instance,
                            p_origin_m, {}, inplace=False)
                    (rhs_prime, rhs_prime_rhs, rhs_prime_origin) =\
                        _propagate_rule_up(
                            rule_to_rewrite.rhs,
                            rhs_origin_typing, rule, instance,
                            p_origin_m, {}, inplace=False)
                    # find p_m -> lhs_m
                    new_p_lhs = get_unique_map_to_pullback(
                        lhs_prime.nodes(),
                        lhs_prime_lhs,
                        lhs_prime_origin,
                        compose(pr_prime_pr, rule_to_rewrite.p_lhs),
                        pr_prime_origin
                    )
                    # find p_m -> rhs_m
                    new_p_rhs = get_unique_map_to_pullback(
                        rhs_prime.nodes(),
                        rhs_prime_rhs,
                        rhs_prime_origin,
                        compose(pr_prime_pr, rule_to_rewrite.p_rhs),
                        pr_prime_origin
                    )
                    new_rule =\
                        Rule(pr_prime, lhs_prime, rhs_prime,
                             new_p_lhs, new_p_rhs)
                    updated_rules[graph] = new_rule
                    # recompute rule homomorphisms to the successors
                    for suc in hierarchy.successors(graph):
                        if suc == graph_id:
                            lhs_prime_suc_prime =\
                                compose(lhs_prime_origin,
                                        origin_m_origin_prime)
                            rhs_prime_suc_prime =\
                                compose(rhs_prime_origin,
                                        origin_m_origin_prime)
                        # NOTE(review): this is a second independent `if`,
                        # not `elif` -- it may overwrite the mappings set
                        # just above when `suc` was itself updated.
                        if suc in updated_graphs.keys():
                            lhs_prime_suc_prime = get_unique_map_to_pullback(
                                updated_graphs[suc][0].nodes(),
                                updated_graphs[suc][1],
                                updated_graphs[suc][3],
                                compose(
                                    lhs_prime_lhs,
                                    hierarchy.adj[graph][suc]["lhs_mapping"]),
                                lhs_prime_origin
                            )
                            rhs_prime_suc_prime = get_unique_map_to_pullback(
                                updated_graphs[suc][0].nodes(),
                                updated_graphs[suc][1],
                                updated_graphs[suc][3],
                                compose(
                                    rhs_prime_rhs,
                                    hierarchy.adj[graph][suc]["rhs_mapping"]
                                ),
                                rhs_prime_origin
                            )
                        else:
                            lhs_prime_suc_prime =\
                                compose(
                                    lhs_prime_lhs,
                                    hierarchy.adj[graph][suc]["lhs_mapping"])
                            rhs_prime_suc_prime =\
                                compose(
                                    rhs_prime_rhs,
                                    hierarchy.adj[graph][suc]["rhs_mapping"])
                        updated_rule_h[(graph, suc)] =\
                            (lhs_prime_suc_prime, rhs_prime_suc_prime)
    else:
        # nothing was removed/cloned: only recompose the typings of the
        # direct predecessors of the rewritten graph
        for pred in hierarchy.predecessors(graph_id):
            if hierarchy.is_graph(pred):
                updated_homomorphisms[(pred, graph_id)] =\
                    compose(
                        hierarchy.adj[pred][graph_id]["mapping"],
                        origin_m_origin_prime)
            else:
                updated_rule_h[(pred, graph_id)] = (
                    compose(
                        hierarchy.adj[pred][graph_id]["lhs_mapping"],
                        origin_m_origin_prime),
                    compose(
                        hierarchy.adj[pred][graph_id]["rhs_mapping"],
                        origin_m_origin_prime)
                )
    return {
        "graphs": updated_graphs,
        "homomorphisms": updated_homomorphisms,
        "rules": updated_rules,
        "rule_homomorphisms": updated_rule_h,
        "relations": updated_relations
    }
def _propagate_down(hierarchy, origin_id, origin_construct,
                    rule, instance, rhs_typing_rels, inplace=False):
    """Propagate changes down the hierarchy.

    For a relaxing rule (adds/merges), pushes the changes of the origin
    graph down to all its descendants via pushouts from the relation
    between each descendant and the rhs of the rule.

    Returns a dict with keys "graphs", "homomorphisms" and "relations"
    describing the downstream updates (consumed by `_apply_changes`).
    """
    updated_graphs = dict()
    updated_homomorphisms = dict()
    updated_relations = []
    # unpack the result of rewriting the origin graph
    (origin_m,
     origin_m_origin,
     origin_prime,
     origin_m_origin_prime,
     rhs_origin_prime) = origin_construct
    if rule.is_relaxing():
        for graph in hierarchy.bfs_tree(origin_id):
            if graph != origin_id:
                # build the relation between the graph and the rule rhs
                relation_g_rhs = set()
                for key, values in rhs_typing_rels[graph].items():
                    for v in values:
                        relation_g_rhs.add((v, key))
                (g_prime, g_g_prime, rhs_g_prime) =\
                    pushout_from_relation(
                        hierarchy.get_graph(graph), rule.rhs,
                        relation_g_rhs, inplace)
                updated_graphs[graph] = (g_prime, g_g_prime, rhs_g_prime)
                graph_predecessors = hierarchy.predecessors(graph)
                if origin_id in graph_predecessors:
                    updated_homomorphisms[(origin_id, graph)] =\
                        get_unique_map_from_pushout(
                            origin_prime.nodes(),
                            origin_m_origin_prime,
                            rhs_origin_prime,
                            compose_chain(
                                [origin_m_origin,
                                 hierarchy.adj[origin_id][graph]["mapping"],
                                 g_g_prime]),
                            rhs_g_prime)
                if len(rule.added_nodes()) > 0 or\
                   len(rule.merged_nodes()) > 0:
                    # recompute the typings between updated descendants
                    for pred in hierarchy.predecessors(graph):
                        if pred in updated_graphs.keys():
                            if pred != origin_id:
                                updated_homomorphisms[(pred, graph)] =\
                                    get_unique_map_from_pushout(
                                        updated_graphs[pred][0].nodes(),
                                        updated_graphs[pred][1],
                                        updated_graphs[pred][2],
                                        compose(
                                            hierarchy.adj[
                                                pred][graph]["mapping"],
                                            g_g_prime),
                                        rhs_g_prime)
                    for suc in hierarchy.successors(graph):
                        if suc in updated_graphs.keys():
                            updated_homomorphisms[(graph, suc)] =\
                                get_unique_map_from_pushout(
                                    g_prime.nodes(),
                                    g_g_prime,
                                    rhs_g_prime,
                                    compose(
                                        hierarchy.adj[graph][suc]["mapping"],
                                        updated_graphs[suc][1]),
                                    updated_graphs[suc][2])
                if len(rule.merged_nodes()) > 0:
                    # propagate changes to adjacent relations
                    for related_g in hierarchy.adjacent_relations(graph):
                        updated_relations.append((graph, related_g))
    else:
        # no adds/merges: only recompose the typings out of the origin
        for suc in hierarchy.successors(origin_id):
            updated_homomorphisms[(origin_id, suc)] =\
                compose(
                    origin_m_origin,
                    hierarchy.adj[origin_id][suc]["mapping"])
    return {
        "graphs": updated_graphs,
        "homomorphisms": updated_homomorphisms,
        "relations": updated_relations
    }
def _apply_changes(hierarchy, upstream_changes, downstream_changes):
    """Apply changes to the hierarchy.

    Merges the update dicts produced by `_propagate_up`
    (`upstream_changes`) and `_propagate_down` (`downstream_changes`)
    and commits them to the hierarchy in a single `hierarchy._update`
    call. Relations between pairs of graphs are recomputed depending on
    whether one or both sides were updated, and in which direction.
    """
    # update relations
    visited = set()
    rels = dict()
    for g1, g2 in upstream_changes["relations"]:
        if (g1, g2) not in visited and (g2, g1) not in visited:
            new_relation = dict()
            # upstream changes in both related graphs
            if (g2, g1) in upstream_changes["relations"]:
                # update left side
                new_left_dict = dict()
                left_dict = hierarchy.relation[g1][g2]
                for node in upstream_changes["graphs"][g1][0].nodes():
                    old_node = upstream_changes["graphs"][g1][1][node]
                    if old_node in left_dict.keys():
                        new_left_dict[node] = left_dict[old_node]
                # update right side
                new_right_dict = dict()
                right_dict = hierarchy.relation[g2][g1]
                for node in upstream_changes["graphs"][g2][0].nodes():
                    old_node = upstream_changes["graphs"][g2][1][node]
                    if old_node in right_dict.keys():
                        new_right_dict[node] = right_dict[old_node]
                new_relation = compose_relation_dicts(
                    new_left_dict, new_right_dict)
            # downstream changes in one of the related graphs
            elif "relations" in downstream_changes.keys() and\
                 "graphs" in downstream_changes.keys() and\
                 (g2, g1) in downstream_changes["relations"]:
                # update left side
                left_dict = hierarchy.relation[g1][g2]
                for node in upstream_changes["graphs"][g1][0].nodes():
                    old_node = upstream_changes["graphs"][g1][1][node]
                    if old_node in left_dict.keys():
                        for right_el in left_dict[old_node]:
                            # map the right element through the
                            # downstream update of g2
                            if node in new_relation.keys():
                                new_relation[node].add(
                                    downstream_changes[
                                        "graphs"][g2][1][right_el])
                            else:
                                new_relation[node] =\
                                    {downstream_changes[
                                        "graphs"][g2][1][right_el]}
            # updates in a single graph involved in the relation
            else:
                left_dict = hierarchy.relation[g1][g2]
                for node in upstream_changes["graphs"][g1][0].nodes():
                    if node in upstream_changes["graphs"][g1][1].keys():
                        old_node = upstream_changes["graphs"][g1][1][node]
                        if old_node in left_dict.keys():
                            for el in left_dict[old_node]:
                                if node in new_relation.keys():
                                    new_relation[node].add(el)
                                else:
                                    new_relation[node] = {el}
            rels[(g1, g2)] = new_relation
            visited.add((g1, g2))
    if "relations" in downstream_changes.keys() and\
       "graphs" in downstream_changes.keys():
        for g1, g2 in downstream_changes["relations"]:
            if (g1, g2) not in visited and (g2, g1) not in visited:
                # downstream changes in both related graphs
                new_relation = dict()
                if (g2, g1) in downstream_changes["relations"]:
                    left_dict = hierarchy.relation[g1][g2]
                    for left_el, right_els in left_dict.items():
                        new_left_node =\
                            downstream_changes["graphs"][g1][1][left_el]
                        for right_el in right_els:
                            new_right_node =\
                                downstream_changes["graphs"][g2][1][right_el]
                            if new_left_node in new_relation.keys():
                                new_relation[new_left_node].add(new_right_node)
                            else:
                                new_relation[new_left_node] = {new_right_node}
                else:
                    # downstream change only on the left side
                    left_dict = hierarchy.relation[g1][g2]
                    for left_el, right_els in left_dict.items():
                        new_left_node =\
                            downstream_changes["graphs"][g1][1][left_el]
                        for right_el in right_els:
                            if new_left_node in new_relation.keys():
                                new_relation[new_left_node].add(right_el)
                            else:
                                new_relation[new_left_node] = {right_el}
                rels[(g1, g2)] = new_relation
                visited.add((g1, g2))
    # updated graphs: prefer the final graph (prime) when it exists,
    # otherwise fall back on the intermediate one
    updated_graphs = dict()
    for graph, (graph_m, _, graph_prime, _) in upstream_changes["graphs"].items():
        if graph_prime is not None:
            updated_graphs[graph] = graph_prime
        else:
            updated_graphs[graph] = graph_m
    if "graphs" in downstream_changes.keys():
        for graph, (graph_prime, _, _) in downstream_changes["graphs"].items():
            updated_graphs[graph] = graph_prime
    # update homomorphisms: downstream entries override upstream ones
    updated_homomorphisms = dict()
    updated_homomorphisms.update(upstream_changes["homomorphisms"])
    if "homomorphisms" in downstream_changes.keys():
        updated_homomorphisms.update(downstream_changes["homomorphisms"])
    hierarchy._update(
        updated_graphs,
        updated_homomorphisms,
        rels,
        upstream_changes["rules"],
        upstream_changes["rule_homomorphisms"])
    return
def _get_rule_liftings(hierarchy, origin_id, rule, instance,
                       p_typing=None, ignore=None):
    """Compute the liftings of `rule` to the ancestors of `origin_id`.

    For a restrictive rule, computes for every ancestor graph (except
    those in `ignore`) the lifted rule L_G <- P_G via two pullbacks,
    optionally restricted by the controlling relation `p_typing`.

    Returns
    -------
    dict mapping each ancestor graph id to a dict with keys
    "rule" (the lifted Rule), "instance" (its matching in the ancestor),
    "l_g_l" and "p_g_p" (mappings back to the original lhs and p).
    """
    if ignore is None:
        ignore = []
    if p_typing is None:
        p_typing = {}
    liftings = {}
    if rule.is_restrictive():
        for graph in hierarchy.bfs_tree(origin_id, reverse=True):
            if graph not in ignore:
                if graph != origin_id:
                    # find the lifting to a graph
                    if hierarchy.is_graph(graph):
                        origin_typing = hierarchy.get_typing(graph, origin_id)
                        # Compute L_G
                        l_g, l_g_g, l_g_l = pullback(
                            hierarchy.get_graph(graph), rule.lhs,
                            hierarchy.get_graph(origin_id),
                            origin_typing, instance)
                        # Compute canonical P_G
                        canonical_p_g, p_g_l_g, p_g_p = pullback(
                            l_g, rule.p, rule.lhs, l_g_l, rule.p_lhs)
                        # Remove controlled things from P_G
                        if graph in p_typing.keys():
                            l_g_factorization = {
                                keys_by_value(l_g_g, k)[0]: v
                                for k, v in p_typing[graph].items()
                            }
                            p_g_nodes_to_remove = set()
                            for n in canonical_p_g.nodes():
                                l_g_node = p_g_l_g[n]
                                # If corresponding L_G node is specified in
                                # the controlling relation, remove all
                                # the instances of P nodes not mentioned
                                # in this relations
                                if l_g_node in l_g_factorization.keys():
                                    p_nodes = l_g_factorization[l_g_node]
                                    if p_g_p[n] not in p_nodes:
                                        del p_g_p[n]
                                        del p_g_l_g[n]
                                        p_g_nodes_to_remove.add(n)
                            # node removal is deferred so as not to mutate
                            # the graph while iterating its nodes
                            for n in p_g_nodes_to_remove:
                                primitives.remove_node(canonical_p_g, n)
                        liftings[graph] = {
                            "rule": Rule(p=canonical_p_g, lhs=l_g, p_lhs=p_g_l_g),
                            "instance": l_g_g,
                            "l_g_l": l_g_l,
                            "p_g_p": p_g_p
                        }
    return liftings
def _get_rule_projections(hierarchy, origin_id, rule, instance,
                          rhs_typing=None, ignore=None):
    """Compute the projections of `rule` to the descendants of `origin_id`.

    For a relaxing rule, computes for every descendant graph (except
    those in `ignore`) the projected rule P_T -> R_T via an image
    factorization followed by a pushout, optionally modified by the
    controlling relation `rhs_typing`.

    Returns
    -------
    dict mapping each descendant graph id to a dict with keys
    "rule" (the projected Rule), "instance" (its matching in the
    descendant), "l_l_t", "p_p_t" and "r_r_t" (mappings from the
    original lhs, p and rhs into the projection).
    """
    if ignore is None:
        ignore = []
    if rhs_typing is None:
        rhs_typing = {}
    projections = {}
    if rule.is_relaxing():
        for graph, origin_typing in hierarchy.get_descendants(origin_id).items():
            if graph not in ignore:
                if hierarchy.is_graph(graph):
                    # Compute canonical P_T
                    l_t, l_l_t, l_t_t = image_factorization(
                        rule.lhs, hierarchy.get_graph(graph),
                        compose(instance, origin_typing))
                    # Compute canonical R_T
                    r_t, l_t_r_t, r_r_t = pushout(
                        rule.p, l_t, rule.rhs,
                        l_l_t, rule.p_rhs)
                    # Modify P_T and R_T according to the controlling
                    # relation rhs_typing
                    if graph in rhs_typing.keys():
                        r_t_factorization = {
                            r_r_t[k]: v
                            for k, v in rhs_typing[graph].items()
                        }
                        added_t_nodes = set()
                        for n in r_t.nodes():
                            if n in r_t_factorization.keys():
                                # If corresponding R_T node is specified in
                                # the controlling relation add nodes of T
                                # that type it to P
                                t_nodes = r_t_factorization[n]
                                for t_node in t_nodes:
                                    if t_node not in l_t_t.values() and\
                                       t_node not in added_t_nodes:
                                        new_p_node = primitives.generate_new_node_id(
                                            l_t, t_node)
                                        primitives.add_node(l_t, new_p_node)
                                        added_t_nodes.add(t_node)
                                        l_t_r_t[new_p_node] = n
                                        l_t_t[new_p_node] = t_node
                                    else:
                                        # t_node already present: just
                                        # redirect its P_T node to n
                                        l_t_r_t[keys_by_value(l_t_t, t_node)[0]] = n
                    projections[graph] = {
                        "rule": Rule(lhs=l_t, p=l_t, rhs=r_t, p_rhs=l_t_r_t),
                        "instance": l_t_t,
                        "l_l_t": l_l_t,
                        "p_p_t": {k: l_l_t[v] for k, v in rule.p_lhs.items()},
                        "r_r_t": r_r_t
                    }
    return projections
def get_rule_hierarchy(hierarchy, origin_id, rule, instance,
                       liftings, projections):
    """Get a hierarchy of rules.

    Assembles the liftings (computed by `_get_rule_liftings`) and the
    projections (computed by `_get_rule_projections`) of `rule` into a
    single rule hierarchy: a dict of per-graph rules plus the rule
    homomorphisms between them along the typing edges of the hierarchy.

    Returns
    -------
    (rule_hierarchy, instances) where `rule_hierarchy` has keys "rules"
    and "rule_homomorphisms", and `instances` maps each graph id to the
    matching of its rule's lhs.
    """
    rule_hierarchy = {
        "rules": {},
        "rule_homomorphisms": {}
    }
    rule_hierarchy["rules"][origin_id] = rule
    instances = {origin_id: instance}
    for graph, data in liftings.items():
        rule_hierarchy["rules"][graph] = data["rule"]
        instances[graph] = data["instance"]
        for successor in hierarchy.successors(graph):
            old_typing = hierarchy.get_typing(graph, successor)
            if successor == origin_id:
                graph_lhs_successor_lhs = data["l_g_l"]
                graph_p_successor_p = data["p_g_p"]
                rule_hierarchy["rule_homomorphisms"][(graph, successor)] = (
                    graph_lhs_successor_lhs,
                    graph_p_successor_p,
                    graph_p_successor_p
                )
            else:
                l_graph_successor = compose(
                    liftings[graph]["instance"],
                    old_typing)
                # already lifted to the successor
                if successor in liftings:
                    p_graph_successor = compose(
                        liftings[graph]["rule"].p_lhs,
                        l_graph_successor)
                    p_successor_successor = compose(
                        liftings[successor]["rule"].p_lhs,
                        liftings[successor]["instance"])
                    # match lhs nodes of the two liftings that project
                    # onto the same original lhs node
                    graph_lhs_successor_lhs = {}
                    for k, v in l_graph_successor.items():
                        l_node_g = liftings[graph]["l_g_l"][k]
                        for vv in keys_by_value(liftings[successor]["instance"], v):
                            l_node_s = liftings[successor]["l_g_l"][vv]
                            if (l_node_s == l_node_g):
                                graph_lhs_successor_lhs[k] = vv
                                break
                    # same matching for the preserved parts
                    graph_p_successor_p = {}
                    for k, v in p_graph_successor.items():
                        p_node_g = liftings[graph]["p_g_p"][k]
                        for vv in keys_by_value(p_successor_successor, v):
                            p_node_s = liftings[successor]["p_g_p"][vv]
                            if (p_node_s == p_node_g):
                                graph_p_successor_p[p_node_g] = p_node_s
                                break
                    rule_hierarchy["rule_homomorphisms"][(graph, successor)] = (
                        graph_lhs_successor_lhs,
                        graph_p_successor_p,
                        graph_p_successor_p
                    )
                elif successor in projections:
                    rule_hierarchy["rule_homomorphisms"][(graph, successor)] = (
                        compose(liftings[graph]["l_g_l"],
                                projections[successor]["l_l_t"]),
                        compose(liftings[graph]["p_g_p"],
                                projections[successor]["p_p_t"]),
                        compose(
                            compose(liftings[graph]["p_g_p"],
                                    rule.p_rhs),
                            projections[successor]["r_r_t"])
                    )
                # didn't touch the successor or projected to it
                else:
                    pass
    for graph, data in projections.items():
        rule_hierarchy["rules"][graph] = data["rule"]
        instances[graph] = data["instance"]
        for predecessor in hierarchy.predecessors(graph):
            old_typing = hierarchy.get_typing(predecessor, graph)
            if predecessor == origin_id:
                predecessor_l_graph_l = data["l_l_t"]
                predecessor_p_graph_p = data["p_p_t"]
                predecessor_rhs_graph_rhs = data["r_r_t"]
                rule_hierarchy["rule_homomorphisms"][(predecessor, graph)] = (
                    predecessor_l_graph_l,
                    predecessor_p_graph_p,
                    predecessor_rhs_graph_rhs
                )
            else:
                # already projected to the predecessor
                if predecessor in projections:
                    l_pred_graph = compose(
                        projections[predecessor]["instance"],
                        old_typing)
                    predecessor_l_graph_l = {}
                    for k, v in projections[
                            predecessor]["instance"].items():
                        predecessor_l_graph_l[k] = keys_by_value(
                            projections[graph]["instance"],
                            l_pred_graph[k])[0]
                    predecessor_rhs_graph_rhs = {}
                    for r_node, r_pred_node in projections[
                            predecessor]["r_r_t"].items():
                        p_pred_nodes = keys_by_value(
                            projections[predecessor][
                                "rule"].p_rhs, r_pred_node)
                        for v in p_pred_nodes:
                            p_graph_node = predecessor_l_graph_l[v]
                            r_graph_node = projections[graph][
                                "rule"].p_rhs[p_graph_node]
                        if len(p_pred_nodes) == 0:
                            r_graph_node = projections[graph]["r_r_t"][
                                r_node]
                        predecessor_rhs_graph_rhs[r_pred_node] = r_graph_node
                    # NOTE(review): the p-component below reuses
                    # `predecessor_l_graph_l` (both rules have p == lhs
                    # in the projections above) -- confirm this is why
                    # the same mapping appears twice.
                    rule_hierarchy["rule_homomorphisms"][(predecessor, graph)] = (
                        predecessor_l_graph_l,
                        predecessor_l_graph_l,
                        predecessor_rhs_graph_rhs
                    )
                # didn't touch the predecessor or lifted to it
                else:
                    pass
    return rule_hierarchy, instances
def _refine_rule_hierarchy(hierarchy, rule_hierarchy, lhs_instances):
    """Refine every rule of a rule hierarchy in place.

    Each rule is refined against its graph (extending its lhs and the
    corresponding instance), the rule homomorphisms are completed for
    the newly-added lhs nodes, and identity rules are created for every
    ancestor/descendant not yet covered by the hierarchy so that the
    p/rhs typing information is preserved.

    Returns
    -------
    dict mapping each graph id to the refined lhs instance;
    `rule_hierarchy` is mutated in place.
    """
    new_lhs_instances = {}
    new_rules = {}
    new_rule_homomorphisms = {}
    for graph, rule in rule_hierarchy["rules"].items():
        # refine rule
        new_lhs_instance = rule.refine(
            hierarchy.get_graph(graph), lhs_instances[graph])
        new_lhs_instances[graph] = new_lhs_instance
    # Update rule homomorphisms
    for (source, target), (lhs_h, p_h, rhs_h) in rule_hierarchy[
            "rule_homomorphisms"].items():
        typing = hierarchy.get_typing(source, target)
        source_rule = rule_hierarchy["rules"][source]
        target_rule = rule_hierarchy["rules"][target]
        for node in source_rule.lhs.nodes():
            if node not in lhs_h.keys():
                # complete the homomorphism for lhs nodes added by
                # the refinement, following the graph typing
                source_node = new_lhs_instances[source][node]
                target_node = typing[source_node]
                target_lhs_node = keys_by_value(
                    new_lhs_instances[target], target_node)[0]
                lhs_h[node] = target_lhs_node
                if node in source_rule.p_lhs.values():
                    source_p_node = keys_by_value(source_rule.p_lhs, node)[0]
                    # NOTE(review): the target p-node is looked up with the
                    # *source* lhs node `node`, not with `target_lhs_node` --
                    # confirm refined lhs nodes share names across rules.
                    target_p_node = keys_by_value(target_rule.p_lhs, node)[0]
                    p_h[source_p_node] = target_p_node
                    source_rhs_node = source_rule.p_rhs[source_p_node]
                    target_rhs_node = target_rule.p_rhs[target_p_node]
                    rhs_h[source_rhs_node] = target_rhs_node
    if len(rule_hierarchy["rules"]) == 0:
        # empty hierarchy: fill it with identity rules everywhere
        for graph in hierarchy.graphs():
            rule_hierarchy["rules"][graph] = Rule.identity_rule()
            new_lhs_instances[graph] = dict()
        for (s, t) in hierarchy.typings():
            rule_hierarchy["rule_homomorphisms"][(s, t)] = (dict(), dict(), dict())
    else:
        for graph, rule in rule_hierarchy["rules"].items():
            # add identity rules where needed
            # to preserve the info on p/rhs_typing
            # add ancestors that are not included in rule hierarchy
            for ancestor, typing in hierarchy.get_ancestors(graph).items():
                if ancestor not in rule_hierarchy["rules"] and\
                   ancestor not in new_rules:
                    # Find a typing of ancestor by the graph
                    l_pred, l_pred_pred, l_pred_l_graph = pullback(
                        hierarchy.get_graph(ancestor), rule.lhs,
                        hierarchy.get_graph(graph), typing, new_lhs_instances[graph])
                    new_rules[ancestor] = Rule(p=l_pred, lhs=l_pred)
                    new_lhs_instances[ancestor] = l_pred_pred
                    r_pred_r_graph = {
                        v: rule.p_rhs[k]
                        for k, v in l_pred_l_graph.items()
                    }
                    for successor in hierarchy.successors(ancestor):
                        if successor in rule_hierarchy["rules"]:
                            if successor == graph:
                                new_rule_homomorphisms[(ancestor, graph)] = (
                                    l_pred_l_graph, l_pred_l_graph, r_pred_r_graph
                                )
                            else:
                                # compose rule homomorphisms along the
                                # typing path from graph to successor
                                path = hierarchy.shortest_path(graph, successor)
                                lhs_h, p_h, rhs_h = rule_hierarchy["rule_homomorphisms"][
                                    (path[0], path[1])]
                                for i in range(2, len(path)):
                                    new_lhs_h, new_p_h, new_rhs_h = rule_hierarchy[
                                        "rule_homomorphisms"][(path[i - 1], path[i])]
                                    lhs_h = compose(lhs_h, new_lhs_h)
                                    p_h = compose(p_h, new_p_h)
                                    rhs_h = compose(rhs_h, new_rhs_h)
                                new_rule_homomorphisms[(ancestor, successor)] = (
                                    compose(l_pred_l_graph, lhs_h),
                                    compose(l_pred_l_graph, p_h),
                                    compose(r_pred_r_graph, rhs_h)
                                )
                        if successor in new_rules:
                            # identity rules map via the graph typing
                            lhs_h = {
                                k: keys_by_value(
                                    new_lhs_instances[successor],
                                    hierarchy.get_typing(ancestor, successor)[v])[0]
                                for k, v in new_lhs_instances[ancestor].items()
                            }
                            new_rule_homomorphisms[(ancestor, successor)] = (
                                lhs_h, lhs_h, lhs_h
                            )
                    for predecessor in hierarchy.predecessors(ancestor):
                        if predecessor in rule_hierarchy["rules"] or\
                           predecessor in new_rules:
                            lhs_h = {
                                k: keys_by_value(
                                    new_lhs_instances[ancestor],
                                    hierarchy.get_typing(predecessor, ancestor)[v])[0]
                                for k, v in new_lhs_instances[predecessor].items()
                            }
                            new_rule_homomorphisms[(predecessor, ancestor)] = (
                                lhs_h, lhs_h, lhs_h
                            )
            # add descendants that are not included in rule hierarchy
            for descendant, typing in hierarchy.get_descendants(graph).items():
                if descendant not in rule_hierarchy["rules"] and\
                   descendant not in new_rules:
                    l_suc, l_graph_l_suc, l_suc_suc = image_factorization(
                        rule.lhs, hierarchy.get_graph(descendant),
                        compose(
                            new_lhs_instances[graph],
                            typing))
                    new_rules[descendant] = Rule(p=l_suc, lhs=l_suc)
                    new_lhs_instances[descendant] = l_suc_suc
                    p_graph_p_suc = {
                        k: l_graph_l_suc[v]
                        for k, v in rule.p_lhs.items()
                    }
                    for predecessor in hierarchy.predecessors(descendant):
                        if predecessor in rule_hierarchy["rules"]:
                            if predecessor == graph:
                                new_rule_homomorphisms[(predecessor, descendant)] = (
                                    l_graph_l_suc, p_graph_p_suc, p_graph_p_suc
                                )
                            else:
                                # compose rule homomorphisms along the
                                # typing path from predecessor to graph
                                path = hierarchy.shortest_path(predecessor, graph)
                                lhs_h, p_h, rhs_h = rule_hierarchy["rule_homomorphisms"][
                                    (path[0], path[1])]
                                for i in range(2, len(path)):
                                    new_lhs_h, new_p_h, new_rhs_h = rule_hierarchy[
                                        "rule_homomorphisms"][(path[i - 1], path[i])]
                                    lhs_h = compose(lhs_h, new_lhs_h)
                                    p_h = compose(p_h, new_p_h)
                                    rhs_h = compose(rhs_h, new_rhs_h)
                                new_rule_homomorphisms[(predecessor, descendant)] = (
                                    compose(lhs_h, l_graph_l_suc),
                                    compose(p_h, p_graph_p_suc),
                                    compose(rhs_h, p_graph_p_suc)
                                )
                        if predecessor in new_rules:
                            lhs_h = {
                                k: keys_by_value(
                                    new_lhs_instances[descendant],
                                    hierarchy.get_typing(predecessor, descendant)[v])[0]
                                for k, v in new_lhs_instances[predecessor].items()
                            }
                            new_rule_homomorphisms[(predecessor, descendant)] = (
                                lhs_h, lhs_h, lhs_h
                            )
                    for successor in hierarchy.successors(descendant):
                        if successor in rule_hierarchy["rules"] or\
                           successor in new_rules:
                            lhs_h = {
                                k: keys_by_value(
                                    new_lhs_instances[successor],
                                    hierarchy.get_typing(descendant, successor)[v])[0]
                                for k, v in new_lhs_instances[descendant].items()
                            }
                            new_rule_homomorphisms[(descendant, successor)] = (
                                lhs_h, lhs_h, lhs_h
                            )
    rule_hierarchy["rules"].update(new_rules)
    rule_hierarchy["rule_homomorphisms"].update(
        new_rule_homomorphisms)
    return new_lhs_instances
| {
"repo_name": "Kappa-Dev/ReGraph",
"path": "regraph/backends/networkx/utils/rewriting_utils.py",
"copies": "1",
"size": "45802",
"license": "mit",
"hash": -8376607582227996000,
"line_mean": 45.3114256825,
"line_max": 89,
"alpha_frac": 0.429260731,
"autogenerated": false,
"ratio": 4.504523996852872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5433784727852872,
"avg_score": null,
"num_lines": null
} |
"""A collection of (internal usage) utils for rule type checking."""
import networkx as nx
from networkx.exception import NetworkXNoPath
from regraph.category_op import check_homomorphism
from regraph.exceptions import RewritingError
from regraph.utils import keys_by_value, format_typing
def _autocomplete_typing(hierarchy, graph_id, instance,
                         lhs_typing, rhs_typing_rel, p_lhs, p_rhs):
    """Autocomplete the lhs/rhs typings of a rule applied at `graph_id`.

    Normalizes the user-supplied (possibly partial) typings, infers the
    typing of the lhs and rhs by the immediate successors of `graph_id`
    from the instance, and then propagates the rhs typing up to all the
    ancestors.

    Parameters
    ----------
    hierarchy : hierarchy object the rewriting happens in
    graph_id : id of the graph being rewritten
    instance : dict, matching of the rule's lhs in the graph
    lhs_typing : dict of dicts or None, partial typing of the lhs
    rhs_typing_rel : dict of dicts or None, partial typing of the rhs
        (values may be a single type or a collection of types)
    p_lhs, p_rhs : dicts, the rule's p->lhs and p->rhs mappings

    Returns
    -------
    (new_lhs_typing, new_rhs_typing_rel) with rhs typing values
    normalized to sets, or (None, None) when `graph_id` is not typed.
    """
    if len(hierarchy.successors(graph_id)) > 0:
        if lhs_typing is None:
            new_lhs_typing = dict()
        else:
            new_lhs_typing = format_typing(lhs_typing)
        if rhs_typing_rel is None:
            new_rhs_typing_rel = dict()
        else:
            new_rhs_typing_rel = format_typing(rhs_typing_rel)
        # normalize every rhs typing value to a set of types
        for g, typing_rel in new_rhs_typing_rel.items():
            for key, values in typing_rel.items():
                value_set = set()
                if type(values) == str:
                    value_set.add(values)
                else:
                    try:
                        for v in values:
                            value_set.add(v)
                    except TypeError:
                        # Fixed defect: `values` is a single non-iterable
                        # value here. The original added `v`, which at this
                        # point is either unbound (NameError) or a stale
                        # element from a previous iteration.
                        value_set.add(values)
                new_rhs_typing_rel[g][key] = value_set
        # make sure every ancestor has an (initially empty) rhs typing
        ancestors = hierarchy.get_ancestors(graph_id)
        for anc, anc_typing in ancestors.items():
            if anc not in new_rhs_typing_rel.keys():
                new_rhs_typing_rel[anc] = dict()
        for typing_graph in hierarchy.successors(graph_id):
            typing = hierarchy.edge[graph_id][typing_graph].mapping
            # Autocomplete lhs and rhs typings
            # by immediate successors induced by an instance
            for (source, target) in instance.items():
                if typing_graph not in new_lhs_typing.keys():
                    new_lhs_typing[typing_graph] = dict()
                if source not in new_lhs_typing[typing_graph].keys():
                    if target in typing.keys():
                        new_lhs_typing[typing_graph][source] = typing[target]
            for (p_node, l_node) in p_lhs.items():
                if l_node in new_lhs_typing[typing_graph].keys():
                    if p_rhs[p_node] not in new_rhs_typing_rel[typing_graph].keys():
                        new_rhs_typing_rel[typing_graph][p_rhs[p_node]] = set()
                    new_rhs_typing_rel[typing_graph][p_rhs[p_node]].add(
                        new_lhs_typing[typing_graph][l_node])
        # Second step of autocompletion of rhs typing:
        # propagate the rhs types to the ancestors of each typing graph
        for graph, typing in new_rhs_typing_rel.items():
            ancestors = hierarchy.get_ancestors(graph)
            for ancestor, ancestor_typing in ancestors.items():
                dif = set(typing.keys()) -\
                    set(new_rhs_typing_rel[ancestor].keys())
                for node in dif:
                    type_set = set()
                    for el in new_rhs_typing_rel[graph][node]:
                        type_set.add(ancestor_typing[el])
                    new_rhs_typing_rel[ancestor][node] = type_set
        return (new_lhs_typing, new_rhs_typing_rel)
    else:
        # the graph is not typed by anything: nothing to autocomplete
        return (None, None)
def _check_self_consistency(hierarchy, typing, strict=True):
    """Check that a typing dict is consistent across hierarchy ancestors.

    For every pair of typing graphs related by ancestry, verifies that
    typing a node directly and typing it through the ancestor's typing
    agree; raises `RewritingError` otherwise. Values may be single type
    names (str) or collections of them.
    """
    # NOTE(review): the `strict` parameter is currently unused in this
    # check -- confirm whether a non-strict mode was ever intended.
    for typing_graph, mapping in typing.items():
        ancestors = hierarchy.get_ancestors(typing_graph)
        for anc, anc_typing in ancestors.items():
            if anc in typing.keys():
                for key, value in mapping.items():
                    if key in typing[anc].keys():
                        if type(value) == str:
                            # single type: direct comparison
                            if value in anc_typing.keys() and\
                               anc_typing[value] != typing[anc][key]:
                                raise RewritingError(
                                    "Node '%s' is typed as "
                                    "'%s' and '%s' in the graph '%s'" %
                                    (key, anc_typing[value], typing[anc][key],
                                     anc))
                        else:
                            try:
                                # collection of types: each must be allowed
                                for val in value:
                                    if val in anc_typing.keys() and\
                                       anc_typing[val] not in typing[anc][key]:
                                        raise RewritingError(
                                            "Node '%s' is typed as "
                                            "'%s' and '%s' in the graph '%s'" %
                                            (key, anc_typing[val],
                                             ", ".join(typing[anc][key]),
                                             anc))
                            except TypeError:
                                # non-iterable scalar: fall back to the
                                # single-value comparison
                                if value in anc_typing.keys() and\
                                   anc_typing[value] != typing[anc][key]:
                                    raise RewritingError(
                                        "Node '%s' is typed as "
                                        "'%s' and '%s' in the graph '%s'" %
                                        (key, anc_typing[value],
                                         ", ".join(typing[anc][key]),
                                         anc))
def _check_lhs_rhs_consistency(hierarchy, graph_id, rule, instance,
                               lhs_typing, rhs_typing, strict=True):
    """Check consistency of typing of the lhs and the rhs of the rule.

    For every preserved node of the rule, verifies that its rhs typing
    (by each typing graph and by all that graph's ancestors) does not
    conflict with the typing inherited from the lhs; raises
    `RewritingError` on any conflict. In strict mode a preserved node
    may not be given more than one rhs type.
    """
    for typing_graph, typing in lhs_typing.items():
        for p_node in rule.p.nodes():
            if typing_graph in rhs_typing.keys():
                # strict mode forbids ambiguous (multi-valued) rhs typing
                # of a preserved node
                if strict is True and\
                   rule.p_rhs[p_node] in rhs_typing[typing_graph].keys() and\
                   len(rhs_typing[typing_graph][rule.p_rhs[p_node]]) > 1:
                    raise RewritingError(
                        "Inconsistent typing of the rule: node "
                        "'%s' from the preserved part is typed "
                        "by a graph '%s' as '%s' from the lhs and "
                        "as a '%s' from the rhs." %
                        (p_node, typing_graph,
                         typing[rule.p_lhs[p_node]],
                         ", ".join(
                             rhs_typing[typing_graph][rule.p_rhs[p_node]])))
            # repeat the checks against every ancestor of the typing graph
            typing_graph_ancestors = hierarchy.get_ancestors(typing_graph)
            for anc, anc_typing in typing_graph_ancestors.items():
                if anc in rhs_typing.keys():
                    if rule.p_rhs[p_node] in rhs_typing[anc]:
                        if strict is True:
                            if len(rhs_typing[anc][rule.p_rhs[p_node]]) > 1:
                                raise RewritingError(
                                    "Inconsistent typing of the rule: node "
                                    "'%s' from the preserved part is typed "
                                    "by a graph '%s' as '%s' from the lhs and "
                                    "as a '%s' from the rhs." %
                                    (p_node, anc,
                                     anc_typing[typing[rule.p_lhs[p_node]]],
                                     ", ".join(
                                         rhs_typing[anc][rule.p_rhs[p_node]])))
                        # a unique rhs type must match the type induced
                        # through the lhs
                        if len(rhs_typing[anc][rule.p_rhs[p_node]]) == 1 and\
                           anc_typing[typing[rule.p_lhs[p_node]]] not in\
                           rhs_typing[anc][rule.p_rhs[p_node]]:
                            raise RewritingError(
                                "Inconsistent typing of the rule: node "
                                "'%s' from the preserved part is typed "
                                "by a graph '%s' as '%s' from the lhs and "
                                "as a '%s' from the rhs." %
                                (p_node, anc,
                                 anc_typing[typing[rule.p_lhs[p_node]]],
                                 list(rhs_typing[anc][rule.p_rhs[p_node]])[0]))
def _check_totality(hierarchy, graph_id, rule, instance,
                    lhs_typing, rhs_typing):
    """Check that everything is typed at the end of the rewriting.

    For every rhs node and every graph that types `graph_id`, ensure that
    the rewriting result stays totally typed; raises RewritingError when a
    node would end up untyped.  (Original docstring had a stray fourth
    quote: ``\"\"\"\"Check`` -- fixed here.)
    """
    for node in rule.rhs.nodes():
        p_nodes = keys_by_value(rule.p_rhs, node)
        for typing_graph in hierarchy.successors(graph_id):
            typing = hierarchy.edge[graph_id][typing_graph].mapping
            # Totality can be broken in two cases
            if len(p_nodes) > 1:
                # node will be merged
                all_untyped = True
                for p_node in p_nodes:
                    if instance[rule.p_lhs[p_node]] in typing.keys():
                        all_untyped = False
                        break
                # Merging only untyped nodes cannot break totality.
                if all_untyped:
                    continue
            # An explicit rhs typing resolves the node's type.
            if typing_graph in rhs_typing.keys() and\
               node in rhs_typing[typing_graph].keys():
                continue
            else:
                raise RewritingError(
                    "Rewriting is strict (no propagation of types is "
                    "allowed), typing of the node `%s` "
                    "in rhs is required (typing by the following "
                    "graph stays unresolved: '%s')!" %
                    (node, typing_graph))
def _check_instance(hierarchy, graph_id, pattern, instance, pattern_typing):
    """Check that `instance` is a valid, consistently-typed match.

    Verifies that `instance` is a total homomorphism from `pattern` into the
    graph `graph_id`, then that the typing induced by the instance through
    the hierarchy coincides with the explicit lhs typing `pattern_typing`.
    Raises RewritingError on any inconsistency.
    """
    check_homomorphism(
        pattern,
        hierarchy.node[graph_id].graph,
        instance,
        total=True
    )
    # check that instance typing and lhs typing coincide
    for node in pattern.nodes():
        if pattern_typing:
            for typing_graph, typing in pattern_typing.items():
                try:
                    # Type of the matched node obtained by composing the
                    # typings along a path from graph_id to typing_graph.
                    instance_typing = hierarchy.compose_path_typing(
                        nx.shortest_path(hierarchy, graph_id, typing_graph))
                    # NOTE(review): 'node in pattern_typing.keys()' tests the
                    # node against the dict of typing graphs, not against
                    # 'typing' -- looks like a possible bug; confirm intent.
                    if node in pattern_typing.keys() and\
                       instance[node] in instance_typing.keys():
                        if typing[node] != instance_typing[instance[node]]:
                            raise RewritingError(
                                "Typing of the instance of LHS does not " +
                                " coincide with typing of LHS!")
                except NetworkXNoPath:
                    raise RewritingError(
                        "Graph '%s' is not typed by '%s' specified "
                        "as a typing graph of the lhs of the rule." %
                        (graph_id, typing_graph))
| {
"repo_name": "eugeniashurko/ReGraph",
"path": "regraph/rule_type_checking.py",
"copies": "1",
"size": "10746",
"license": "mit",
"hash": -6691866400532814000,
"line_mean": 47.8454545455,
"line_max": 84,
"alpha_frac": 0.4536571748,
"autogenerated": false,
"ratio": 4.561120543293718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5514777718093719,
"avg_score": null,
"num_lines": null
} |
"""A collection of marathon-related utils
"""
# future imports
from __future__ import absolute_import
# third-party imports
import requests
# local imports
from .deployment import DeploymentNotFound
from .deployment import MarathonDeployment
from shpkpr import exceptions
class ClientError(exceptions.ShpkprException):
    """Raised when Marathon returns an error or an unexpected response."""
    pass
class DryRun(exceptions.ShpkprException):
    """Raised to abort before any request is made when --dry-run is set."""
    # Exit code 0: a dry run is a successful no-op, not a failure.
    exit_code = 0
class MarathonClient(object):
    """A thin wrapper around the Marathon HTTP API for internal use.

    Handles URL building, optional HTTP basic auth and the --dry-run mode
    (in which any request raises DryRun instead of hitting the network).
    """

    def __init__(self, marathon_url, username=None, password=None, dry_run=False):
        """
        marathon_url: base URL of the Marathon master.
        username/password: optional basic-auth credentials; both must be
            provided for auth to be enabled.
        dry_run: when True, no requests are made (DryRun is raised instead).
        """
        self._marathon_url = marathon_url
        self._dry_run = dry_run
        self._basic_auth = None
        if None not in [username, password]:
            self._basic_auth = requests.auth.HTTPBasicAuth(username, password)

    def _build_url(self, path):
        # Join base URL and path without doubling the slash.
        return self._marathon_url.rstrip("/") + path

    def _make_request(self, method, path, **kwargs):
        # Central request dispatch; honours --dry-run and basic auth.
        if self._dry_run:
            raise DryRun("Exiting as --dry-run requested")
        request = getattr(requests, method.lower())
        return request(self._build_url(path), auth=self._basic_auth, **kwargs)

    def embed_params(self, entity_type):
        """Return the ?embed= values requesting full detail for entity_type."""
        details = ["tasks", "counts", "deployments", "lastTaskFailure",
                   "taskStats"]
        return ["{0}.{1}".format(entity_type, detail) for detail in details]

    def get_info(self):
        """Return the Marathon info from the /v2/info endpoint."""
        response = self._make_request('GET', "/v2/info")
        if response.status_code == 200:
            return response.json()
        raise ClientError("Unable to retrieve info from marathon")

    def delete_application(self, application_id, force=False):
        """Delete the application identified by application_id.

        Returns True on success, False otherwise.
        """
        path = "/v2/apps/" + application_id
        params = {"force": "true"} if force else {}
        response = self._make_request('DELETE', path, params=params)
        # Simplified from an if/else returning True/False.
        return response.status_code == 200

    def get_application(self, application_id):
        """Return detailed information for a single application."""
        path = "/v2/apps/" + application_id
        params = {"embed": self.embed_params("app")}
        response = self._make_request('GET', path, params=params)
        if response.status_code == 200:
            return response.json()['app']
        # raise an appropriate error if something went wrong
        if response.status_code == 404:
            raise ClientError("Unable to retrieve application details from marathon: does not exist.")
        raise ClientError("Unknown Marathon error: %s\n\n%s" % (response.status_code, response.text))

    def list_applications(self):
        """Return a list of all applications currently deployed to marathon."""
        params = {"embed": self.embed_params("apps")}
        response = self._make_request('GET', "/v2/apps", params=params)
        if response.status_code == 200:
            # Copy into a plain list so callers own the result (replaces a
            # manual append loop).
            return list(response.json()['apps'])
        raise ClientError("Unknown Marathon error: %s\n\n%s" % (response.status_code, response.text))

    def deploy(self, application_payload, force=False):
        """Deploy the given application(s) to Marathon.

        Returns a MarathonDeployment tracking the triggered deployment.
        Raises ClientError when the apps are deployment-locked or on any
        other Marathon error.
        """
        # A one-element list is unwrapped so single-app deploys hit the
        # per-app endpoint. Doing this here keeps the cmd implementation
        # clean.
        if isinstance(application_payload, (list, tuple)) and len(application_payload) == 1:
            application_payload = application_payload[0]
        # A dict at this point means a single app; a list/tuple means
        # several applications deployed together.
        if isinstance(application_payload, (list, tuple)):
            path = "/v2/apps/"
        else:
            path = "/v2/apps/" + application_payload['id']
        params = {"force": "true"} if force else {}
        response = self._make_request('PUT', path, params=params, json=application_payload)
        if response.status_code in [200, 201]:
            deployment = response.json()
            return MarathonDeployment(self, deployment['deploymentId'])
        # raise an appropriate error if something went wrong
        if response.status_code == 409:
            deployment_ids = ', '.join([x['id'] for x in response.json()['deployments']])
            raise ClientError("App(s) locked by one or more deployments: %s" % deployment_ids)
        raise ClientError("Unknown Marathon error: %s\n\n%s" % (response.status_code, response.text))

    def get_deployment(self, deployment_id):
        """Return detailed information for a single deployment.

        Raises DeploymentNotFound when the id is absent from the active
        deployment list.
        """
        response = self._make_request('GET', "/v2/deployments")
        if response.status_code == 200:
            for deployment in response.json():
                if deployment['id'] == deployment_id:
                    return deployment
            raise DeploymentNotFound(deployment_id)
        raise ClientError("Unknown Marathon error: %s\n\n%s" % (response.status_code, response.text))
| {
"repo_name": "shopkeep/shpkpr",
"path": "shpkpr/marathon/client.py",
"copies": "1",
"size": "5601",
"license": "mit",
"hash": -5074475384342780000,
"line_mean": 36.5906040268,
"line_max": 102,
"alpha_frac": 0.6207820032,
"autogenerated": false,
"ratio": 4.262557077625571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5383339080825571,
"avg_score": null,
"num_lines": null
} |
""" A collection of methods for handling data gathered through the
StackExchange API. Includes methods for extracting metric data from
StackExchange JSON archives, and methods for putting that data in to a
MySQL database.
"""
import json
import pymysql
import model
import numpy as np
# List of fields (column names) that will occur in the database of cleaned
# data. The order here defines the order of values produced by
# extract_data_vector(); 'id' is the key, 'closed' is the label.
fields = ['id', 'authorrep', 'calculus', 'colons', 'commands', 'commas',
          'dollars', 'doubledollars', 'effort', 'emoticons', 'homework', 'numtags',
          'paragraphs', 'periods', 'pleas', 'politeness', 'postlength', 'precalc',
          'questionmarks', 'questions', 'quotes', 'spaces', 'titlelength',
          'txtspeak', 'closed']
def extract_data_dict(item):
    """Extract metrics from *item* (a dict decoded from StackExchange JSON
    data) and return them as a dict keyed by metric name.
    """
    # Word groups whose occurrences in the post body are counted as metrics.
    demand_words = ['prove', 'Prove', 'show', 'Show', 'compute', 'Compute',
                    'calculate', 'Calculate', 'find', 'Find', 'Explain',
                    'explain']
    effort_words = ['I tried', "I've tried", "My attempt", 'my attempt',
                    'work so far']
    emoticon_list = [':)', ':-)', ':(', ':-(', ':D', ':-D', ';-)', ';)',
                     '(:', '):', ':$', ':-$']
    plea_words = ['help', 'Help', "don't understand", "don't get it",
                  "don't see how", 'show me', 'Show me', 'stuck', 'Stuck']
    polite_words = ['please', 'Please', 'thanks', 'Thanks', 'Thank you',
                    'thank you']
    question_words = ['where', 'Where', 'what', 'What', 'when', 'When',
                      'why', 'Why', 'how', 'How', 'who', 'Who']
    txt_words = [' u ', 'pls', 'Pls', 'Thx', 'thx']

    body = item['body']
    tags = item['tags']

    def occurrences(words):
        # Total number of substring hits of any word in the post body.
        return sum(body.count(word) for word in words)

    stats = {}
    # Posts are occasionally anonymous; such users are declared to have the
    # minimum possible reputation (1).
    if 'owner' in item and 'reputation' in item['owner']:
        stats['authorrep'] = item['owner']['reputation']
    else:
        stats['authorrep'] = 1
    # A closed question only counts when it was closed for the desired
    # ("lacks context") reason.
    if 'closed_details' in item:
        stats['closed'] = int('context' in item['closed_details']['description'])
    else:
        stats['closed'] = 0
    stats['calculus'] = int('calculus' in tags or
                            'multivariable-calculus' in tags)
    stats['colons'] = body.count(':')
    stats['commands'] = occurrences(demand_words)
    stats['commas'] = body.count(',')
    stats['dollars'] = body.count('$')
    stats['doubledollars'] = body.count('$$')
    stats['effort'] = occurrences(effort_words)
    stats['emoticons'] = occurrences(emoticon_list)
    stats['homework'] = int('homework' in tags)
    stats['id'] = item['question_id']
    stats['numtags'] = len(tags)
    stats['paragraphs'] = body.count('<p>')
    stats['periods'] = body.count('.')
    stats['pleas'] = occurrences(plea_words)
    stats['politeness'] = occurrences(polite_words)
    stats['postlength'] = len(body)
    stats['precalc'] = int('algebra-precalculus' in tags)
    stats['questionmarks'] = body.count('?')
    stats['questions'] = occurrences(question_words)
    stats['quotes'] = body.count('"') + body.count("'")
    stats['spaces'] = body.count(' ')
    stats['titlelength'] = len(item['title'])
    stats['txtspeak'] = occurrences(txt_words)
    return stats
def extract_data_vector(item, include_closed=False, include_id=False):
    """Return the metrics of *item* as a tuple ordered like ``fields``.

    include_closed: include the closed status of the post.
    include_id: include the question ID of the post (the database key).
    """
    metrics = extract_data_dict(item)
    first = 0 if include_id else 1
    last = len(fields) if include_closed else len(fields) - 1
    return tuple(metrics[name] for name in fields[first:last])
def add_to_training_data(posts):
    """Merge *posts* (a list of dicts extracted from StackExchange JSON
    data) into the training data stored in the database, then retrain the
    model on all available data.

    If a post ID is already in the training database, its row is updated
    with the newly-extracted measurements.
    """
    query = "INSERT INTO trainingdata ("
    query += ', '.join(fields) + ") VALUES "
    # extract_data_vector already returns a tuple, so str() renders it as a
    # parenthesised SQL value list (the redundant extra tuple() call was
    # dropped). Safe only because every metric is numeric -- do not reuse
    # this string-built pattern for textual data.
    datavecs = [str(extract_data_vector(item, True, True)) for item in posts]
    query += ",\n".join(datavecs)
    query += " ON DUPLICATE KEY UPDATE "
    query += ','.join(["{0}=VALUES({0})".format(field) for field in fields[1:]])
    query += ';\n'
    # Context manager guarantees the config file is closed (was a bare
    # open/close pair before).
    with open('dbase.conf', 'r') as f:
        dbase, user, passwd = f.readline().rstrip().split(',')
    conn = pymysql.connect(user=user, passwd=passwd, db=dbase)
    try:
        cur = conn.cursor()
        count = cur.execute(query)
        conn.commit()
        print("Successfully merged {} entries!".format(count))
        cur.close()
    finally:
        # Close the connection even when the query fails.
        conn.close()
    model.build_model()
def update_live_data(posts):
    """Replace the current live data with the information in *posts* (a list
    of dicts extracted from StackExchange JSON data), annotated with the
    model's prediction and probability for each post.
    """
    # BUG FIX: the %s placeholders must NOT be wrapped in quotes. pymysql
    # quotes and escapes parameters itself, so the previous '"%s"' form
    # produced doubly-quoted values in the generated SQL.
    query = ('INSERT INTO livedata (id, postlink, title, body, userid, '
             'username, userrep, userlink, userpic, prediction, prob) '
             'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')
    predictions = model.predictions(posts)
    probabilities = model.probabilities(posts)
    queryvals = []
    for post, pred, prob in zip(posts, predictions, probabilities):
        owner = post['owner']
        queryvals.append((post['question_id'], post['link'], post['title'],
                          post['body'], owner['user_id'],
                          owner['display_name'], owner['reputation'],
                          owner['link'], owner['profile_image'],
                          float(pred), float(prob)))
    # Context manager guarantees the config file is closed.
    with open('dbase.conf', 'r') as f:
        dbase, user, passwd = f.readline().rstrip().split(',')
    conn = pymysql.connect(user=user, passwd=passwd, db=dbase, charset='utf8')
    try:
        cur = conn.cursor()
        # Wipe the previous live snapshot before inserting the new one.
        cur.execute("DELETE FROM livedata WHERE 1")
        count = cur.executemany(query, queryvals)
        conn.commit()
        print("Successfully merged {} entries!".format(count))
        cur.close()
    finally:
        # Close the connection even when the insert fails.
        conn.close()
# If the script is called directly, process the file 'rawtrainingdata.json'
# to extract metric information into the database.
if __name__ == "__main__":
    # Context manager guarantees the JSON file is closed (was left open).
    with open('rawtrainingdata.json', 'r') as rawdatafile:
        rawdata = json.load(rawdatafile)
    add_to_training_data(rawdata)
| {
"repo_name": "nrpeterson/mse-closure-predictor",
"path": "cleandata.py",
"copies": "1",
"size": "6918",
"license": "mit",
"hash": -3693528262514935300,
"line_mean": 38.3068181818,
"line_max": 205,
"alpha_frac": 0.611303845,
"autogenerated": false,
"ratio": 3.599375650364204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.964246083131802,
"avg_score": 0.013643732809236631,
"num_lines": 176
} |
"""A collection of methods for training a classifier to predict whether posts
from Mathematics.StackExchange will be closed due to 'lack of context', and
for using that model to make predictions.
"""
import numpy as np
from scipy.special import expit as sigmoid
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
import pickle
import json
import pymysql
import cleandata
def build_model():
    """Fit a classifier to the data stored in the trainingdata table and
    pickle the fitted (scaler, classifier) pair to 'model.pickle' for later
    use in predictions.
    """
    # Read the database credentials; 'with' guarantees the file is closed.
    with open('dbase.conf', 'r') as f:
        dbase, user, passwd = f.readline().rstrip().split(',')
    conn = pymysql.connect(user=user, passwd=passwd, db=dbase)
    try:
        cur = conn.cursor()
        print("Fetching training data...")
        count = cur.execute("SELECT * FROM trainingdata")
        print("Done! Fetched {} training records.\n".format(count))
        data = np.array([row for row in cur], dtype=np.float64)
        cur.close()
    finally:
        # Close the connection even when the fetch fails.
        conn.close()
    # Split the data into inputs (X, dropping the leading id column) and
    # outputs (y, the trailing 'closed' label).
    X = data[:, 1:-1]
    y = data[:, -1]
    # Set up the scaler, and transform the data
    print("Scaling data...")
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    print("Done.\n")
    # Initialize the classifier. probability=True enables predict_proba(),
    # which probabilities() relies on. (A LogisticRegression(C=20.) was
    # tried previously; dead commented-out code removed.)
    print("Training the classifier...")
    classifier = SVC(probability=True)
    classifier.fit(X_scaled, y)
    print("Done. Classifier score: {:04f}".format(classifier.score(X_scaled, y)))
    print("Storing model parameters...")
    with open('model.pickle', 'wb') as f:
        pickle.dump((scaler, classifier), f)
    print("Done!")
def probabilities(posts):
    """Return the model's estimated probability of closure for each post in
    *posts* (StackExchange JSON format).
    """
    # Load the pickled (scaler, classifier) pair produced by build_model().
    with open('model.pickle', 'rb') as f:
        scaler, classifier = pickle.load(f)
    vectors = np.array(
        [cleandata.extract_data_vector(post) for post in posts],
        dtype=np.float64)
    # Column 1 of predict_proba() is the probability of the positive
    # ("closed") class.
    return classifier.predict_proba(scaler.transform(vectors))[:, 1]
def predictions(posts):
    """Return the model's closed/open prediction for each post in *posts*
    (StackExchange JSON format).
    """
    # Load the pickled (scaler, classifier) pair produced by build_model().
    with open('model.pickle', 'rb') as f:
        scaler, classifier = pickle.load(f)
    vectors = np.array(
        [cleandata.extract_data_vector(post) for post in posts],
        dtype=np.float64)
    return classifier.predict(scaler.transform(vectors))
# If called as a script: rebuild (retrain and re-pickle) the model.
if __name__ == '__main__':
    build_model()
| {
"repo_name": "nrpeterson/mse-closure-predictor",
"path": "model.py",
"copies": "1",
"size": "2936",
"license": "mit",
"hash": 5167383461531173000,
"line_mean": 30.5698924731,
"line_max": 81,
"alpha_frac": 0.670640327,
"autogenerated": false,
"ratio": 3.842931937172775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5013572264172774,
"avg_score": null,
"num_lines": null
} |
"""A collection of miscellaneous functions to operate on iterables
The pairwise function is different from the recipe version in that it is an
explicit generator that walks the iterable in order to avoid tee.
The partition function is different from the recipe version in that it returns
lists instead of generators as tee would have also generated the overhead and
the predicate would have run twice for each element.
The powerset function is different from the recipe version in that it provides
a reverse argument to allow iteration by size ascendingly and descendingly.
"""
import collections
from itertools import islice, chain, combinations
# Public API of this module.
__all__ = ['chunk', 'divide', 'divide_sizes', 'multi_map', 'pairwise',
           'partition', 'powerset', 'split']
def pairwise(iterable):
    """Pair each element with its neighbors.

    Arguments
    ---------
    iterable : iterable

    Returns
    -------
    The generator produces a tuple containing a pairing of each element with
    its neighbor.

    An empty or one-element iterable yields nothing.
    """
    iterable = iter(iterable)
    # BUG FIX: a bare next() on an empty iterable let StopIteration escape
    # the generator, which PEP 479 (Python 3.7+) converts to RuntimeError.
    # Using a sentinel default restores the intended "empty output" result.
    _missing = object()
    left = next(iterable, _missing)
    if left is _missing:
        return
    for right in iterable:
        yield left, right
        left = right
def partition(pred, iterable):
    """Partition an iterable.

    Arguments
    ---------
    pred : function
              A function taking an element of the iterable and returning a
              boolean indicating to which partition it belongs
    iterable : iterable

    Returns
    -------
    A two-tuple of lists: first the elements for which the predicate was
    False, then those for which it was True.

    Note that, unlike the recipe which returns generators, this version
    returns lists and runs the predicate exactly once per element.
    """
    matched = []
    unmatched = []
    for element in iterable:
        bucket = matched if pred(element) else unmatched
        bucket.append(element)
    return unmatched, matched
def powerset(iterable, *, reverse=False):
    """Return the powerset.

    Arguments
    ---------
    iterable : iterable
    reverse : boolean
                 Indicates whether the powerset should be produced
                 descending by subset size

    Returns
    -------
    A generator producing each element of the powerset, ascending by size
    (starting with the empty tuple) unless *reverse* is set.
    """
    pool = list(iterable)
    if reverse:
        sizes = range(len(pool), -1, -1)
    else:
        sizes = range(len(pool) + 1)
    return chain.from_iterable(combinations(pool, size) for size in sizes)
def multi_map(key, iterable, *, default_dict=False):
    """Collect data into a multi-map.

    Arguments
    ----------
    key : function
             A function accepting an element of the iterable and returning
             the key under which to file it
    iterable : iterable
    default_dict : boolean
                      Indicates whether the result is returned as a
                      defaultdict(list) rather than a plain dict

    Returns
    -------
    A dictionary of lists; each list holds the elements associated with its
    key in the order in which they occur in the iterable.
    """
    grouped = collections.defaultdict(list)
    for record in iterable:
        grouped[key(record)].append(record)
    if default_dict:
        return grouped
    return dict(grouped)
def split(pred, iterable, *, trailing=True):
    """Split the iterable.

    Arguments
    ----------
    pred : function
              A function accepting an element and returning a boolean
              indicating whether it is an element on which to split
    iterable : iterable
    trailing : boolean
                  Whether the matched element ends the current split
                  (True) or begins the following split (False)

    Returns
    -------
    The generator produces a list for each split.

    With *trailing* True the element identified by the predicate appears at
    the end of each split; with *trailing* False it appears at the beginning
    of the following split. No guarantee is made regarding the state of the
    iterable during operation.
    """
    current = []
    if trailing:
        for element in iterable:
            current.append(element)
            if pred(element):
                yield current
                current = []
    else:
        for element in iterable:
            # Matched elements start a new split; flush the previous one
            # first (unless it is empty).
            if pred(element) and current:
                yield current
                current = []
            current.append(element)
    if current:
        yield current
def chunk(iterable, length):
    """Collect data into chunks.

    Arguments
    ---------
    iterable : iterable
    length : integer
                Maximum size of each chunk to return

    Returns
    -------
    The generator produces tuples of at least one and at most *length*
    elements; only the final tuple may be shorter than *length*.

    A variant of the recipe's grouper() that does not pad the last tuple
    with a fill-value.
    """
    if length < 0:
        # Nothing sensible to produce for a negative length.
        return
    source = iter(iterable)
    piece = tuple(islice(source, length))
    while piece:
        yield piece
        piece = tuple(islice(source, length))
def divide(iterable, n):  # pylint: disable=invalid-name
    """Evenly divide elements.

    Arguments
    ---------
    iterable : iterable
    n : integer
           The number of buckets in which to divide the elements

    Returns
    -------
    The generator produces exactly *n* tuples whose sizes are as even as
    possible; some may be empty when there is not enough data.

    The iterable is materialized as a list to compute the sizes; consider
    divide_sizes() plus manual slicing when that is undesirable.
    """
    if n <= 0:
        return
    data = list(iterable)
    base, remainder = divmod(len(data), n)
    source = iter(data)
    for bucket in range(n):
        size = base + 1 if bucket < remainder else base
        yield tuple(islice(source, size))
def divide_sizes(count, n):  # pylint: disable=invalid-name
    """Evenly divide a count.

    Arguments
    ---------
    count : integer
               The number to be evenly divided
    n : integer
           The number of buckets in which to divide the number

    Returns
    -------
    A list of *n* integers giving the bucket sizes for an even distribution
    of *count* (all zeros for a negative count; empty for n <= 0).

    Useful for computing slices over generators too large to materialize as
    divide() does.
    """
    if n <= 0:
        return []
    if count < 0:
        return [0] * n
    base, remainder = divmod(count, n)
    sizes = [base] * n
    for index in range(remainder):
        sizes[index] += 1
    return sizes
| {
"repo_name": "nxdevel/nx_itertools",
"path": "nx_itertools/extra.py",
"copies": "1",
"size": "7606",
"license": "mit",
"hash": -1651712514886710500,
"line_mean": 29.424,
"line_max": 79,
"alpha_frac": 0.6301603997,
"autogenerated": false,
"ratio": 4.747815230961298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5877975630661298,
"avg_score": null,
"num_lines": null
} |
"""a collection of model-related helper classes and functions"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import json
import re
from flask import escape, Markup
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.models.mixins import AuditMixin
import humanize
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from superset import sm
from superset.utils import QueryStatus
class ImportMixin(object):
    """Mixin providing copy/override helpers and JSON ``params`` access."""

    def override(self, obj):
        """Copy the plain (exportable) fields of *obj* onto this object."""
        for attr in obj.__class__.export_fields:
            setattr(self, attr, getattr(obj, attr))

    def copy(self):
        """Return a field-level copy of this object without relationships."""
        clone = self.__class__()
        clone.override(self)
        return clone

    def alter_params(self, **kwargs):
        """Merge *kwargs* into the JSON-encoded ``params`` attribute."""
        merged = self.params_dict
        merged.update(kwargs)
        self.params = json.dumps(merged)

    @property
    def params_dict(self):
        """``params`` decoded as a dict, tolerating trailing commas."""
        if not self.params:
            return {}
        # Strip trailing commas before '}' / ']' so lenient hand-written
        # JSON still parses.
        cleaned = re.sub(',[ \t\r\n]+}', '}', self.params)
        cleaned = re.sub(',[ \t\r\n]+\]', ']', cleaned)
        return json.loads(cleaned)
class AuditMixinNullable(AuditMixin):
    """Altering the AuditMixin to use nullable fields

    Allows creating objects programmatically outside of CRUD
    """
    # Timestamps are nullable so rows can be created outside the web UI.
    created_on = sa.Column(sa.DateTime, default=datetime.now, nullable=True)
    changed_on = sa.Column(
        sa.DateTime, default=datetime.now,
        onupdate=datetime.now, nullable=True)

    @declared_attr
    def created_by_fk(self):  # noqa
        # FK to the creating user, defaulted from the current session user.
        return sa.Column(
            sa.Integer, sa.ForeignKey('ab_user.id'),
            default=self.get_user_id, nullable=True)

    @declared_attr
    def changed_by_fk(self):  # noqa
        # FK to the last editing user, refreshed on every update.
        return sa.Column(
            sa.Integer, sa.ForeignKey('ab_user.id'),
            default=self.get_user_id, onupdate=self.get_user_id, nullable=True)

    def _user_link(self, user):
        # Render a profile link for *user*, or '' when unset.
        if not user:
            return ''
        url = '/superset/profile/{}/'.format(user.username)
        return Markup('<a href="{}">{}</a>'.format(url, escape(user) or ''))

    @renders('created_by')
    def creator(self):  # noqa
        """HTML link to the creating user's profile."""
        return self._user_link(self.created_by)

    @property
    def changed_by_(self):
        """HTML link to the last editing user's profile."""
        return self._user_link(self.changed_by)

    @renders('changed_on')
    def changed_on_(self):
        """Last-modified timestamp wrapped for table display."""
        return Markup(
            '<span class="no-wrap">{}</span>'.format(self.changed_on))

    @renders('changed_on')
    def modified(self):
        """Humanized 'x ago' form of the last modification time."""
        s = humanize.naturaltime(datetime.now() - self.changed_on)
        return Markup('<span class="no-wrap">{}</span>'.format(s))

    @property
    def icons(self):
        # NOTE(review): relies on self.datasource_edit_url and
        # self.datasource being provided by the subclass -- confirm on each
        # class mixing this in.
        return """
        <a
                href="{self.datasource_edit_url}"
                data-toggle="tooltip"
                title="{self.datasource}">
            <i class="fa fa-database"></i>
        </a>
        """.format(**locals())
class QueryResult(object):
    """Object returned by the query interface: bundles the result dataframe
    with the executed query and its outcome metadata."""

    def __init__(self, df, query, duration,
                 status=QueryStatus.SUCCESS, error_message=None):  # noqa
        self.df = df                        # result set as a dataframe
        self.query = query                  # SQL text that was executed
        self.duration = duration            # wall-clock execution time
        self.status = status                # a QueryStatus value
        self.error_message = error_message  # populated on failure
def merge_perm(sm, permission_name, view_menu_name, connection):
    """Ensure the (permission, view menu) pair exists and is linked.

    Uses raw connection-level inserts (rather than the ORM session) so it
    can run from within a flush/migration context. Creates the permission
    row and/or the view menu row when absent, then links them in the
    permission-view table if no link exists yet.
    """
    permission = sm.find_permission(permission_name)
    view_menu = sm.find_view_menu(view_menu_name)
    pv = None
    if not permission:
        permission_table = sm.permission_model.__table__
        connection.execute(
            permission_table.insert()
            .values(name=permission_name),
        )
    if not view_menu:
        view_menu_table = sm.viewmenu_model.__table__
        connection.execute(
            view_menu_table.insert()
            .values(name=view_menu_name),
        )
    # Re-query so freshly inserted rows (with their ids) are picked up.
    permission = sm.find_permission(permission_name)
    view_menu = sm.find_view_menu(view_menu_name)
    if permission and view_menu:
        pv = sm.get_session.query(sm.permissionview_model).filter_by(
            permission=permission, view_menu=view_menu).first()
    if not pv and permission and view_menu:
        # Link the pair only when no permission-view row exists yet.
        permission_view_table = sm.permissionview_model.__table__
        connection.execute(
            permission_view_table.insert()
            .values(
                permission_id=permission.id,
                view_menu_id=view_menu.id,
            ),
        )
def set_perm(mapper, connection, target):  # noqa
    """SQLAlchemy event listener keeping ``target.perm`` in sync.

    Recomputes the perm string and, when stale, writes it back with a raw
    table update (the ORM session cannot be flushed from inside the event).
    """
    if target.perm != target.get_perm():
        link_table = target.__table__
        connection.execute(
            link_table.update()
            .where(link_table.c.id == target.id)
            .values(perm=target.get_perm()),
        )
    # add to view menu if not already exists
    merge_perm(sm, 'datasource_access', target.get_perm(), connection)
| {
"repo_name": "alanmcruickshank/superset-dev",
"path": "superset/models/helpers.py",
"copies": "1",
"size": "5135",
"license": "apache-2.0",
"hash": -8360271160628011000,
"line_mean": 28.5114942529,
"line_max": 79,
"alpha_frac": 0.600194742,
"autogenerated": false,
"ratio": 3.925840978593272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00009908838684106223,
"num_lines": 174
} |
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
# Cache of TreeBuilder classes already constructed, keyed by tree type name.
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Get a TreeBuilder class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive).
        Supported values are:

        "dom"   - A generic builder for DOM implementations, defaulting to a
                  xml.dom.minidom based implementation.
        "etree" - A generic builder for tree implementations exposing an
                  ElementTree-like interface, defaulting to
                  xml.etree.cElementTree if available and
                  xml.etree.ElementTree if not.
        "lxml"  - A etree-based builder for lxml.etree, handling limitations
                  of lxml's implementation.

    implementation - (Currently applies to the "etree" and "dom" tree types).
        A module implementing the tree type e.g. xml.etree.ElementTree or
        xml.etree.cElementTree.
    """
    treeType = treeType.lower()
    if treeType in treeBuilderCache:
        return treeBuilderCache[treeType]

    if treeType == "dom":
        from . import dom
        # Come up with a sane default (pref. from the stdlib).
        if implementation is None:
            from xml.dom import minidom
            implementation = minidom
        # Never cached here: caching is done inside the dom submodule.
        return dom.getDomModule(implementation, **kwargs).TreeBuilder
    if treeType == "etree":
        from . import etree
        if implementation is None:
            implementation = default_etree
        # Never cached here: caching is done inside the etree submodule.
        return etree.getETreeModule(implementation, **kwargs).TreeBuilder
    if treeType == "lxml":
        from . import etree_lxml
        treeBuilderCache[treeType] = etree_lxml.TreeBuilder
        return treeBuilderCache[treeType]
    raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
| {
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"path": "Server/src/virtualenv/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py",
"copies": "1",
"size": "3408",
"license": "mit",
"hash": 8366273855690885000,
"line_mean": 42.1392405063,
"line_max": 79,
"alpha_frac": 0.6813380282,
"autogenerated": false,
"ratio": 4.827195467422096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 76
} |
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.simpletree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
The supplied simpletree module provides a python-only implementation
of a full treebuilder and is a useful reference for the semantics of
the various methods.
"""
# Cache of TreeBuilder classes already constructed, keyed by tree type name.
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are "simpletree", "dom", "etree" and "beautifulsoup"
"simpletree" - a built-in DOM-ish tree type with support for some
more pythonic idioms.
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation for the sake of
backwards compatibility (as releases up until 0.10 had a
builder called "dom" that was a minidom implemenation).
"etree" - A generic builder for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"beautifulsoup" - Beautiful soup (if installed)
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or lxml.etree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
import dom
# XXX: Keep backwards compatibility by using minidom if no implementation is given
if implementation == None:
from xml.dom import minidom
implementation = minidom
# XXX: NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "simpletree":
import simpletree
treeBuilderCache[treeType] = simpletree.TreeBuilder
elif treeType == "beautifulsoup":
import soup
treeBuilderCache[treeType] = soup.TreeBuilder
elif treeType == "lxml":
import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
# Come up with a sane default
if implementation == None:
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
try:
import cElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
implementation = ET
import etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
return treeBuilderCache.get(treeType)
| {
"repo_name": "bbondy/brianbondy.gae",
"path": "libs/html5lib/treebuilders/__init__.py",
"copies": "1",
"size": "4473",
"license": "mit",
"hash": 1301685314238968000,
"line_mean": 46.6195652174,
"line_max": 94,
"alpha_frac": 0.6317907445,
"autogenerated": false,
"ratio": 5.06568516421291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0062642019444181735,
"num_lines": 92
} |
"""A collection of modules for building different kinds of trees from HTML
documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1. A set of classes for various types of elements: Document, Doctype, Comment,
Element. These must implement the interface of ``base.treebuilders.Node``
(although comment nodes have a different signature for their constructor,
see ``treebuilders.etree.Comment``) Textual content may also be implemented
as another node type, or not, as your tree implementation requires.
2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits
from ``treebuilders.base.TreeBuilder``. This has 4 required attributes:
* ``documentClass`` - the class to use for the bottommost node of a document
* ``elementClass`` - the class to use for HTML Elements
* ``commentClass`` - the class to use for comments
* ``doctypeClass`` - the class to use for doctypes
It also has one required method:
* ``getDocument`` - Returns the root node of the complete document tree
3. If you wish to run the unit tests, you must also create a ``testSerializer``
method on your treebuilder which accepts a node and returns a string
containing Node and its children serialized according to the format used in
the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from .._utils import default_etree
# Cache of TreeBuilder classes, keyed by lowercased tree type name.
# The "dom" and "etree" types are never stored here: their submodules
# maintain their own per-implementation caches.
treeBuilderCache = {}


def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Get a TreeBuilder class for various types of trees with built-in support

    :arg treeType: the name of the tree type required (case-insensitive).
        Supported values are:

        * "dom" - A generic builder for DOM implementations, defaulting to a
          xml.dom.minidom based implementation.
        * "etree" - A generic builder for tree implementations exposing an
          ElementTree-like interface, defaulting to xml.etree.cElementTree if
          available and xml.etree.ElementTree if not.
        * "lxml" - A etree-based builder for lxml.etree, handling limitations
          of lxml's implementation.

    :arg implementation: (Currently applies to the "etree" and "dom" tree
        types). A module implementing the tree type e.g. xml.etree.ElementTree
        or xml.etree.cElementTree.

    :arg kwargs: Any additional options to pass to the TreeBuilder when
        creating it.

    :raises ValueError: if ``treeType`` is not a recognised tree type.

    Example:

    >>> from html5lib.treebuilders import getTreeBuilder
    >>> builder = getTreeBuilder('etree')
    """
    kind = treeType.lower()
    if kind in treeBuilderCache:
        return treeBuilderCache[kind]
    if kind == "dom":
        from . import dom
        # Come up with a sane default (pref. from the stdlib)
        if implementation is None:
            from xml.dom import minidom
            implementation = minidom
        # NEVER cache here, caching is done in the dom submodule
        return dom.getDomModule(implementation, **kwargs).TreeBuilder
    if kind == "etree":
        from . import etree
        if implementation is None:
            implementation = default_etree
        # NEVER cache here, caching is done in the etree submodule
        return etree.getETreeModule(implementation, **kwargs).TreeBuilder
    if kind == "lxml":
        from . import etree_lxml
        builder = etree_lxml.TreeBuilder
        treeBuilderCache[kind] = builder
        return builder
    raise ValueError("""Unrecognised treebuilder "%s" """ % kind)
| {
"repo_name": "ncos/lisa",
"path": "src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/html5lib/treebuilders/__init__.py",
"copies": "6",
"size": "3680",
"license": "mit",
"hash": -1848524000907628300,
"line_mean": 39.8181818182,
"line_max": 83,
"alpha_frac": 0.6717391304,
"autogenerated": false,
"ratio": 4.699872286079183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8371611416479182,
"avg_score": null,
"num_lines": null
} |
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshistream", "lxmletree"]
from .. import constants
from ..utils import default_etree
# Cache of TreeWalker classes keyed by lowercased tree type name.
# The "etree" type is never cached here: its submodule caches per
# implementation.
treeWalkerCache = {}


def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support

    Args:
        treeType (str): the name of the tree type required
            (case-insensitive).  Supported values are:

            - "dom": The xml.dom.minidom DOM implementation
            - "etree": A generic walker for tree implementations exposing an
              elementtree-like interface (known to work with ElementTree,
              cElementTree and lxml.etree).
            - "lxml": Optimized walker for lxml.etree
            - "genshi": a Genshi stream

        implementation: A module implementing the tree type e.g.
            xml.etree.ElementTree or cElementTree (Currently applies to the
            "etree" tree type only).

    Returns None for an unrecognised tree type.
    """
    kind = treeType.lower()
    if kind not in treeWalkerCache:
        if kind == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # Never cached here -- the etree submodule keeps its own
            # per-implementation cache.
            return etree.getETreeModule(implementation, **kwargs).TreeWalker
        elif kind == "dom":
            from . import dom
            treeWalkerCache[kind] = dom.TreeWalker
        elif kind == "genshi":
            from . import genshistream
            treeWalkerCache[kind] = genshistream.TreeWalker
        elif kind == "lxml":
            from . import lxmletree
            treeWalkerCache[kind] = lxmletree.TreeWalker
    return treeWalkerCache.get(kind)
def concatenateCharacterTokens(tokens):
    """Merge adjacent character tokens into single ``Characters`` tokens.

    Runs of "Characters" and "SpaceCharacters" tokens are joined into one
    "Characters" token; every other token is passed through unchanged.
    """
    pendingCharacters = []
    for token in tokens:
        # Renamed local from ``type`` to avoid shadowing the builtin.
        token_type = token["type"]
        if token_type in ("Characters", "SpaceCharacters"):
            pendingCharacters.append(token["data"])
        else:
            if pendingCharacters:
                yield {"type": "Characters", "data": "".join(pendingCharacters)}
                pendingCharacters = []
            yield token
    # Flush any trailing character run.
    if pendingCharacters:
        yield {"type": "Characters", "data": "".join(pendingCharacters)}


def pprint(walker):
    """Pretty printer for tree walkers

    Renders the token stream produced by ``walker`` as a multi-line
    string, indenting two spaces per nesting level.
    """
    output = []
    indent = 0
    for token in concatenateCharacterTokens(walker):
        # Renamed local from ``type`` to avoid shadowing the builtin.
        token_type = token["type"]
        if token_type in ("StartTag", "EmptyTag"):
            # tag name (prefixed with its namespace unless it is HTML)
            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
                if token["namespace"] in constants.prefixes:
                    ns = constants.prefixes[token["namespace"]]
                else:
                    ns = token["namespace"]
                name = "%s %s" % (ns, token["name"])
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            # attributes (sorted for consistent ordering)
            attrs = token["data"]
            for (namespace, localname), value in sorted(attrs.items()):
                if namespace:
                    if namespace in constants.prefixes:
                        ns = constants.prefixes[namespace]
                    else:
                        ns = namespace
                    name = "%s %s" % (ns, localname)
                else:
                    name = localname
                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
            # self-closing
            if token_type == "EmptyTag":
                indent -= 2
        elif token_type == "EndTag":
            # End tags emit no line of their own; they only close the indent.
            indent -= 2
        elif token_type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
        elif token_type == "Doctype":
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["publicId"],
                                   token["systemId"] if token["systemId"] else ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))
        elif token_type == "Characters":
            output.append("%s\"%s\"" % (" " * indent, token["data"]))
        elif token_type == "SpaceCharacters":
            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
        else:
            raise ValueError("Unknown token type, %s" % token_type)
    return "\n".join(output)
| {
"repo_name": "mindw/html5lib-python",
"path": "html5lib/treewalkers/__init__.py",
"copies": "1",
"size": "5558",
"license": "mit",
"hash": -6386479822638438000,
"line_mean": 37.8671328671,
"line_max": 94,
"alpha_frac": 0.5269881252,
"autogenerated": false,
"ratio": 4.858391608391608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00040514727684459246,
"num_lines": 143
} |
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
from .. import constants
from .._utils import default_etree
__all__ = ["getTreeWalker", "pprint"]
# Cache of TreeWalker classes, keyed by lowercased tree type name.
# "etree" walkers are not stored here: the etree submodule caches per
# implementation.
treeWalkerCache = {}


def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support

    :arg str treeType: the name of the tree type required (case-insensitive).
        Supported values are:

        * "dom": The xml.dom.minidom DOM implementation
        * "etree": A generic walker for tree implementations exposing an
          elementtree-like interface (known to work with ElementTree,
          cElementTree and lxml.etree).
        * "lxml": Optimized walker for lxml.etree
        * "genshi": a Genshi stream

    :arg implementation: A module implementing the tree type e.g.
        xml.etree.ElementTree or cElementTree (Currently applies to the
        "etree" tree type only).

    :arg kwargs: keyword arguments passed to the etree walker--for other
        walkers, this has no effect

    :returns: a TreeWalker class, or None for an unrecognised tree type
    """
    kind = treeType.lower()
    if kind not in treeWalkerCache:
        if kind == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # Never cached here -- the etree submodule keeps its own
            # per-implementation cache.
            return etree.getETreeModule(implementation, **kwargs).TreeWalker
        elif kind == "dom":
            from . import dom
            treeWalkerCache[kind] = dom.TreeWalker
        elif kind == "genshi":
            from . import genshi
            treeWalkerCache[kind] = genshi.TreeWalker
        elif kind == "lxml":
            from . import etree_lxml
            treeWalkerCache[kind] = etree_lxml.TreeWalker
    return treeWalkerCache.get(kind)
def concatenateCharacterTokens(tokens):
    """Merge runs of character tokens into single ``Characters`` tokens.

    Adjacent "Characters" / "SpaceCharacters" tokens are joined into one
    "Characters" token; all other tokens pass through untouched.
    """
    buffered = []
    for tok in tokens:
        if tok["type"] in ("Characters", "SpaceCharacters"):
            buffered.append(tok["data"])
            continue
        if buffered:
            yield {"type": "Characters", "data": "".join(buffered)}
            del buffered[:]
        yield tok
    # Emit any character run left at end of stream.
    if buffered:
        yield {"type": "Characters", "data": "".join(buffered)}
def pprint(walker):
    """Pretty printer for tree walkers

    Takes a TreeWalker instance and pretty prints the output of walking the
    tree: one line per start tag, attribute, comment, doctype or character
    run, indented two spaces per nesting level.

    :arg walker: a TreeWalker instance

    :returns: the rendered tree as a single newline-joined string
    """
    output = []
    indent = 0
    for token in concatenateCharacterTokens(walker):
        # NOTE(review): local name shadows the builtin ``type``.
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # tag name (prefixed with its namespace unless it is HTML)
            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
                if token["namespace"] in constants.prefixes:
                    ns = constants.prefixes[token["namespace"]]
                else:
                    ns = token["namespace"]
                name = "%s %s" % (ns, token["name"])
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            # attributes (sorted for consistent ordering)
            attrs = token["data"]
            for (namespace, localname), value in sorted(attrs.items()):
                if namespace:
                    if namespace in constants.prefixes:
                        ns = constants.prefixes[namespace]
                    else:
                        ns = namespace
                    name = "%s %s" % (ns, localname)
                else:
                    name = localname
                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
            # self-closing
            if type == "EmptyTag":
                indent -= 2
        elif type == "EndTag":
            # End tags emit no line; they only close the indentation level.
            indent -= 2
        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
        elif type == "Doctype":
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["publicId"],
                                   token["systemId"] if token["systemId"] else ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))
        elif type == "Characters":
            output.append("%s\"%s\"" % (" " * indent, token["data"]))
        elif type == "SpaceCharacters":
            # concatenateCharacterTokens folds all space tokens into
            # Characters tokens, so this branch is unreachable.
            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
        else:
            raise ValueError("Unknown token type, %s" % type)
    return "\n".join(output)
| {
"repo_name": "neoscoin/neos-core",
"path": "src/ledger/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py",
"copies": "5",
"size": "5868",
"license": "mit",
"hash": -8752658690270540000,
"line_mean": 36.1038961039,
"line_max": 94,
"alpha_frac": 0.5252215406,
"autogenerated": false,
"ratio": 4.906354515050167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7931576055650168,
"avg_score": null,
"num_lines": null
} |
"""A collection of native images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import io
import logging
import os
import pwd
import shutil
import stat
from treadmill import appcfg
from treadmill import cgroups
from treadmill import fs
from treadmill import runtime
from treadmill import subproc
from treadmill import supervisor
from treadmill import utils
from . import fs as image_fs
from . import _image_base
from . import _repository_base
# Module level logger for this image implementation.
_LOGGER = logging.getLogger(__name__)
# Name of the directory (relative to the container dir) that holds the
# environment directory shared with the container.
_CONTAINER_ENV_DIR = 'env'
def create_environ_dir(container_dir, root_dir, app):
    """Creates environ dir for s6-envdir.

    Collects the Treadmill environment variables from the app manifest,
    writes them as an envdir under ``container_dir`` and bind-mounts that
    directory into the container root.

    :param container_dir: path of the container directory on the host.
    :param root_dir: path of the container's root filesystem.
    :param app: application manifest object (attribute access).
    """
    env_dir = os.path.join(container_dir, _CONTAINER_ENV_DIR)
    env = {
        'TREADMILL_APP': app.app,
        'TREADMILL_CELL': app.cell,
        'TREADMILL_CPU': app.cpu,
        'TREADMILL_DISK': app.disk,
        'TREADMILL_HOST_IP': app.network.external_ip,
        'TREADMILL_IDENTITY': app.identity,
        'TREADMILL_IDENTITY_GROUP': app.identity_group,
        'TREADMILL_INSTANCEID': app.task,
        'TREADMILL_MEMORY': app.memory,
        'TREADMILL_PROID': app.proid,
        'TREADMILL_ENV': app.environment,
    }
    # One variable per named endpoint, holding the real (host) port
    # allocated for that endpoint.
    for endpoint in app.endpoints:
        envname = 'TREADMILL_ENDPOINT_{0}'.format(endpoint.name.upper())
        env[envname] = endpoint.real_port
    # Ephemeral port lists are exported as space-separated strings.
    env['TREADMILL_EPHEMERAL_TCP_PORTS'] = ' '.join(
        [str(port) for port in app.ephemeral_ports.tcp]
    )
    env['TREADMILL_EPHEMERAL_UDP_PORTS'] = ' '.join(
        [str(port) for port in app.ephemeral_ports.udp]
    )
    env['TREADMILL_CONTAINER_IP'] = app.network.vip
    env['TREADMILL_GATEWAY_IP'] = app.network.gateway
    if app.shared_ip:
        env['TREADMILL_SERVICE_IP'] = app.network.external_ip
    supervisor.create_environ_dir(env_dir, env)
    # Bind the environ directory in the container volume
    fs.mkdir_safe(os.path.join(root_dir, _CONTAINER_ENV_DIR))
    fs.mount_bind(root_dir, os.path.join('/', _CONTAINER_ENV_DIR),
                  target=os.path.join(container_dir, _CONTAINER_ENV_DIR),
                  bind_opt='--bind')
def create_supervision_tree(container_dir, root_dir, app):
    """Creates s6 supervision tree.

    Builds two scan directories under ``container_dir``:

    - ``sys`` for system services (run as root, longer finish timeout,
      watched by the ``monitor`` service);
    - ``services`` for the application's own services (run as the
      service's proid).

    The ``services`` directory is bind-mounted into the container root
    at ``/services``.
    """
    sys_dir = os.path.join(container_dir, 'sys')
    sys_scandir = supervisor.create_scan_dir(
        sys_dir,
        finish_timeout=6000,
        monitor_service='monitor'
    )
    for svc_def in app.system_services:
        supervisor.create_service(
            sys_scandir,
            name=svc_def.name,
            app_run_script=svc_def.command,
            userid='root',
            environ_dir=os.path.join(container_dir, _CONTAINER_ENV_DIR),
            environ={
                envvar.name: envvar.value
                for envvar in svc_def.environ
            },
            environment=app.environment,
            downed=svc_def.downed,
            trace=None,
            monitor_policy={
                'limit': svc_def.restart.limit,
                'interval': svc_def.restart.interval,
            }
        )
    sys_scandir.write()
    services_dir = os.path.join(container_dir, 'services')
    services_scandir = supervisor.create_scan_dir(
        services_dir,
        finish_timeout=5000
    )
    # Trace metadata attached to app services so their events can be
    # correlated with this container instance.
    trace = {
        'instanceid': app.name,
        'uniqueid': app.uniqueid
    }
    for svc_def in app.services:
        supervisor.create_service(
            services_scandir,
            name=svc_def.name,
            app_run_script=svc_def.command,
            userid=svc_def.proid,
            # Inside the container the envdir is visible at /<env dir>.
            environ_dir='/' + _CONTAINER_ENV_DIR,
            environ={
                envvar.name: envvar.value
                for envvar in svc_def.environ
            },
            environment=app.environment,
            downed=False,
            trace=trace if svc_def.trace else None,
            monitor_policy={
                'limit': svc_def.restart.limit,
                'interval': svc_def.restart.interval,
            }
        )
    services_scandir.write()
    # Bind the service directory in the container volume
    fs.mkdir_safe(os.path.join(root_dir, 'services'))
    fs.mount_bind(root_dir, '/services',
                  target=os.path.join(container_dir, 'services'),
                  bind_opt='--bind')
def make_fsroot(root_dir, proid):
    """Initializes directory structure for the container in a new root.

    - Bind directories in parent / (with exceptions - see below.)
    - Skip /tmp, create /tmp in the new root with correct permissions.
    - Selectively create / bind /var:
      - /var/tmp (new)
      - /var/logs (new)
      - /var/spool - create empty with dirs.
    - Bind everything in /var, skipping /spool/tickets

    NOTE(review): ``proid`` is not used in this function body -- confirm
    whether it is kept only for interface compatibility.
    """
    newroot_norm = fs.norm_safe(root_dir)
    # Host directories bind-mounted into the new root when they exist.
    mounts = [
        '/bin',
        '/common',
        '/dev',
        '/etc',
        '/home',
        '/lib',
        '/lib64',
        '/mnt',
        '/proc',
        '/sbin',
        '/srv',
        '/sys',
        '/usr',
        '/var/lib/sss',
        '/var/tmp/treadmill/env',
        '/var/tmp/treadmill/spool',
    ]
    # Add everything under /opt
    mounts += glob.glob('/opt/*')
    # Directories created empty inside the new root.
    emptydirs = [
        '/tmp',
        '/opt',
        '/var/empty',
        '/var/run',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
        '/var/tmp',
        '/var/tmp/cores',
    ]
    # Directories made world-writable with the sticky bit, like /tmp.
    stickydirs = [
        '/tmp',
        '/opt',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
        '/var/tmp',
        '/var/tmp/cores/',
    ]
    for mount in mounts:
        if os.path.exists(mount):
            fs.mount_bind(newroot_norm, mount)
    for directory in emptydirs:
        _LOGGER.debug('Creating empty dir: %s', directory)
        fs.mkdir_safe(newroot_norm + directory)
    for directory in stickydirs:
        os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)
    # Mount .../tickets .../keytabs on tempfs, so that they will be cleaned
    # up when the container exits.
    #
    # TODO: Do we need to have a single mount for all tmpfs dirs?
    for tmpfsdir in ['/var/spool/tickets', '/var/spool/keytabs',
                     '/var/spool/tokens']:
        fs.mount_tmpfs(newroot_norm, tmpfsdir, '4M')
def create_etc_overlay(tm_env, container_dir, root_dir, app):
    """Create overlay configuration (etc) files for the container.

    Prepares each file under ``container_dir/overlay/etc`` and then
    bind-mounts the prepared files over the corresponding paths in the
    container root (see ``_bind_etc_overlay``).
    """
    # ldpreloads
    _prepare_ldpreload(container_dir, app)
    # hosts
    _prepare_hosts(container_dir, app)
    # resolv.conf
    _prepare_resolv_conf(tm_env, container_dir)
    # sshd PAM configuration
    _prepare_pam_sshd(tm_env, container_dir, app)
    # constructed keytab.
    _prepare_krb(tm_env, container_dir)
    # bind prepared inside container
    _bind_etc_overlay(container_dir, root_dir)
def _prepare_krb(tm_env, container_dir):
    """Manage kerberos environment inside container.

    Copies the host krb5 keytab from the Treadmill spool into the
    container's etc overlay, when present.
    """
    overlay_etc = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(overlay_etc)
    src = os.path.join(tm_env.root, 'spool', 'krb5.keytab')
    dst = os.path.join(overlay_etc, 'krb5.keytab')
    if not os.path.exists(src):
        # TODO: need to abort.
        _LOGGER.error('Unable to copy host keytab: %s', src)
        return
    _LOGGER.info('Copy keytab: %s to %s', src, dst)
    shutil.copyfile(src, dst)
def _prepare_ldpreload(container_dir, app):
    """Add mandatory ldpreloads to the container environment.

    Starts from the host's /etc/ld.so.preload (an empty file if the host
    has none) and appends the Treadmill bind preload when the app uses
    ephemeral ports.
    """
    etc_dir = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(etc_dir)
    new_ldpreload = os.path.join(etc_dir, 'ld.so.preload')
    try:
        shutil.copyfile('/etc/ld.so.preload', new_ldpreload)
    except IOError as err:
        # Only a missing host file is tolerated; re-raise anything else.
        if err.errno != errno.ENOENT:
            raise
        _LOGGER.info('/etc/ld.so.preload not found, creating empty.')
        utils.touch(new_ldpreload)
    ldpreloads = []
    if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
        treadmill_bind_preload = subproc.resolve('treadmill_bind_preload.so')
        ldpreloads.append(treadmill_bind_preload)
    if not ldpreloads:
        return
    _LOGGER.info('Configuring /etc/ld.so.preload: %r', ldpreloads)
    with io.open(new_ldpreload, 'a') as f:
        f.write('\n'.join(ldpreloads) + '\n')
def _prepare_hosts(container_dir, app):
    """Create a hosts file for the container.

    Seeds both ``hosts`` and ``hosts.original`` from the host's
    /etc/hosts, and creates a ``host-aliases`` directory owned by the
    application's proid.
    """
    overlay_etc = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(overlay_etc)
    hosts_file = os.path.join(overlay_etc, 'hosts')
    hosts_original = os.path.join(overlay_etc, 'hosts.original')
    aliases_dir = os.path.join(overlay_etc, 'host-aliases')
    for destination in (hosts_file, hosts_original):
        shutil.copyfile(
            '/etc/hosts',
            destination
        )
    fs.mkdir_safe(aliases_dir)
    # The aliases directory must be writable by the app's proid.
    proid_pw = pwd.getpwnam(app.proid)
    os.chown(aliases_dir, proid_pw.pw_uid, proid_pw.pw_gid)
def _prepare_pam_sshd(tm_env, container_dir, app):
    """Override pam.d sshd stack with special sshd pam stack.

    Picks the shared-network template when the app uses a shared network;
    falls back to the host's /etc/pam.d/sshd when the Treadmill template
    is missing.
    """
    pamd_dir = os.path.join(container_dir, 'overlay', 'etc', 'pam.d')
    fs.mkdir_safe(pamd_dir)
    new_pam_sshd = os.path.join(pamd_dir, 'sshd')
    if app.shared_network:
        template_pam_sshd = os.path.join(
            tm_env.root, 'etc', 'pam.d', 'sshd.shared_network')
    else:
        template_pam_sshd = os.path.join(
            tm_env.root, 'etc', 'pam.d', 'sshd')
    if not os.path.exists(template_pam_sshd):
        # Fall back to the host's stock sshd PAM stack.
        template_pam_sshd = '/etc/pam.d/sshd'
    shutil.copyfile(
        template_pam_sshd,
        new_pam_sshd
    )
def _prepare_resolv_conf(tm_env, container_dir):
    """Create a resolv.conf file for the container.

    Uses the Treadmill resolv.conf template, falling back to the host's
    /etc/resolv.conf when the template does not exist.
    """
    overlay_etc = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(overlay_etc)
    destination = os.path.join(overlay_etc, 'resolv.conf')
    # TODO(boysson): This should probably be based instead on
    #                /etc/resolv.conf for other resolver options
    source = os.path.join(tm_env.root, 'etc', 'resolv.conf')
    if not os.path.exists(source):
        source = '/etc/resolv.conf'
    shutil.copyfile(source, destination)
def _bind_etc_overlay(container_dir, root_dir):
    """Create the overlay in the container.

    Bind-mounts each prepared overlay file over the corresponding path
    inside the container root.
    """
    # Overlay overrides container configs
    #  - /etc/resolv.conf, so that container always uses dnscache.
    #  - pam.d sshd stack with special sshd pam that unshares network.
    #  - /etc/ld.so.preload to enforce necessary system hooks
    #
    overlay_dir = os.path.join(container_dir, 'overlay')
    for overlay_file in ['etc/hosts',
                         'etc/host-aliases',
                         'etc/ld.so.preload',
                         'etc/pam.d/sshd',
                         'etc/resolv.conf',
                         'etc/krb5.keytab']:
        fs.mount_bind(root_dir, os.path.join('/', overlay_file),
                      target=os.path.join(overlay_dir, overlay_file),
                      bind_opt='--bind')
    # Also override resolv.conf in the current mount namespace so that
    # system services have access to our resolver.
    fs.mount_bind('/', '/etc/resolv.conf',
                  target=os.path.join(overlay_dir, 'etc/resolv.conf'),
                  bind_opt='--bind')
def share_cgroup_info(root_dir, app):
    """Shares subset of cgroup tree with the container.

    Currently only the ``memory`` subsystem is shared, bind-mounted at
    /cgroup/memory inside the container root.
    """
    # Bind /cgroup/memory inside chrooted environment to /cgroup/.../memory
    # of the container.
    unique_name = appcfg.app_unique_name(app)
    cgrp = os.path.join('treadmill', 'apps', unique_name)
    # FIXME: This should be removed and proper cgroups should be
    #        exposed (readonly). This is so that tools that
    #        (correctly) read /proc/self/cgroups can access cgroup
    #        data.
    shared_subsystems = ['memory']
    for subsystem in shared_subsystems:
        fs.mkdir_safe(os.path.join(root_dir, 'cgroup', subsystem))
        fs.mount_bind(root_dir,
                      os.path.join('/cgroup', subsystem),
                      cgroups.makepath(subsystem, cgrp))
class NativeImage(_image_base.Image):
    """Represents a native image.

    A native image is built from the host's own filesystem (bind mounts)
    rather than by unpacking an image archive.
    """
    # Trailing comma added: without it ('tm_env') is a bare string, which
    # __slots__ happens to accept for a single slot but reads as an
    # accidental non-tuple.
    __slots__ = (
        'tm_env',
    )
    def __init__(self, tm_env):
        self.tm_env = tm_env
    def unpack(self, container_dir, root_dir, app):
        """Prepare the container root filesystem from the host.

        Builds the chroot, configures filesystem plugins, copies the
        state manifest into the container, then sets up the environ dir,
        supervision tree, etc overlay and shared cgroup info.
        """
        make_fsroot(root_dir, app.proid)
        image_fs.configure_plugins(self.tm_env, container_dir, app)
        # FIXME: Lots of things are still reading this file.
        #        Copy updated state manifest as app.json in the
        #        container_dir so it is visible in chrooted env.
        shutil.copy(os.path.join(container_dir, runtime.STATE_JSON),
                    os.path.join(root_dir, appcfg.APP_JSON))
        create_environ_dir(container_dir, root_dir, app)
        create_supervision_tree(container_dir, root_dir, app)
        create_etc_overlay(self.tm_env, container_dir, root_dir, app)
        share_cgroup_info(root_dir, app)
class NativeImageRepository(_repository_base.ImageRepository):
    """A collection of native images."""
    def __init__(self, tm_env):
        super(NativeImageRepository, self).__init__(tm_env)
    def get(self, url):
        # ``url`` is ignored: a native image is fully defined by the
        # local Treadmill environment.
        return NativeImage(self.tm_env)
| {
"repo_name": "captiosus/treadmill",
"path": "treadmill/runtime/linux/image/native.py",
"copies": "1",
"size": "13416",
"license": "apache-2.0",
"hash": 818338753708417000,
"line_mean": 30.5670588235,
"line_max": 78,
"alpha_frac": 0.5977191413,
"autogenerated": false,
"ratio": 3.400760456273764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4498479597573764,
"avg_score": null,
"num_lines": null
} |
"""A collection of native images.
"""
import errno
import glob
import logging
import os
import pwd
import shutil
import stat
import treadmill
from treadmill import appcfg
from treadmill import cgroups
from treadmill import fs
from treadmill import runtime
from treadmill import subproc
from treadmill import supervisor
from treadmill import utils
from . import fs as image_fs
from . import _image_base
from . import _repository_base
# Module level logger for this image implementation.
_LOGGER = logging.getLogger(__name__)
# Name of the directory (relative to the container dir) that holds the
# s6 environment directories shared with the container.
_CONTAINER_ENV_DIR = 'environ'
def create_environ_dir(env_dir, app):
    """Creates environ dir for s6-envdir.

    Writes two envdirs under ``env_dir``:

    - ``app``: the application-defined environment variables;
    - ``sys``: the mandatory Treadmill variables, written second so they
      take precedence over the app environment.
    """
    appenv = {envvar.name: envvar.value for envvar in app.environ}
    supervisor.create_environ_dir(
        os.path.join(env_dir, 'app'),
        appenv
    )
    env = {
        'TREADMILL_CPU': app.cpu,
        'TREADMILL_DISK': app.disk,
        'TREADMILL_MEMORY': app.memory,
        'TREADMILL_CELL': app.cell,
        'TREADMILL_APP': app.app,
        'TREADMILL_INSTANCEID': app.task,
        'TREADMILL_HOST_IP': app.network.external_ip,
        'TREADMILL_IDENTITY': app.identity,
        'TREADMILL_IDENTITY_GROUP': app.identity_group,
        'TREADMILL_PROID': app.proid,
    }
    # One variable per named endpoint, holding the real (host) port
    # allocated for that endpoint.
    for endpoint in app.endpoints:
        envname = 'TREADMILL_ENDPOINT_{0}'.format(endpoint.name.upper())
        env[envname] = endpoint.real_port
    # Ephemeral port lists are exported as space-separated strings.
    env['TREADMILL_EPHEMERAL_TCP_PORTS'] = ' '.join(
        [str(port) for port in app.ephemeral_ports.tcp]
    )
    env['TREADMILL_EPHEMERAL_UDP_PORTS'] = ' '.join(
        [str(port) for port in app.ephemeral_ports.udp]
    )
    env['TREADMILL_CONTAINER_IP'] = app.network.vip
    # Override appenv with mandatory treadmill environment.
    supervisor.create_environ_dir(
        os.path.join(env_dir, 'sys'),
        env
    )
def _create_logrun(directory):
    """Creates log directory with run file to start s6 logger."""
    log_dir = os.path.join(directory, 'log')
    fs.mkdir_safe(log_dir)
    utils.create_script(os.path.join(log_dir, 'run'), 'logger.run')
def _create_sysrun(sys_dir, name, command, down=False):
    """Create system script.

    Creates ``sys_dir/name`` with a run script executing ``command``, an
    attached s6 logger, and optionally a ``down`` marker file.
    """
    service_dir = os.path.join(sys_dir, name)
    fs.mkdir_safe(service_dir)
    utils.create_script(
        os.path.join(service_dir, 'run'),
        'supervisor.run_sys',
        cmd=command
    )
    _create_logrun(service_dir)
    if down:
        utils.touch(os.path.join(service_dir, 'down'))
def create_supervision_tree(container_dir, app):
    """Creates s6 supervision tree.

    Lays out the ``sys`` (host-side system services) and ``services``
    (application services, bind-mounted into the container at /services)
    supervision directories, plus per-cell vring services, presence
    services and the ``start_container`` entry point.
    """
    # Disable R0915: Too many statements
    # pylint: disable=R0915
    root_dir = os.path.join(container_dir, 'root')
    # Services and sys directories will be restored when container restarts
    # with data retention on existing volume.
    #
    # Sys directories will be removed. Services directory will stay, which
    # present a danger of accumulating restart counters in finished files.
    #
    # TODO:
    #
    # It is rather arbitrary how restart counts should work when data is
    # restored, but most likely services are "restart always" policy, so it
    # will not affect them.
    services_dir = os.path.join(container_dir, 'services')
    sys_dir = os.path.join(container_dir, 'sys')
    if os.path.exists(sys_dir):
        _LOGGER.info('Deleting existing sys dir: %s', sys_dir)
        shutil.rmtree(sys_dir)
    app_json = os.path.join(root_dir, 'app.json')
    # Create /services directory for the supervisor
    svcdir = os.path.join(root_dir, 'services')
    fs.mkdir_safe(svcdir)
    fs.mkdir_safe(services_dir)
    fs.mount_bind(root_dir, '/services', services_dir)
    root_pw = pwd.getpwnam('root')
    proid_pw = pwd.getpwnam(app.proid)
    # Create .s6-svscan directories for svscan finish
    sys_svscandir = os.path.join(sys_dir, '.s6-svscan')
    fs.mkdir_safe(sys_svscandir)
    svc_svscandir = os.path.join(services_dir, '.s6-svscan')
    fs.mkdir_safe(svc_svscandir)
    # svscan finish scripts to wait on all services
    utils.create_script(
        os.path.join(sys_svscandir, 'finish'),
        'svscan.finish',
        timeout=6000
    )
    utils.create_script(
        os.path.join(svc_svscandir, 'finish'),
        'svscan.finish',
        timeout=5000
    )
    # App services run as the app's proid unless the service definition
    # explicitly asks for root.
    for svc in app.services:
        if getattr(svc, 'root', False):
            svc_user = 'root'
            svc_home = root_pw.pw_dir
            svc_shell = root_pw.pw_shell
        else:
            svc_user = app.proid
            svc_home = proid_pw.pw_dir
            svc_shell = proid_pw.pw_shell
        supervisor.create_service(
            services_dir, svc_user, svc_home, svc_shell,
            svc.name, svc.command,
            env=app.environment, down=True,
            envdirs=['/environ/app', '/environ/sys'], as_root=True,
        )
        _create_logrun(os.path.join(services_dir, svc.name))
    # System services always run as root and see only the sys envdir.
    for svc in app.system_services:
        supervisor.create_service(
            services_dir, 'root', root_pw.pw_dir, root_pw.pw_shell,
            svc.name, svc.command,
            env=app.environment, down=False,
            envdirs=['/environ/sys'], as_root=True,
        )
        _create_logrun(os.path.join(services_dir, svc.name))
    # Vring services
    for cell in app.vring.cells:
        fs.mkdir_safe(os.path.join(sys_dir, 'vring.%s' % cell))
        cmd = '%s sproc --zookeeper - --cell %s vring %s' % (
            treadmill.TREADMILL_BIN, cell, app_json)
        utils.create_script(
            os.path.join(sys_dir, 'vring.%s' % cell, 'run'),
            'supervisor.run_sys',
            cmd=cmd
        )
        _create_logrun(os.path.join(sys_dir, 'vring.%s' % cell))
    # Create endpoint presence service
    presence_monitor_cmd = '%s sproc presence monitor %s %s' % (
        treadmill.TREADMILL_BIN,
        app_json,
        container_dir
    )
    presence_register_cmd = '%s sproc presence register %s %s' % (
        treadmill.TREADMILL_BIN,
        app_json,
        container_dir
    )
    shadow_etc = os.path.join(container_dir, 'overlay', 'etc')
    host_aliases_cmd = '%s sproc host-aliases --aliases-dir %s %s %s' % (
        treadmill.TREADMILL_BIN,
        os.path.join(shadow_etc, 'host-aliases'),
        os.path.join(shadow_etc, 'hosts.original'),
        os.path.join(shadow_etc, 'hosts'),
    )
    _create_sysrun(sys_dir, 'monitor', presence_monitor_cmd)
    _create_sysrun(sys_dir, 'register', presence_register_cmd)
    _create_sysrun(sys_dir, 'hostaliases', host_aliases_cmd)
    # The container entry point: the app's own command if the manifest
    # provides one, otherwise s6-svscan over /services.
    cmd = None
    args = None
    if hasattr(app, 'command'):
        cmd = app.command
    if hasattr(app, 'args'):
        args = app.args
    if not cmd:
        cmd = subproc.resolve('s6_svscan')
    if not args:
        args = ['/services']
    _create_sysrun(
        sys_dir,
        'start_container',
        '%s %s %s -m -p -i %s %s' % (
            subproc.resolve('chroot'),
            root_dir,
            subproc.resolve('pid1'),
            cmd,
            ' '.join(args)
        ),
        down=True
    )
def make_fsroot(root, proid):
    """Initializes directory structure for the container in a new root.

    - Bind directories in parent / (with exceptions - see below.)
    - Skip /tmp, create /tmp in the new root with correct permissions.
    - Selectively create / bind /var.
      - /var/tmp (new)
      - /var/logs (new)
      - /var/spool - create empty with dirs.
    - Bind everything in /var, skipping /spool/tickets

    NOTE(review): 'proid' is accepted but never used here — confirm whether
    ownership setup was intended.
    """
    newroot_norm = fs.norm_safe(root)

    # Host directories bound read-through into the new root (only if present).
    bind_dirs = [
        '/bin',
        '/common',
        '/dev',
        '/etc',
        '/home',
        '/lib',
        '/lib64',
        '/mnt',
        '/proc',
        '/sbin',
        '/srv',
        '/sys',
        '/usr',
        '/var/tmp/treadmill/env',
        '/var/tmp/treadmill/spool',
    ] + glob.glob('/opt/*')

    # Directories created fresh (empty) inside the new root.
    fresh_dirs = [
        '/tmp',
        '/opt',
        '/var/empty',
        '/var/run',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
        '/var/tmp',
        '/var/tmp/cores',
    ]

    # World-writable directories that also get the sticky bit (like /tmp).
    sticky_dirs = [
        '/tmp',
        '/opt',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
        '/var/tmp',
        '/var/tmp/cores/',
    ]

    for newdir in fresh_dirs:
        _LOGGER.debug('Creating empty dir: %s', newdir)
        fs.mkdir_safe(newroot_norm + newdir)

    for newdir in sticky_dirs:
        os.chmod(newroot_norm + newdir, 0o777 | stat.S_ISVTX)

    for bind_dir in bind_dirs:
        if os.path.exists(bind_dir):
            fs.mount_bind(newroot_norm, bind_dir)

    # Mount .../tickets .../keytabs on tempfs, so that they will be cleaned
    # up when the container exits.
    #
    # TODO: Do we need to have a single mount for all tmpfs dirs?
    for tmpfsdir in ('/var/spool/tickets', '/var/spool/keytabs',
                     '/var/spool/tokens'):
        fs.mount_tmpfs(newroot_norm, tmpfsdir, '4M')
def etc_overlay(tm_env, container_dir, root_dir, app):
    """Create overlay configuration (etc) files for the container.

    Stages files under <container_dir>/overlay/etc via the _prepare_*
    helpers, then bind-mounts them over the container root's /etc entries.
    """
    # ldpreloads
    _prepare_ldpreload(container_dir, app)
    # hosts
    _prepare_hosts(container_dir, app)
    # resolv.conf
    _prepare_resolv_conf(tm_env, container_dir)
    # sshd PAM configuration
    _prepare_pam_sshd(tm_env, container_dir, app)
    # bind prepared inside container
    _bind_etc_overlay(container_dir, root_dir)
def _prepare_ldpreload(container_dir, app):
    """Add mandatory ldpreloads to the container environment.
    """
    overlay_etc = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(overlay_etc)
    preload_file = os.path.join(overlay_etc, 'ld.so.preload')

    # Seed from the host's ld.so.preload; tolerate only a missing file, in
    # which case an empty one is created.
    try:
        shutil.copyfile('/etc/ld.so.preload', preload_file)
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        _LOGGER.info('/etc/ld.so.preload not found, creating empty.')
        utils.touch(preload_file)

    entries = []
    if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
        # Ephemeral ports require the bind-interception preload library.
        entries.append(subproc.resolve('treadmill_bind_preload.so'))
    if not entries:
        return

    _LOGGER.info('Configuring /etc/ld.so.preload: %r', entries)
    with open(preload_file, 'a') as preload:
        preload.write('\n'.join(entries) + '\n')
def _prepare_hosts(container_dir, app):
    """Create a hosts file for the container.
    """
    overlay_etc = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(overlay_etc)

    # Two copies of the host's /etc/hosts: 'hosts' is the live file the
    # host-aliases service rewrites, 'hosts.original' is its pristine input.
    for dest in ('hosts', 'hosts.original'):
        shutil.copyfile('/etc/hosts', os.path.join(overlay_etc, dest))

    aliases_dir = os.path.join(overlay_etc, 'host-aliases')
    fs.mkdir_safe(aliases_dir)

    # The app proid owns the aliases directory so in-container tooling can
    # register aliases.
    proid_pw = pwd.getpwnam(app.proid)
    os.chown(aliases_dir, proid_pw.pw_uid, proid_pw.pw_gid)
def _prepare_pam_sshd(tm_env, container_dir, app):
    """Override pam.d sshd stack with special sshd pam stack.
    """
    pamd_dir = os.path.join(container_dir, 'overlay', 'etc', 'pam.d')
    fs.mkdir_safe(pamd_dir)

    # Shared-network containers use a dedicated pam stack variant.
    variant = 'sshd.shared_network' if app.shared_network else 'sshd'
    template = os.path.join(tm_env.root, 'etc', 'pam.d', variant)
    if not os.path.exists(template):
        # Fall back to the host's stock sshd pam configuration.
        template = '/etc/pam.d/sshd'

    shutil.copyfile(template, os.path.join(pamd_dir, 'sshd'))
def _prepare_resolv_conf(tm_env, container_dir):
    """Create an resolv.conf file for the container.
    """
    overlay_etc = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(overlay_etc)

    # TODO(boysson): This should probably be based instead on
    #                /etc/resolv.conf for other resolver options
    template = os.path.join(tm_env.root, 'etc', 'resolv.conf')
    if not os.path.exists(template):
        # No cell-level template; fall back to the host's resolv.conf.
        template = '/etc/resolv.conf'

    shutil.copyfile(template, os.path.join(overlay_etc, 'resolv.conf'))
def _bind_etc_overlay(container_dir, root_dir):
    """Create the overlay in the container."""
    # Overlay overrides container configs:
    # - /etc/resolv.conf, so that container always uses dnscache.
    # - pam.d sshd stack with special sshd pam that unshares network.
    # - /etc/ld.so.preload to enforce necessary system hooks
    overlay_dir = os.path.join(container_dir, 'overlay')
    overlay_files = (
        'etc/hosts',
        'etc/host-aliases',
        'etc/ld.so.preload',
        'etc/pam.d/sshd',
        'etc/resolv.conf',
    )
    for overlay_file in overlay_files:
        fs.mount_bind(
            root_dir,
            os.path.join('/', overlay_file),
            target=os.path.join(overlay_dir, overlay_file),
            bind_opt='--bind'
        )

    # Also override resolv.conf in the current mount namespace so that
    # system services have access to our resolver.
    fs.mount_bind(
        '/', '/etc/resolv.conf',
        target=os.path.join(overlay_dir, 'etc/resolv.conf'),
        bind_opt='--bind'
    )
def share_cgroup_info(app, root_dir):
    """Shares subset of cgroup tree with the container."""
    # Bind /cgroup/memory inside chrooted environment to /cgroup/.../memory
    # of the container.
    unique_name = appcfg.app_unique_name(app)
    app_cgrp = os.path.join('treadmill', 'apps', unique_name)

    # FIXME: This should be removed and proper cgroups should be
    #        exposed (readonly). This is so that tools that
    #        (correctly) read /proc/self/cgroups can access cgroup
    #        data.
    for subsystem in ('memory',):
        fs.mkdir_safe(os.path.join(root_dir, 'cgroup', subsystem))
        fs.mount_bind(
            root_dir,
            os.path.join('/cgroup', subsystem),
            cgroups.makepath(subsystem, app_cgrp)
        )
class NativeImage(_image_base.Image):
    """Represents a native image."""

    # NOTE(review): a bare string is valid for __slots__ (single slot
    # 'tm_env'), though a tuple ('tm_env',) would be more conventional.
    __slots__ = (
        'tm_env'
    )

    def __init__(self, tm_env):
        # Treadmill environment providing cell configuration/paths.
        self.tm_env = tm_env

    def unpack(self, container_dir, root_dir, app):
        """Set up the container root filesystem and supervision tree."""
        make_fsroot(root_dir, app.proid)

        image_fs.configure_plugins(self.tm_env, root_dir, app)

        # FIXME: Lots of things are still reading this file.
        #        Copy updated state manifest as app.json in the
        #        container_dir so it is visible in chrooted env.
        shutil.copy(os.path.join(container_dir, runtime.STATE_JSON),
                    os.path.join(root_dir, appcfg.APP_JSON))

        # FIXME: env_dir should be in a well defined location (part of the
        #        container "API").
        env_dir = os.path.join(root_dir, 'environ')
        create_environ_dir(env_dir, app)

        create_supervision_tree(container_dir, app)
        share_cgroup_info(app, root_dir)
        etc_overlay(self.tm_env, container_dir, root_dir, app)
class NativeImageRepository(_repository_base.ImageRepository):
    """A collection of native images."""

    def __init__(self, tm_env):
        super(NativeImageRepository, self).__init__(tm_env)

    def get(self, url):
        # Native images carry no per-URL state; every lookup returns a
        # fresh NativeImage bound to this repository's environment.
        return NativeImage(self.tm_env)
| {
"repo_name": "keithhendry/treadmill",
"path": "treadmill/runtime/linux/image/native.py",
"copies": "2",
"size": "15443",
"license": "apache-2.0",
"hash": -8597321526959865000,
"line_mean": 30.1350806452,
"line_max": 78,
"alpha_frac": 0.5984588487,
"autogenerated": false,
"ratio": 3.3491650401214486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49476238888214485,
"avg_score": null,
"num_lines": null
} |
# a collection of non-class routines that perform operations on a list
# of images
import math
# return the bounds of the rectangle spanned by the provided list of
# images
def coverage(image_list):
    """Return (xmin, ymin, xmax, ymax) spanning all images' coverage rects.

    Each image must provide a coverage() method returning (x0, y0, x1, y1).
    """
    xmin = ymin = xmax = ymax = None
    for image in image_list:
        (x0, y0, x1, y1) = image.coverage()
        # 'is None' (not '== None') is the correct singleton comparison.
        if xmin is None or x0 < xmin:
            xmin = x0
        if ymin is None or y0 < ymin:
            ymin = y0
        if xmax is None or x1 > xmax:
            xmax = x1
        if ymax is None or y1 > ymax:
            ymax = y1
    print("List area coverage: (%.2f %.2f) (%.2f %.2f)"
          % (xmin, ymin, xmax, ymax))
    return (xmin, ymin, xmax, ymax)
def rectanglesOverlap(r1, r2):
    """Return True if the axis-aligned rectangles r1 and r2 intersect.

    Rectangles are (x0, y0, x1, y1) tuples; touching edges count as overlap.
    """
    (ax0, ay0, ax1, ay1) = r1
    (bx0, by0, bx1, by1) = r2
    # Separating-axis test: disjoint iff one rect lies entirely past the
    # other on either axis.
    separated = ax0 > bx1 or ax1 < bx0 or ay0 > by1 or ay1 < by0
    return not separated
# return a list of images that intersect the given rectangle
def getImagesCoveringRectangle(image_list, r2, only_placed=False):
    """Return the subset of image_list whose coverage intersects r2.

    With only_placed=True, images whose .placed flag is false are skipped.
    """
    coverage_list = []
    for image in image_list:
        rect = image.coverage()
        skip = only_placed and not image.placed
        if not skip and rectanglesOverlap(rect, r2):
            coverage_list.append(image)
    return coverage_list
# return a list of images that cover the given point within 'pad'
# or are within 'pad' distance of touching the point.
def getImagesCoveringPoint(image_list, x=0.0, y=0.0, pad=20.0, only_placed=False):
    """Return images whose coverage comes within 'pad' of point (x, y)."""
    # Inflate the point into a pad-sized square and reuse the rect query.
    query_rect = (x - pad, y - pad, x + pad, y + pad)
    coverage_list = getImagesCoveringRectangle(image_list, query_rect,
                                               only_placed)
    name_list = [image.name for image in coverage_list]
    print("Images covering point (%.2f %.2f): %s" % (x, y, str(name_list)))
    return coverage_list
def x2lon(self, x):
    """Convert local east offset x (meters) to longitude (degrees)."""
    x_nm = x / 1852.0  # meters -> nautical miles
    # One minute of longitude spans one nm, shrunk by cos(latitude).
    shrink = cos(self.ref_lat * pi / 180.0)
    return (x_nm / 60.0) / shrink + self.ref_lon
def y2lat(self, y):
    """Convert local north offset y (meters) to latitude (degrees)."""
    # One minute of latitude is one nautical mile everywhere on the sphere.
    return (y / 1852.0) / 60.0 + self.ref_lat
# x, y are in meters ref_lon/lat in degrees
def cart2wgs84( x, y, ref_lon, ref_lat ):
    """Convert local cartesian meters to (lon, lat) degrees near ref point."""
    nm = 1852.0  # meters per nautical mile
    shrink = cos(ref_lat * pi / 180.0)
    lon = (x / nm) / 60.0 / shrink + ref_lon
    lat = (y / nm) / 60.0 + ref_lat
    return (lon, lat)
# x, y are in meters ref_lon/lat in degrees
def wgs842cart( lon_deg, lat_deg, ref_lon, ref_lat ):
    """Convert (lon, lat) degrees to local cartesian meters near ref point."""
    nm = 1852.0  # meters per nautical mile
    stretch = cos(ref_lat * pi / 180.0)
    x_m = (lon_deg - ref_lon) * 60.0 * stretch * nm
    y_m = (lat_deg - ref_lat) * 60.0 * nm
    return (x_m, y_m)
| {
"repo_name": "UASLab/ImageAnalysis",
"path": "scripts/lib/image_list.py",
"copies": "1",
"size": "2956",
"license": "mit",
"hash": 1103809903412203100,
"line_mean": 29.1632653061,
"line_max": 82,
"alpha_frac": 0.5953991881,
"autogenerated": false,
"ratio": 2.845043310875842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39404424989758424,
"avg_score": null,
"num_lines": null
} |
"""A collection of non-environment specific tools"""
import sys
import os
from robofab.objects.objectsRF import RInfo
# Jaguar's (Mac OS X 10.2) bundled Python 2.2.0 shipped with broken Mac
# support; flag it so callers can skip the MacOS module.
have_broken_macsupport = (
    1 if sys.platform == "darwin" and sys.version_info[:3] == (2, 2, 0)
    else 0
)
def readGlyphConstructions():
    """read GlyphConstruction and turn it into a dict"""
    from robofab.tools.glyphConstruction import _glyphConstruction
    constructions = {}
    for line in _glyphConstruction.split("\n"):
        # Skip blank lines and '#' comment lines.
        if not line or line.startswith('#'):
            continue
        # Format: "name: base accent.pos accent.pos ..."
        name = line.split(': ')[0]
        parts = line.split(': ')[1].split(' ')
        build = [parts[0]]
        for part in parts[1:]:
            accent = part.split('.')[0]
            position = part.split('.')[1]
            build.append((accent, position))
        constructions[name] = build
    return constructions
#
#
# glyph.unicode: ttFont["cmap"].getcmap(3, 1)
#
#
def guessFileType(fileName):
    """Guess a font file's type from Mac metadata or its extension.

    Returns "TTF", "Type 1", or None when the file is missing/unknown.
    """
    if not os.path.exists(fileName):
        return None
    ext = os.path.splitext(fileName)[1].lower()
    if not have_broken_macsupport:
        # On Macs, trust the resource creator/type codes when available.
        try:
            import MacOS
        except ImportError:
            pass
        else:
            cr, tp = MacOS.GetCreatorAndType(fileName)
            if tp in ("sfnt", "FFIL"):
                return "TTF"
            if tp == "LWFN":
                return "Type 1"
    if ext in (".dfont", ".otf", ".ttf"):
        return "TTF"
    if ext in (".pfb", ".pfa"):
        return "Type 1"
    return None
def extractTTFFontInfo(font):
    """Build an RInfo object from a fontTools TTF/OTF font.

    Name table entries are indexed according to
    http://www.microsoft.com/typography/otspec/name.htm
    """
    # UFO.info attribute name / name-table entry index.
    # NOTE(review): 'fontStyle' reuses index 1 (family name) — confirm.
    attrs = [
        ('copyright', 0),
        ('familyName', 1),
        ('fontStyle', 1),
        ('postscriptFullName', 4),
        ('trademark', 7),
        ('openTypeNameDesigner', 9),
        ('openTypeNameLicenseURL', 14),
        ('openTypeNameDesignerURL', 12),
    ]
    info = RInfo()
    names = font['name']
    info.ascender = font['hhea'].ascent
    info.descender = font['hhea'].descent
    info.unitsPerEm = font['head'].unitsPerEm
    for name, index in attrs:
        # (3, 1) selects the Windows-platform, Unicode BMP name record.
        # Was font["name"].getName(...) — reuse the 'names' binding that
        # was previously assigned but never used.
        entry = names.getName(index, 3, 1)
        if entry is not None:
            try:
                setattr(info, name, str(entry.string, "utf16"))
            # Was a bare 'except:' which also swallowed KeyboardInterrupt.
            except Exception:
                # NOTE(review): message prints the whole info object, not
                # the offending entry — preserved to keep output identical.
                print("Error importing value %s: %s"%(str(name), str(info)))
    return info
def extractT1FontInfo(font):
    """Build an RInfo object from a fontTools T1Font.

    Type 1 fonts store no ascender/descender, so they are estimated as
    4/5 and -1/5 of the em size.
    """
    info = RInfo()
    # NOTE(review): 'src' is assigned but unused — fields below read
    # font.font['FontInfo'] directly.
    src = font.font['FontInfo']
    # Em size is the inverse of the FontMatrix scale (usually 0.001 -> 1000).
    factor = font.font['FontMatrix'][0]
    assert factor > 0
    info.unitsPerEm = int(round(1/factor, 0))
    # assume something for ascender descender
    info.ascender = (info.unitsPerEm / 5) * 4
    info.descender = info.ascender - info.unitsPerEm
    info.versionMajor = font.font['FontInfo']['version']
    info.fullName = font.font['FontInfo']['FullName']
    # Family name: everything before the first '-' of the full name.
    info.familyName = font.font['FontInfo']['FullName'].split("-")[0]
    # Notice strings in Type 1 fonts are Mac Roman encoded bytes.
    info.notice = str(font.font['FontInfo']['Notice'], "macroman")
    info.italicAngle = font.font['FontInfo']['ItalicAngle']
    # NOTE(review): reads font['UniqueID'] while every other field reads
    # font.font[...] — looks inconsistent; confirm T1Font exposes it here.
    info.uniqueID = font['UniqueID']
    return info
def fontToUFO(src, dst, fileType=None):
    """Convert the font file at 'src' (TTF/OTF or Type 1) into a UFO at 'dst'.

    fileType may be "TTF" or "Type 1"; when None it is guessed from the
    file. Raises ValueError when the type cannot be determined.
    """
    from robofab.ufoLib import UFOWriter
    from robofab.pens.adapterPens import SegmentToPointPen
    if fileType is None:
        fileType = guessFileType(src)
    if fileType is None:
        raise ValueError("Can't determine input file type")
    ufoWriter = UFOWriter(dst)
    if fileType == "TTF":
        from fontTools.ttLib import TTFont
        font = TTFont(src, 0)
    elif fileType == "Type 1":
        from fontTools.t1Lib import T1Font
        font = T1Font(src)
    else:
        assert 0, "unknown file type: %r" % fileType
    inGlyphSet = font.getGlyphSet()
    outGlyphSet = ufoWriter.getGlyphSet()
    for glyphName in list(inGlyphSet.keys()):
        print("-", glyphName)
        glyph = inGlyphSet[glyphName]
        # Adapter closure: writeGlyph calls this with a point pen; wrap it
        # so the segment-pen-based glyph.draw can feed it. Safe despite the
        # in-loop def because writeGlyph invokes it before 'glyph' rebinds.
        def drawPoints(pen):
            pen = SegmentToPointPen(pen)
            glyph.draw(pen)
        outGlyphSet.writeGlyph(glyphName, glyph, drawPoints)
    outGlyphSet.writeContents()
    # Fill in the UFO info from whichever container format we read.
    if fileType == "TTF":
        info = extractTTFFontInfo(font)
    elif fileType == "Type 1":
        info = extractT1FontInfo(font)
    ufoWriter.writeInfo(info)
if __name__ == "__main__":
    # Manual smoke test: parse and dump the bundled glyph-construction table.
    print(readGlyphConstructions())
| {
"repo_name": "adrientetar/robofab",
"path": "Lib/robofab/tools/toolsAll.py",
"copies": "1",
"size": "3934",
"license": "bsd-3-clause",
"hash": -5750262888277025000,
"line_mean": 26.7112676056,
"line_max": 92,
"alpha_frac": 0.6820030503,
"autogenerated": false,
"ratio": 2.8883994126284875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8746839863693382,
"avg_score": 0.0647125198470211,
"num_lines": 142
} |
## A Collection of NURBS curve utility functions
## J Eisenmann
## ACCAD, The Ohio State University
## 2012-13
from math import *
from Vector import *
import maya.cmds as mc
import maya.mel as mm
mc.loadPlugin("closestPointOnCurve", quiet=True)
def drawLine(pt1, pt2):
    """Draw a straight cubic curve from pt1 to pt2.

    Endpoints are doubled so the cubic CV hull degenerates to a segment.
    Accepts Vector instances (via asTuple) or plain tuples/lists.
    """
    # Duck-type on asTuple instead of the previous bare 'except', which
    # also hid genuine errors raised by mc.curve itself.
    if hasattr(pt1, "asTuple") and hasattr(pt2, "asTuple"):
        mc.curve( p=[pt1.asTuple(), pt1.asTuple(), pt2.asTuple(), pt2.asTuple()] )
    else:
        mc.curve( p=[pt1, pt1, pt2, pt2] )
def connectedNodeOfType( curve, type ):
    """Return the first downstream node of the given type, or None."""
    destinations = mc.connectionInfo(curve + ".worldSpace",
                                     destinationFromSource=True)
    for node in destinations:
        if mc.nodeType(node) == type:
            # Strip the attribute suffix and any '->' path prefix.
            return node.split('.')[0].split('>')[-1]
    return None
def getCurveInfoNode( curve ):
    """Return (creating it on demand) a curveInfo node attached to curve."""
    infoNode = connectedNodeOfType(curve, "curveInfo")
    if infoNode:
        return infoNode
    print("adding an info node to curve: "+curve)
    infoNode = mc.createNode("curveInfo")
    mc.connectAttr(curve + ".worldSpace", infoNode + ".inputCurve")
    return infoNode
def getCurveArcLenDimNode( curve ):
    """Return (creating it on demand) an arcLengthDimension node for curve."""
    arcLenDimNode = connectedNodeOfType( curve, "arcLengthDimension" )
    if not arcLenDimNode:
        # Renamed from 'max', which shadowed the builtin of the same name.
        max_u = mc.getAttr( curve+".maxValue" )
        print("adding an arcLengthDimension node to curve: "+curve)
        # Sample at the curve's end parameter so the node spans full length.
        arcLenDimNode = mc.arcLengthDimension( curve+".u[%f]"%max_u )
    return arcLenDimNode
def getClosestPointNode( curve ):
    """Return (creating it on demand) a closestPointOnCurve node for curve."""
    cpNode = connectedNodeOfType(curve, "closestPointOnCurve")
    if cpNode:
        return cpNode
    print("adding a closestPointOnCurve node to curve: "+curve)
    return mc.closestPointOnCurve(curve)
def findParamAtPoint( curve, point ):
    """Return the U parameter of the point on curve closest to 'point'."""
    cpNode = getClosestPointNode(curve)
    px, py, pz = point[0], point[1], point[2]
    mc.setAttr(cpNode + ".inPosition", px, py, pz)
    return mc.getAttr(cpNode + ".paramU")
def findArcLenAtParam( curve, param ):
    """Return the arc length of curve from its start up to 'param'."""
    dimNode = getCurveArcLenDimNode(curve)
    mc.setAttr(dimNode + ".uParamValue", param)
    return mc.getAttr(dimNode + ".arcLength")
def curveArcLen( curve ):
    """Return the total arc length of curve."""
    # Length measured at the curve's end parameter is the full length.
    u_max = mc.getAttr(curve + ".maxValue")
    return findArcLenAtParam(curve, u_max)
def findParamAtArcLen( curve, distance, epsilon=0.0001 ):
    """ Returns the U parameter value at a specified length along a curve
    (Adapted from: http://ewertb.soundlinker.com/mel/mel.108.php) """
    # Renamed from min/max, which shadowed the builtins.
    lo = mc.getAttr( curve+".minValue" )
    hi = mc.getAttr( curve+".maxValue" )
    arcLength = findArcLenAtParam( curve, hi )
    # Don't bother doing any work for the start or end of the curve.
    if ( distance <= 0.0 ):
        return 0.0
    if ( distance >= arcLength ):
        return hi
    # This is merely a diagnostic to measure the number of passes required to
    # find any particular point. You may be surprised that the number of
    # passes is typically quite low.
    passes = 1
    u = 0.0
    # Bisect the parameter range until the measured length is within
    # epsilon of the requested distance.
    # BUG FIX: the tolerance check previously referenced an undefined name
    # 'tol' (NameError on every interior query); it now uses 'epsilon'.
    while ( True ):
        u = ( lo + hi ) / 2.0
        arcLength = findArcLenAtParam( curve, u )
        if ( abs(arcLength-distance) < epsilon ):
            break
        if ( arcLength > distance ):
            hi = u
        else:
            lo = u
        passes += 1
    return u
def findParamAtArcPercent( curve, percent, epsilon=0.0001 ):
    """ Returns the U parameter value at a specified % of the length along a curve """
    u_max = mc.getAttr(curve + ".maxValue")
    total_length = findArcLenAtParam(curve, u_max)
    return findParamAtArcLen(curve, percent * total_length, epsilon)
def findCVsInRange( curve, start, end ):
    """ Returns a list of (index, u, length%) tuples for the CVs of "curve"
    that have u parameter values between "start" and "end" (percentages of
    arc length) """
    indices = []
    if( end >= start and start >= 0.0 and end <= 1.0):
        a = findParamAtArcPercent( curve, start )
        b = findParamAtArcPercent( curve, end )
        # get CV positions in local (object) space
        CVs = mc.getAttr( curve+".cv[*]" )
        # translate them into global (world) space.
        # PERF: the world translation is loop-invariant — query it once
        # instead of once per CV inside the comprehension.
        offset = Vector(mc.xform(curve, q=True, ws=True, translation=True))
        CVs = [(Vector(cv) + offset).asTuple() for cv in CVs]
        # PERF: total arc length is also invariant; it was previously
        # recomputed (two maya queries) for every CV.
        total = curveArcLen(curve)
        for I,cv in enumerate(CVs):
            U = findParamAtPoint(curve, cv)
            L = findArcLenAtParam(curve, U)/total # arc length as a percentage
            if( a <= U and U <= b ):
                indices.append((I,U,L))
    return indices
def arcCurve( curve, t1, t2 ):
    """ Perturb the tangents on the initial curve """
    cv1 = list(mc.getAttr(curve + ".cv[1]")[0])
    cv2 = list(mc.getAttr(curve + ".cv[2]")[0])
    print("%s %s" % (cv1, cv2))
    # Offset each interior CV by its tangent perturbation vector.
    cv1 = [cv1[axis] + t1[axis] for axis in range(3)]
    cv2 = [cv2[axis] + t2[axis] for axis in range(3)]
    mc.setAttr(curve + ".cv[1]", cv1[0], cv1[1], cv1[2])
    mc.setAttr(curve + ".cv[2]", cv2[0], cv2[1], cv2[2])
    return curve
def evenlyDivideCurve( curve, numDiv ):
    """ Divides a curve into numDiv.
    Assumes there are two CVs at the start and end of the curve """
    # first, move the curve to the origin (and zero its rotation) so CV
    # positions can be sampled in a known frame; restored at the end.
    translation = mc.xform(curve, q=True, ws=True, translation=True)
    rotation = mc.xform(curve, q=True, ws=True, rotation=True)
    mc.move(0, 0, 0, curve)
    mc.rotate(0, 0, 0, curve)
    # get the curve info node
    infoNode = getCurveInfoNode(curve)
    Knots = list( mc.getAttr( infoNode+".knots" )[0] )
    CVs = mc.getAttr( curve+".cv[*]" )
    numOrigCVs = len(CVs)
    numOrigKnots = len(Knots)
    if( not numOrigCVs == 4 ):
        print("ERROR: original curve must have exactly 4 CVs")
        return
    else:
        # Walk evenly spaced arc-length percentages; the first/last two
        # iterations overwrite the existing end CVs, interior iterations
        # insert new CVs (and matching knots).
        # NOTE(review): percent = (p-1)/(numDiv-2) goes slightly outside
        # [0, 1] at the ends; findParamAtArcLen clamps those — confirm.
        for p in range(0,(numDiv-numOrigCVs+4+1)):
            percent = (p-1)/float(numDiv-2)
            u = findParamAtArcPercent( curve, percent )
            if p < 2 or p >= (numDiv-numOrigCVs+3):
                CVs[p] = tuple(mc.pointOnCurve(curve, parameter=u))
            else:
                CVs.insert(p, tuple(mc.pointOnCurve(curve, parameter=u)) )
                Knots.insert(p+1, u)
        # Rebuild the curve in place with the new CV/knot vectors, then
        # restore the original transform.
        curve = mc.curve( curve,r=True, p=CVs, k=Knots)
        mc.move(translation[0], translation[1], translation[2], curve)
        mc.rotate(rotation[0], rotation[1], rotation[2], curve)
    return curve
def bias(b, t):
    """Schlick-style bias: remaps t in [0, 1]; bias(b, 0.5) == b."""
    exponent = log(b) / log(0.5)
    return t ** exponent
def gain(g, t):
    """Schlick-style gain: eases t about 0.5; gain(0.5, t) == t."""
    if t < 0.5:
        return 0.5 * bias(1 - g, 2 * t)
    return 1 - 0.5 * bias(1 - g, 2 - 2 * t)
def smoothstep(a, fuzz, t):
    """Ramp from 0 to 1 as t rises through the window [a - fuzz, a]."""
    if t < a - fuzz:
        return 0.0
    if t > a:
        return 1.0
    # Inside the window: eased interpolation of the window fraction.
    return gain(0.9, (t - (a - fuzz)) / fuzz)
def pulse(a, b, fuzz, t):
    """Soft rectangular pulse: ramps up around a, back down around b."""
    rising = smoothstep(a, fuzz, t)
    falling = smoothstep(b, fuzz, t)
    return rising - falling
def oscillateCurve( curve, start=0.0, end=1.0, freq=1.0, ease=0.5, strength=1.0 ):
    """ Oscillates a given curve by moving each vertex in an alternating
    direction based on the normal.  This process takes place over the
    range defined by "start" and "end" as percentages of arc length.
    Oscillation eases to full strength as determined by the "ease" and
    "strength" arguments. """
    if(ease > (end-start)*0.5):
        ease = (end-start)*0.5
    if(start < end):
        CVs = mc.getAttr( curve+".cv[*]" )
        newCVs = findCVsInRange(curve, start, end)
        for (I,U,L) in newCVs:
            # Normalized position of this CV within the affected range.
            interp = (L-start)/(end-start)
            osc = sin(freq*interp)
            scale = pulse(start+ease, end, ease, L) # ease must be between 0 and 0.5
            ## Don't use Maya's normalized normal -- it flip flops with curvature so it's not good for oscillating offset
            # normal = Vector(mc.pointOnCurve(curve, parameter=cv[1], normalizedNormal=True))
            # if(normal.mag() == 0.0):
            # print "Getting normal from up x tangent"
            # Stable offset direction: world-up combined with the curve
            # tangent ('**' is presumably Vector's cross product — confirm
            # in Vector.py).
            normal = Vector(0,1,0)**Vector(mc.pointOnCurve(curve, parameter=U, tangent=True))
            normal = normal.norm()
            pos = Vector(CVs[I])
            pos = pos+normal*scale*strength*osc
            CVs[I] = pos.asTuple()
        # Write all (possibly moved) CVs back to the maya curve.
        for i,cv in enumerate(CVs):
            mc.setAttr(curve+".cv[%d]"%i, cv[0], cv[1], cv[2])
    return curve
def noise(x=0, y=None, z=None):
    """ Returns a Perlin noise value based on 1D or 3D input """
    # EAFP dispatch: len()/formatting of a scalar x raises, which routes
    # control to the except branch for the (x, y, z) / 1D cases.
    try:
        if( isinstance(x, Vector) ): # if x is a Vector
            return mm.eval("noise <<%f, %f, %f>>"%x.asTuple())
        elif( len(x) == 3 ): # if x is a sequence
            return mm.eval("noise <<%f, %f, %f>>"%x)
    # NOTE(review): bare except also swallows mm.eval failures, and a
    # sequence of length != 3 silently falls through returning None —
    # confirm before tightening.
    except:
        if(not y == None and not z == None): # if y and z have values
            return mm.eval("noise <<%f, %f, %f>>"%(x,y,z))
        else: # otherwise just use 1D data
            return mm.eval("noise %f"%x)
def noiseCurve( curve, start=0.0, end=1.0, freq=1.0, ease=0.5, strength=1.0 ):
    """ Adds noise to a given curve by moving each vertex with Perlin
    noise based on the normal.  This process takes place over the
    range defined by "start" and "end" as percentages of arc length.
    Noise eases to full strength as determined by the "ease" and
    "strength" arguments. """
    if(ease > (end-start)*0.5): # ease must be between 0 and 0.5
        ease = (end-start)*0.5
    if(start < end):
        CVs = mc.getAttr( curve+".cv[*]" )
        newCVs = findCVsInRange(curve, start, end)
        for (I,U,L) in newCVs:
            # Normalized position of this CV within the affected range.
            interp = (L-start)/(end-start)
            noiz = noise(freq*interp)
            scale = pulse(start+ease, end, ease, L)
            # Stable offset direction from up x tangent (Maya's normalized
            # normal flips with curvature — see oscillateCurve).
            normal = Vector(0,1,0)**Vector(mc.pointOnCurve(curve, parameter=U, tangent=True))
            normal = normal.norm()
            pos = Vector(CVs[I])
            pos = pos+normal*scale*strength*noiz
            CVs[I] = pos.asTuple()
        for i,cv in enumerate(CVs):
            # BUG FIX: the old debug print formatted the CV's x coordinate
            # into the index slot ("%d" % cv[0]) and, under Python 2,
            # printed a 4-tuple; it now reports the actual index and CV.
            print("%s.cv[%d] %s %s %s" % (curve, i, cv[0], cv[1], cv[2]))
            mc.setAttr(curve+".cv[%d]"%i, cv[0], cv[1], cv[2])
    # Return the curve for parity with oscillateCurve/arcCurve (previously
    # returned None, so this is backward-compatible for existing callers).
    return curve
def twistCurve( curve, start=0.0, end=1.0, freq=1.0, ease=0.5, strength=1.0 ):
    """ Twist the curve over the range defined by "start" and "end" as percentages of arc length.
    The twist operation happens in world space.  Twist eases to full strength as determined by
    the "ease" and "strength" arguments. """
    if(ease > (end-start)*0.5): # ease must be between 0 and 0.5
        ease = (end-start)*0.5
    if(start < end):
        CVs = mc.getAttr( curve+".cv[*]" )
        newCVs = findCVsInRange(curve, start, end)
        # PERF: the scene's bounding box cannot change until the setAttr
        # loop below, so compute it once instead of once per CV.
        bounds = mc.exactWorldBoundingBox(curve)
        boundsXmin = bounds[0]
        boundsWidth = bounds[3] - bounds[0]
        boundsZcenter = (bounds[2]+bounds[5])*0.5
        for (I,U,L) in newCVs:
            scale = pulse(start+ease, end, ease, L)
            # Twist angle grows with the CV's relative x position.
            twistT = (((CVs[I][0] - boundsXmin)/boundsWidth))*2*pi*freq
            print("(((%f - %f)/%f)) = %f --> %f"%(CVs[I][0],boundsXmin,boundsWidth,(((CVs[I][0] - boundsXmin)/boundsWidth)), twistT))
            # NOTE(review): y is zeroed and only z gets the rotated value; a
            # full axis twist would also set y = y*cos - z*sin.  Preserved
            # as-is — confirm intent before changing.
            CVs[I] = (CVs[I][0],
                      0,
                      scale*strength*((CVs[I][2]-boundsZcenter)*sin(twistT) + CVs[I][1]*cos(twistT)) + boundsZcenter)
        for i,cv in enumerate(CVs):
            mc.setAttr(curve+".cv[%d]"%i, cv[0], cv[1], cv[2])
def printCurveDetails( curve ):
    """Dump the knot vector and CV positions of curve to stdout."""
    infoNode = getCurveInfoNode(curve)
    knot_values = list(mc.getAttr(infoNode + ".knots")[0])
    cv_values = mc.getAttr(curve + ".cv[*]")
    print("Curve Details for: "+curve)
    for knot in knot_values:
        print(knot)
    for cv in cv_values:
        print(cv)
| {
"repo_name": "jeisenma/traceSelectionInMaya",
"path": "scripts/curveUtil.py",
"copies": "1",
"size": "11555",
"license": "mit",
"hash": -4355680436437923300,
"line_mean": 38.5719178082,
"line_max": 134,
"alpha_frac": 0.5864993509,
"autogenerated": false,
"ratio": 3.2330721880246225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43195715389246225,
"avg_score": null,
"num_lines": null
} |
"""A collection of ORM sqlalchemy models for Caravel"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import functools
import json
import logging
import re
import textwrap
from collections import namedtuple
from copy import deepcopy, copy
from datetime import timedelta, datetime, date
import humanize
import pandas as pd
import requests
import sqlalchemy as sqla
from sqlalchemy.engine.url import make_url
import sqlparse
from dateutil.parser import parse
from flask import Markup, url_for
from flask import escape, g, Markup, request
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.filemanager import get_file_original_name
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.filters import Dimension, Filter
from pydruid.utils.postaggregator import Postaggregator
from pydruid.utils.having import Aggregation
from six import string_types
from sqlalchemy import (
Column, Integer, String, ForeignKey, Text, Boolean,
DateTime, Date, Table, Numeric,
create_engine, MetaData, desc, asc, select, and_, func
)
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from sqlalchemy.sql import table, literal_column, text, column
from sqlalchemy.sql.expression import ColumnClause, TextAsFrom
from sqlalchemy_utils import EncryptedType
from werkzeug.datastructures import ImmutableMultiDict
import caravel
from caravel import app, db, get_session, utils, sm
from caravel.source_registry import SourceRegistry
from caravel.viz import viz_types
from caravel.utils import flasher, MetricPermException, DimSelector
config = app.config
# Result bundle returned by datasource queries.
# BUG FIX: the namedtuple's typename was the literal string 'namedtuple',
# which produced misleading repr()s and broke pickling; it now matches the
# bound name.
QueryResult = namedtuple('QueryResult', ['df', 'query', 'duration'])
# Splits comma-separated filter values while respecting quoted segments.
# NOTE(review): 'FillterPattern' is misspelled but is a module-public name,
# so it is preserved.
FillterPattern = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
class JavascriptPostAggregator(Postaggregator):
    """Druid post-aggregator that evaluates a custom javascript function."""

    def __init__(self, name, field_names, function):
        # Does not call Postaggregator.__init__; it directly builds the raw
        # druid post-aggregation spec dict that pydruid serializes.
        self.post_aggregator = {
            'type': 'javascript',
            'fieldNames': field_names,
            'name': name,
            'function': function,
        }
        self.name = name
class AuditMixinNullable(AuditMixin):

    """Altering the AuditMixin to use nullable fields

    Allows creating objects programmatically outside of CRUD
    """

    created_on = Column(DateTime, default=datetime.now, nullable=True)
    changed_on = Column(
        DateTime, default=datetime.now,
        onupdate=datetime.now, nullable=True)

    @declared_attr
    def created_by_fk(cls):  # noqa
        # Nullable FK so rows can be created without a logged-in user.
        return Column(Integer, ForeignKey('ab_user.id'),
                      default=cls.get_user_id, nullable=True)

    @declared_attr
    def changed_by_fk(cls):  # noqa
        return Column(
            Integer, ForeignKey('ab_user.id'),
            default=cls.get_user_id, onupdate=cls.get_user_id, nullable=True)

    @renders('created_on')
    def creator(self):  # noqa
        # NOTE(review): renders 'created_on' but formats created_by —
        # looks like it should be @renders('created_by'); confirm.
        return '{}'.format(self.created_by or '')

    @property
    def changed_by_(self):
        return '{}'.format(self.changed_by or '')

    @renders('changed_on')
    def changed_on_(self):
        # Wrapped in a span so the timestamp does not line-break in lists.
        return Markup(
            '<span class="no-wrap">{}</span>'.format(self.changed_on))

    @renders('changed_on')
    def modified(self):
        # Human-friendly relative time, e.g. "3 hours ago".
        s = humanize.naturaltime(datetime.now() - self.changed_on)
        return Markup('<span class="no-wrap">{}</span>'.format(s))

    @property
    def icons(self):
        # NOTE(review): relies on self.datasource_edit_url / self.datasource,
        # which this mixin does not define — only usable from subclasses
        # that provide them (e.g. Slice).
        return """
        <a
                href="{self.datasource_edit_url}"
            data-toggle="tooltip"
            title="{self.datasource}">
            <i class="fa fa-database"></i>
        </a>
        """.format(**locals())
class Url(Model, AuditMixinNullable):

    """Used for the short url feature"""

    __tablename__ = 'url'
    id = Column(Integer, primary_key=True)  # short-url key
    url = Column(Text)  # full target URL the short link expands to
class CssTemplate(Model, AuditMixinNullable):

    """CSS templates for dashboards"""

    __tablename__ = 'css_templates'
    id = Column(Integer, primary_key=True)
    template_name = Column(String(250))  # display name of the template
    css = Column(Text, default='')  # raw CSS body applied to a dashboard
# Association table: many-to-many between slices and owning users.
slice_user = Table('slice_user', Model.metadata,
                   Column('id', Integer, primary_key=True),
                   Column('user_id', Integer, ForeignKey('ab_user.id')),
                   Column('slice_id', Integer, ForeignKey('slices.id'))
                   )
class Slice(Model, AuditMixinNullable):

    """A slice is essentially a report or a view on data"""

    __tablename__ = 'slices'
    id = Column(Integer, primary_key=True)
    slice_name = Column(String(250))
    datasource_id = Column(Integer)
    datasource_type = Column(String(200))
    datasource_name = Column(String(2000))
    viz_type = Column(String(250))
    params = Column(Text)  # JSON-encoded visualization parameters
    description = Column(Text)
    cache_timeout = Column(Integer)
    perm = Column(String(2000))  # synced from the datasource (see set_perm)
    owners = relationship("User", secondary=slice_user)

    def __repr__(self):
        return self.slice_name

    @property
    def cls_model(self):
        # Model class of the backing datasource (sqla table, druid, ...).
        return SourceRegistry.sources[self.datasource_type]

    @property
    def datasource(self):
        return self.get_datasource

    @datasource.getter
    @utils.memoized
    def get_datasource(self):
        # Memoized lookup of the datasource row by id.
        ds = db.session.query(
            self.cls_model).filter_by(
            id=self.datasource_id).first()
        return ds

    @renders('datasource_name')
    def datasource_link(self):
        return self.datasource.link

    @property
    def datasource_edit_url(self):
        # BUG FIX: the value was computed but never returned, so the
        # property always yielded None (rendering empty edit links).
        return self.datasource.url

    @property
    @utils.memoized
    def viz(self):
        """Viz object instantiated from the stored params."""
        d = json.loads(self.params)
        viz_class = viz_types[self.viz_type]
        return viz_class(self.datasource, form_data=d)

    @property
    def description_markeddown(self):
        return utils.markdown(self.description)

    @property
    def data(self):
        """Data used to render slice in templates"""
        d = {}
        self.token = ''
        try:
            d = self.viz.data
            self.token = d.get('token')
        except Exception as e:
            # Surface viz errors in the payload instead of failing render.
            d['error'] = str(e)
        d['slice_id'] = self.id
        d['slice_name'] = self.slice_name
        d['description'] = self.description
        d['slice_url'] = self.slice_url
        d['edit_url'] = self.edit_url
        d['description_markeddown'] = self.description_markeddown
        return d

    @property
    def json_data(self):
        return json.dumps(self.data)

    @property
    def slice_url(self):
        """Defines the url to access the slice"""
        try:
            slice_params = json.loads(self.params)
        except Exception as e:
            logging.exception(e)
            slice_params = {}
        slice_params['slice_id'] = self.id
        slice_params['json'] = "false"
        slice_params['slice_name'] = self.slice_name
        from werkzeug.urls import Href
        href = Href(
            "/caravel/explore/{obj.datasource_type}/"
            "{obj.datasource_id}/".format(obj=self))
        return href(slice_params)

    @property
    def edit_url(self):
        return "/slicemodelview/edit/{}".format(self.id)

    @property
    def slice_link(self):
        # Anchor tag pointing at the explore view; name is HTML-escaped.
        url = self.slice_url
        name = escape(self.slice_name)
        return Markup('<a href="{url}">{name}</a>'.format(**locals()))

    def get_viz(self, url_params_multidict=None):
        """Creates :py:class:viz.BaseViz object from the url_params_multidict.

        :param werkzeug.datastructures.MultiDict url_params_multidict:
            Contains the visualization params, they override the self.params
            stored in the database
        :return: object of the 'viz_type' type that is taken from the
            url_params_multidict or self.params.
        :rtype: :py:class:viz.BaseViz
        """
        slice_params = json.loads(self.params)  # {}
        slice_params['slice_id'] = self.id
        slice_params['json'] = "false"
        slice_params['slice_name'] = self.slice_name
        slice_params['viz_type'] = self.viz_type if self.viz_type else "table"
        if url_params_multidict:
            slice_params.update(url_params_multidict)
            # Drop stale stored params not present in the override dict.
            to_del = [k for k in slice_params if k not in url_params_multidict]
            for k in to_del:
                del slice_params[k]

        immutable_slice_params = ImmutableMultiDict(slice_params)
        return viz_types[immutable_slice_params.get('viz_type')](
            self.datasource,
            form_data=immutable_slice_params,
            slice_=self
        )
def set_perm(mapper, connection, target):  # noqa
    """SQLAlchemy listener that syncs ``Slice.perm`` with its datasource.

    :param mapper: SQLAlchemy mapper (unused, required by the event API)
    :param connection: active connection (unused, required by the event API)
    :param target: the ``Slice`` instance about to be flushed
    """
    src_class = target.cls_model
    id_ = target.datasource_id
    ds = db.session.query(src_class).filter_by(id=int(id_)).first()
    # The referenced datasource may have been deleted; guard against a
    # None dereference that would abort the whole flush with AttributeError.
    if ds is not None:
        target.perm = ds.perm
# Keep Slice.perm in sync with its datasource on every write.
sqla.event.listen(Slice, 'before_insert', set_perm)
sqla.event.listen(Slice, 'before_update', set_perm)

# Many-to-many association: which slices appear on which dashboards.
dashboard_slices = Table(
    'dashboard_slices', Model.metadata,
    Column('id', Integer, primary_key=True),
    Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
    Column('slice_id', Integer, ForeignKey('slices.id')),
)

# Many-to-many association: dashboard ownership by users.
dashboard_user = Table(
    'dashboard_user', Model.metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey('ab_user.id')),
    Column('dashboard_id', Integer, ForeignKey('dashboards.id'))
)
class Dashboard(Model, AuditMixinNullable):

    """The dashboard object!"""

    __tablename__ = 'dashboards'
    id = Column(Integer, primary_key=True)
    dashboard_title = Column(String(500))
    # JSON blob describing the layout grid of the slices.
    position_json = Column(Text)
    description = Column(Text)
    # Custom CSS applied when the dashboard is rendered.
    css = Column(Text)
    json_metadata = Column(Text)
    # URL-friendly unique identifier; `url` falls back to `id` when unset.
    slug = Column(String(255), unique=True)
    slices = relationship(
        'Slice', secondary=dashboard_slices, backref='dashboards')
    owners = relationship("User", secondary=dashboard_user)

    def __repr__(self):
        return self.dashboard_title

    @property
    def table_names(self):
        """Comma-separated names of the distinct datasources on this dashboard."""
        return ", ".join({"{}".format(s.datasource) for s in self.slices})

    @property
    def url(self):
        """Canonical dashboard URL, preferring the slug over the numeric id."""
        return "/caravel/dashboard/{}/".format(self.slug or self.id)

    @property
    def metadata_dejson(self):
        """``json_metadata`` parsed into a dict (empty dict when unset)."""
        if self.json_metadata:
            return json.loads(self.json_metadata)
        else:
            return {}

    @property
    def sqla_metadata(self):
        # NOTE(review): Dashboard defines no get_sqla_engine(), so accessing
        # this property would raise AttributeError, and MetaData.reflect()
        # returns None anyway -- looks dead/broken; confirm before relying on it.
        metadata = MetaData(bind=self.get_sqla_engine())
        return metadata.reflect()

    def dashboard_link(self):
        """HTML anchor to this dashboard, with the title escaped."""
        title = escape(self.dashboard_title)
        return Markup(
            '<a href="{self.url}">{title}</a>'.format(**locals()))

    @property
    def json_data(self):
        """JSON payload consumed by the dashboard frontend."""
        d = {
            'id': self.id,
            'metadata': self.metadata_dejson,
            'dashboard_title': self.dashboard_title,
            'slug': self.slug,
            'slices': [slc.data for slc in self.slices],
            'position_json': json.loads(self.position_json) if self.position_json else [],
        }
        return json.dumps(d)
class Queryable(object):

    """A common interface to objects that are queryable (tables and datasources)"""

    @property
    def column_names(self):
        """Sorted names of every column."""
        return sorted(col.column_name for col in self.columns)

    @property
    def main_dttm_col(self):
        """Default temporal column name; subclasses may override."""
        return "timestamp"

    @property
    def groupby_column_names(self):
        """Sorted names of the columns flagged as groupable."""
        return sorted(col.column_name for col in self.columns if col.groupby)

    @property
    def filterable_column_names(self):
        """Sorted names of the columns flagged as filterable."""
        return sorted(col.column_name for col in self.columns if col.filterable)

    @property
    def dttm_cols(self):
        """Temporal columns; none by default, subclasses override."""
        return []

    @property
    def url(self):
        """Edit-view URL for this object."""
        return '/{}/edit/{}'.format(self.baselink, self.id)

    @property
    def explore_url(self):
        """Explore URL, honoring a configured default endpoint first."""
        endpoint = self.default_endpoint
        if endpoint:
            return endpoint
        return "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
class Database(Model, AuditMixinNullable):

    """An ORM object that stores Database related information"""

    __tablename__ = 'dbs'
    id = Column(Integer, primary_key=True)
    database_name = Column(String(250), unique=True)
    sqlalchemy_uri = Column(String(1024))
    # Password is stored encrypted; the plain-text URI column is masked.
    password = Column(EncryptedType(String(1024), config.get('SECRET_KEY')))
    cache_timeout = Column(Integer)
    select_as_create_table_as = Column(Boolean, default=False)
    expose_in_sqllab = Column(Boolean, default=False)
    allow_run_sync = Column(Boolean, default=True)
    allow_run_async = Column(Boolean, default=False)
    allow_ctas = Column(Boolean, default=False)
    allow_dml = Column(Boolean, default=False)
    force_ctas_schema = Column(String(250))
    # JSON blob with extra engine/metadata parameters.
    extra = Column(Text, default=textwrap.dedent("""\
    {
        "metadata_params": {},
        "engine_params": {}
    }
    """))

    def __repr__(self):
        return self.database_name

    @property
    def backend(self):
        """Name of the SQLAlchemy dialect, e.g. 'mysql' or 'presto'."""
        url = make_url(self.sqlalchemy_uri_decrypted)
        return url.get_backend_name()

    def set_sqlalchemy_uri(self, uri):
        """Store the URI, keeping the real password only in the encrypted column."""
        conn = sqla.engine.url.make_url(uri)
        self.password = conn.password
        conn.password = "X" * 10 if conn.password else None
        self.sqlalchemy_uri = str(conn)  # hides the password

    def get_sqla_engine(self, schema=None):
        """Create an engine, optionally targeting a specific schema."""
        extra = self.get_extra()
        url = make_url(self.sqlalchemy_uri_decrypted)
        params = extra.get('engine_params', {})
        if self.backend == 'presto' and schema:
            # Presto encodes catalog/schema in the URL's database part.
            if '/' in url.database:
                url.database = url.database.split('/')[0] + '/' + schema
            else:
                url.database += '/' + schema
        elif schema:
            url.database = schema
        return create_engine(url, **params)

    def get_df(self, sql, schema):
        """Run ``sql`` and return the result as a pandas DataFrame."""
        eng = self.get_sqla_engine(schema=schema)
        cur = eng.execute(sql, schema=schema)
        cols = [col[0] for col in cur.cursor.description]
        df = pd.DataFrame(cur.fetchall(), columns=cols)
        return df

    def compile_sqla_query(self, qry, schema=None):
        """Compile a SQLAlchemy query to a SQL string with literal binds."""
        eng = self.get_sqla_engine(schema=schema)
        compiled = qry.compile(eng, compile_kwargs={"literal_binds": True})
        return '{}'.format(compiled)

    def select_star(self, table_name, schema=None, limit=1000):
        """Generates a ``select *`` statement in the proper dialect"""
        qry = select('*').select_from(text(table_name))
        if limit:
            qry = qry.limit(limit)
        # Forward the schema so the statement compiles against the right
        # engine/schema (it was previously accepted but silently ignored).
        return self.compile_sqla_query(qry, schema=schema)

    def wrap_sql_limit(self, sql, limit=1000):
        """Wrap arbitrary SQL as an inner query with a LIMIT applied."""
        qry = (
            select('*')
            .select_from(TextAsFrom(text(sql), ['*'])
                         .alias('inner_qry')).limit(limit)
        )
        return self.compile_sqla_query(qry)

    def safe_sqlalchemy_uri(self):
        """URI as stored; the password was masked by set_sqlalchemy_uri."""
        return self.sqlalchemy_uri

    @property
    def inspector(self):
        """A fresh SQLAlchemy inspector bound to this database."""
        engine = self.get_sqla_engine()
        return sqla.inspect(engine)

    def all_table_names(self, schema=None):
        """Sorted table names, optionally within a schema."""
        return sorted(self.inspector.get_table_names(schema))

    def all_view_names(self, schema=None):
        """Best-effort list of view names (empty when unsupported)."""
        views = []
        try:
            views = self.inspector.get_view_names(schema)
        except Exception:
            # Not every SQLAlchemy dialect implements view reflection;
            # deliberately swallow and return what we have.
            pass
        return views

    def all_schema_names(self):
        """Sorted schema names reported by the inspector."""
        return sorted(self.inspector.get_schema_names())

    def grains(self):
        """Defines time granularity database-specific expressions.

        The idea here is to make it easy for users to change the time grain
        form a datetime (maybe the source grain is arbitrary timestamps, daily
        or 5 minutes increments) to another, "truncated" datetime. Since
        each database has slightly different but similar datetime functions,
        this allows a mapping between database engines and actual functions.
        """
        Grain = namedtuple('Grain', 'name label function')
        db_time_grains = {
            'presto': (
                Grain('Time Column', _('Time Column'), '{col}'),
                Grain('second', _('second'),
                      "date_trunc('second', CAST({col} AS TIMESTAMP))"),
                Grain('minute', _('minute'),
                      "date_trunc('minute', CAST({col} AS TIMESTAMP))"),
                Grain('hour', _('hour'),
                      "date_trunc('hour', CAST({col} AS TIMESTAMP))"),
                Grain('day', _('day'),
                      "date_trunc('day', CAST({col} AS TIMESTAMP))"),
                Grain('week', _('week'),
                      "date_trunc('week', CAST({col} AS TIMESTAMP))"),
                Grain('month', _('month'),
                      "date_trunc('month', CAST({col} AS TIMESTAMP))"),
                Grain('quarter', _('quarter'),
                      "date_trunc('quarter', CAST({col} AS TIMESTAMP))"),
                Grain("week_ending_saturday", _('week_ending_saturday'),
                      "date_add('day', 5, date_trunc('week', date_add('day', 1, "
                      "CAST({col} AS TIMESTAMP))))"),
                Grain("week_start_sunday", _('week_start_sunday'),
                      "date_add('day', -1, date_trunc('week', "
                      "date_add('day', 1, CAST({col} AS TIMESTAMP))))"),
            ),
            'mysql': (
                Grain('Time Column', _('Time Column'), '{col}'),
                Grain("second", _('second'), "DATE_ADD(DATE({col}), "
                      "INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60"
                      " + SECOND({col})) SECOND)"),
                Grain("minute", _('minute'), "DATE_ADD(DATE({col}), "
                      "INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)"),
                Grain("hour", _('hour'), "DATE_ADD(DATE({col}), "
                      "INTERVAL HOUR({col}) HOUR)"),
                Grain('day', _('day'), 'DATE({col})'),
                Grain("week", _('week'), "DATE(DATE_SUB({col}, "
                      "INTERVAL DAYOFWEEK({col}) - 1 DAY))"),
                Grain("month", _('month'), "DATE(DATE_SUB({col}, "
                      "INTERVAL DAYOFMONTH({col}) - 1 DAY))"),
            ),
            'sqlite': (
                Grain('Time Column', _('Time Column'), '{col}'),
                Grain('day', _('day'), 'DATE({col})'),
                Grain("week", _('week'),
                      "DATE({col}, -strftime('%w', {col}) || ' days')"),
                Grain("month", _('month'),
                      "DATE({col}, -strftime('%d', {col}) || ' days')"),
            ),
            'postgresql': (
                Grain("Time Column", _('Time Column'), "{col}"),
                Grain("second", _('second'), "DATE_TRUNC('second', {col})"),
                Grain("minute", _('minute'), "DATE_TRUNC('minute', {col})"),
                Grain("hour", _('hour'), "DATE_TRUNC('hour', {col})"),
                Grain("day", _('day'), "DATE_TRUNC('day', {col})"),
                Grain("week", _('week'), "DATE_TRUNC('week', {col})"),
                Grain("month", _('month'), "DATE_TRUNC('month', {col})"),
                Grain("year", _('year'), "DATE_TRUNC('year', {col})"),
            ),
            'mssql': (
                Grain("Time Column", _('Time Column'), "{col}"),
                Grain("second", _('second'), "DATEADD(second, "
                      "DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')"),
                Grain("minute", _('minute'), "DATEADD(minute, "
                      "DATEDIFF(minute, 0, {col}), 0)"),
                Grain("5 minute", _('5 minute'), "DATEADD(minute, "
                      "DATEDIFF(minute, 0, {col}) / 5 * 5, 0)"),
                Grain("half hour", _('half hour'), "DATEADD(minute, "
                      "DATEDIFF(minute, 0, {col}) / 30 * 30, 0)"),
                Grain("hour", _('hour'), "DATEADD(hour, "
                      "DATEDIFF(hour, 0, {col}), 0)"),
                Grain("day", _('day'), "DATEADD(day, "
                      "DATEDIFF(day, 0, {col}), 0)"),
                Grain("week", _('week'), "DATEADD(week, "
                      "DATEDIFF(week, 0, {col}), 0)"),
                Grain("month", _('month'), "DATEADD(month, "
                      "DATEDIFF(month, 0, {col}), 0)"),
                Grain("quarter", _('quarter'), "DATEADD(quarter, "
                      "DATEDIFF(quarter, 0, {col}), 0)"),
                Grain("year", _('year'), "DATEADD(year, "
                      "DATEDIFF(year, 0, {col}), 0)"),
            ),
        }
        db_time_grains['redshift'] = db_time_grains['postgresql']
        db_time_grains['vertica'] = db_time_grains['postgresql']
        for db_type, grains in db_time_grains.items():
            if self.sqlalchemy_uri.startswith(db_type):
                return grains
        # Unknown backend: return an empty tuple rather than the implicit
        # None, so grains_dict() and callers that iterate don't crash.
        return ()

    def grains_dict(self):
        """Grains keyed by name for quick lookup."""
        return {grain.name: grain for grain in self.grains()}

    def epoch_to_dttm(self, ms=False):
        """Database-specific SQL to convert unix timestamp to datetime
        """
        ts2date_exprs = {
            'sqlite': "datetime({col}, 'unixepoch')",
            'postgresql': "(timestamp 'epoch' + {col} * interval '1 second')",
            'mysql': "from_unixtime({col})",
            'mssql': "dateadd(S, {col}, '1970-01-01')"
        }
        ts2date_exprs['redshift'] = ts2date_exprs['postgresql']
        ts2date_exprs['vertica'] = ts2date_exprs['postgresql']
        for db_type, expr in ts2date_exprs.items():
            if self.sqlalchemy_uri.startswith(db_type):
                # For millisecond epochs, divide down to seconds first.
                return expr.replace('{col}', '({col}/1000.0)') if ms else expr
        raise Exception(_("Unable to convert unix epoch to datetime"))

    def get_extra(self):
        """``extra`` parsed as a dict; empty on parse failure (logged)."""
        extra = {}
        if self.extra:
            try:
                extra = json.loads(self.extra)
            except Exception as e:
                logging.error(e)
        return extra

    def get_table(self, table_name, schema=None):
        """Reflect and return a SQLAlchemy Table for ``table_name``."""
        extra = self.get_extra()
        meta = MetaData(**extra.get('metadata_params', {}))
        return Table(
            table_name, meta,
            schema=schema or None,
            autoload=True,
            autoload_with=self.get_sqla_engine())

    def get_columns(self, table_name, schema=None):
        """Column metadata as reported by the inspector."""
        return self.inspector.get_columns(table_name, schema)

    def get_indexes(self, table_name, schema=None):
        """Index metadata as reported by the inspector."""
        return self.inspector.get_indexes(table_name, schema)

    @property
    def sqlalchemy_uri_decrypted(self):
        """The URI with the real (decrypted) password restored."""
        conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
        conn.password = self.password
        return str(conn)

    @property
    def sql_url(self):
        """URL of the SQL Lab view for this database."""
        return '/caravel/sql/{}/'.format(self.id)

    @property
    def perm(self):
        """Permission string for this database."""
        return (
            "[{obj.database_name}].(id:{obj.id})").format(obj=self)
class SqlaTable(Model, Queryable, AuditMixinNullable):

    """An ORM object for SqlAlchemy table references"""

    type = "table"

    __tablename__ = 'tables'
    id = Column(Integer, primary_key=True)
    table_name = Column(String(250))
    main_dttm_col = Column(String(250))
    description = Column(Text)
    default_endpoint = Column(Text)
    database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
    is_featured = Column(Boolean, default=False)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    owner = relationship('User', backref='tables', foreign_keys=[user_id])
    database = relationship(
        'Database', backref='tables', foreign_keys=[database_id])
    offset = Column(Integer, default=0)
    cache_timeout = Column(Integer)
    schema = Column(String(255))
    # Optional SQL statement used in place of the physical table.
    sql = Column(Text)
    table_columns = relationship("TableColumn", back_populates="table")

    baselink = "tablemodelview"

    __table_args__ = (
        sqla.UniqueConstraint(
            'database_id', 'schema', 'table_name',
            name='_customer_location_uc'),)

    def __repr__(self):
        return self.table_name

    @property
    def description_markeddown(self):
        """Description rendered from markdown to HTML."""
        return utils.markdown(self.description)

    @property
    def link(self):
        """HTML anchor to the explore view, table name escaped."""
        table_name = escape(self.table_name)
        return Markup(
            '<a href="{self.explore_url}">{table_name}</a>'.format(**locals()))

    @property
    def perm(self):
        """Permission string for this table."""
        return (
            "[{obj.database}].[{obj.table_name}]"
            "(id:{obj.id})").format(obj=self)

    @property
    def name(self):
        """Generic datasource name alias (was defined twice; deduplicated)."""
        return self.table_name

    @property
    def full_name(self):
        """Fully-qualified ``[database].[table]`` name."""
        return "[{obj.database}].[{obj.table_name}]".format(obj=self)

    @property
    def dttm_cols(self):
        """Datetime column names, always including main_dttm_col."""
        dttm_col_names = [c.column_name for c in self.columns if c.is_dttm]
        if self.main_dttm_col not in dttm_col_names:
            dttm_col_names.append(self.main_dttm_col)
        return dttm_col_names

    @property
    def num_cols(self):
        """Names of the numeric columns."""
        return [c.column_name for c in self.columns if c.isnum]

    @property
    def any_dttm_col(self):
        """First datetime column name, or None when there is none."""
        cols = self.dttm_cols
        if cols:
            return cols[0]

    @property
    def html(self):
        """HTML table listing the columns and their types."""
        t = ((c.column_name, c.type) for c in self.columns)
        df = pd.DataFrame(t)
        df.columns = ['field', 'type']
        return df.to_html(
            index=False,
            classes=(
                "dataframe table table-striped table-bordered "
                "table-condensed"))

    @property
    def metrics_combo(self):
        """(name, label) pairs for the metric dropdown, sorted by label."""
        return sorted(
            [
                (m.metric_name, m.verbose_name or m.metric_name)
                for m in self.metrics],
            key=lambda x: x[1])

    @property
    def sql_url(self):
        """SQL Lab URL pre-filtered to this table."""
        return self.database.sql_url + "?table_name=" + str(self.table_name)

    def get_col(self, col_name):
        """Return the TableColumn named ``col_name``, or None."""
        columns = self.table_columns
        for col in columns:
            if col_name == col.column_name:
                return col

    def query(  # sqla
            self, groupby, metrics,
            granularity,
            from_dttm, to_dttm,
            filter=None,  # noqa
            is_timeseries=True,
            timeseries_limit=15, row_limit=None,
            inner_from_dttm=None, inner_to_dttm=None,
            orderby=None,
            extras=None,
            columns=None):
        """Querying any sqla table from this common interface

        Builds a SELECT (optionally with a time-series inner query limiting
        the number of series), runs it, and returns a QueryResult with the
        DataFrame, the elapsed time and the formatted SQL.
        """
        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        cols = {col.column_name: col for col in self.columns}
        metrics_dict = {m.metric_name: m for m in self.metrics}
        qry_start_dttm = datetime.now()

        if not granularity and is_timeseries:
            raise Exception(_(
                "Datetime column not provided as part table configuration "
                "and is required by this type of chart"))
        # Assumes every requested metric name exists in metrics_dict;
        # an unknown name would raise AttributeError here.
        metrics_exprs = [metrics_dict.get(m).sqla_col for m in metrics]

        if metrics:
            main_metric_expr = metrics_exprs[0]
        else:
            main_metric_expr = literal_column("COUNT(*)").label("ccount")

        select_exprs = []
        groupby_exprs = []

        if groupby:
            select_exprs = []
            inner_select_exprs = []
            inner_groupby_exprs = []
            for s in groupby:
                col = cols[s]
                outer = col.sqla_col
                # Inner (subquery) columns get a '__' suffix so the outer
                # query can join back on them.
                inner = col.sqla_col.label(col.column_name + '__')

                groupby_exprs.append(outer)
                select_exprs.append(outer)
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)
        elif columns:
            for s in columns:
                select_exprs.append(cols[s].sqla_col)
            metrics_exprs = []

        if granularity:
            # TODO: sqlalchemy 1.2 release should be doing this on its own.
            # Patch only if the column clause is specific for DateTime set and
            # granularity is selected. NOTE: this registers a global compiler
            # hook on every call -- a known hack.
            @compiles(ColumnClause)
            def _(element, compiler, **kw):
                text = compiler.visit_column(element, **kw)
                try:
                    if element.is_literal and hasattr(element.type, 'python_type') and \
                            type(element.type) is DateTime:
                        text = text.replace('%%', '%')
                except NotImplementedError:
                    pass  # Some elements raise NotImplementedError for python_type
                return text

            dttm_col = cols[granularity]
            dttm_expr = dttm_col.sqla_col.label('timestamp')
            timestamp = dttm_expr

            # Transforming time grain into an expression based on configuration
            time_grain_sqla = extras.get('time_grain_sqla')
            if time_grain_sqla:
                if dttm_col.python_date_format == 'epoch_s':
                    dttm_expr = self.database.epoch_to_dttm().format(
                        col=dttm_expr)
                elif dttm_col.python_date_format == 'epoch_ms':
                    dttm_expr = self.database.epoch_to_dttm(ms=True).format(
                        col=dttm_expr)
                # FIX: the previous code used .get(key, '{col}') and then
                # accessed `.function` on the result, which raised
                # AttributeError whenever the grain name was unknown (a str
                # has no `.function`). Look the grain up first and fall back
                # to the identity expression.
                grain = self.database.grains_dict().get(time_grain_sqla)
                grain_function = grain.function if grain else '{col}'
                timestamp_grain = literal_column(
                    grain_function.format(col=dttm_expr), type_=DateTime).label('timestamp')
            else:
                timestamp_grain = timestamp

            if is_timeseries:
                select_exprs += [timestamp_grain]
                groupby_exprs += [timestamp_grain]

            outer_from = text(dttm_col.dttm_sql_literal(from_dttm))
            outer_to = text(dttm_col.dttm_sql_literal(to_dttm))

            time_filter = [
                timestamp >= outer_from,
                timestamp <= outer_to,
            ]
            inner_time_filter = copy(time_filter)
            if inner_from_dttm:
                inner_time_filter[0] = timestamp >= text(
                    dttm_col.dttm_sql_literal(inner_from_dttm))
            if inner_to_dttm:
                inner_time_filter[1] = timestamp <= text(
                    dttm_col.dttm_sql_literal(inner_to_dttm))
        else:
            inner_time_filter = []

        select_exprs += metrics_exprs
        qry = select(select_exprs)
        tbl = table(self.table_name)
        if self.schema:
            tbl.schema = self.schema

        # Supporting arbitrary SQL statements in place of tables
        if self.sql:
            tbl = text('(' + self.sql + ') as expr_qry ')
        if not columns:
            qry = qry.group_by(*groupby_exprs)

        where_clause_and = []
        having_clause_and = []
        # FIX: guard against filter=None (the declared default), which used
        # to raise TypeError when iterated.
        for col, op, eq in filter or []:
            col_obj = cols[col]
            if op in ('in', 'not in'):
                splitted = FillterPattern.split(eq)[1::2]
                values = [types.replace("'", '').strip() for types in splitted]
                cond = col_obj.sqla_col.in_(values)
                if op == 'not in':
                    cond = ~cond
                where_clause_and.append(cond)
        if extras and 'where' in extras:
            where_clause_and += [text(extras['where'])]
        if extras and 'having' in extras:
            having_clause_and += [text(extras['having'])]
        if granularity:
            qry = qry.where(and_(*(time_filter + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))
        if groupby:
            qry = qry.order_by(desc(main_metric_expr))
        elif orderby:
            for col, ascending in orderby:
                direction = asc if ascending else desc
                qry = qry.order_by(direction(col))
        qry = qry.limit(row_limit)

        if timeseries_limit and groupby:
            # some sql dialects require for order by expressions
            # to also be in the select clause
            inner_select_exprs += [main_metric_expr]
            subq = select(inner_select_exprs)
            subq = subq.select_from(tbl)
            subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
            subq = subq.group_by(*inner_groupby_exprs)
            subq = subq.order_by(desc(main_metric_expr))
            subq = subq.limit(timeseries_limit)
            on_clause = []
            for i, gb in enumerate(groupby):
                on_clause.append(
                    groupby_exprs[i] == column(gb + '__'))
            tbl = tbl.join(subq.alias(), and_(*on_clause))

        qry = qry.select_from(tbl)

        engine = self.database.get_sqla_engine()
        sql = "{}".format(
            qry.compile(
                engine, compile_kwargs={"literal_binds": True},),
        )
        df = pd.read_sql_query(
            sql=sql,
            con=engine
        )
        sql = sqlparse.format(sql, reindent=True)
        return QueryResult(
            df=df, duration=datetime.now() - qry_start_dttm, query=sql)

    def get_sqla_table_object(self):
        """Reflect the physical table from the database."""
        return self.database.get_table(self.table_name, schema=self.schema)

    def fetch_metadata(self):
        """Fetches the metadata for the table and merges it in"""
        try:
            table = self.get_sqla_table_object()
        except Exception:
            raise Exception(
                "Table doesn't seem to exist in the specified database, "
                "couldn't fetch column information")

        TC = TableColumn  # noqa shortcut to class
        M = SqlMetric  # noqa
        metrics = []
        any_date_col = None
        for col in table.columns:
            try:
                datatype = "{}".format(col.type).upper()
            except Exception as e:
                # Some dialect types can't be stringified; record as UNKNOWN.
                datatype = "UNKNOWN"
                logging.error(
                    "Unrecognized data type in {}.{}".format(table, col.name))
                logging.exception(e)
            dbcol = (
                db.session
                .query(TC)
                .filter(TC.table == self)
                .filter(TC.column_name == col.name)
                .first()
            )
            db.session.flush()
            if not dbcol:
                # First time we see this column: derive sensible flags
                # from its type.
                dbcol = TableColumn(column_name=col.name, type=datatype)
                dbcol.groupby = dbcol.is_string
                dbcol.filterable = dbcol.is_string
                dbcol.sum = dbcol.isnum
                dbcol.is_dttm = dbcol.is_time
            db.session.merge(self)
            self.columns.append(dbcol)

            if not any_date_col and dbcol.is_time:
                any_date_col = col.name

            # Quote the name with the target dialect for metric expressions.
            quoted = "{}".format(
                column(dbcol.column_name).compile(dialect=db.engine.dialect))
            if dbcol.sum:
                metrics.append(M(
                    metric_name='sum__' + dbcol.column_name,
                    verbose_name='sum__' + dbcol.column_name,
                    metric_type='sum',
                    expression="SUM({})".format(quoted)
                ))
            if dbcol.max:
                metrics.append(M(
                    metric_name='max__' + dbcol.column_name,
                    verbose_name='max__' + dbcol.column_name,
                    metric_type='max',
                    expression="MAX({})".format(quoted)
                ))
            if dbcol.min:
                metrics.append(M(
                    metric_name='min__' + dbcol.column_name,
                    verbose_name='min__' + dbcol.column_name,
                    metric_type='min',
                    expression="MIN({})".format(quoted)
                ))
            if dbcol.count_distinct:
                metrics.append(M(
                    metric_name='count_distinct__' + dbcol.column_name,
                    verbose_name='count_distinct__' + dbcol.column_name,
                    metric_type='count_distinct',
                    expression="COUNT(DISTINCT {})".format(quoted)
                ))
            dbcol.type = datatype
            db.session.merge(self)
            db.session.commit()

        metrics.append(M(
            metric_name='count',
            verbose_name='COUNT(*)',
            metric_type='count',
            expression="COUNT(*)"
        ))
        for metric in metrics:
            m = (
                db.session.query(M)
                .filter(M.metric_name == metric.metric_name)
                .filter(M.table_id == self.id)
                .first()
            )
            metric.table_id = self.id
            if not m:
                db.session.add(metric)
                db.session.commit()
        if not self.main_dttm_col:
            self.main_dttm_col = any_date_col
class SqlMetric(Model, AuditMixinNullable):

    """ORM object for metrics, each table can have multiple metrics"""

    __tablename__ = 'sql_metrics'
    id = Column(Integer, primary_key=True)
    metric_name = Column(String(512))
    verbose_name = Column(String(1024))
    # e.g. 'sum', 'min', 'max', 'count_distinct' (see fetch_metadata).
    metric_type = Column(String(32))
    table_id = Column(Integer, ForeignKey('tables.id'))
    table = relationship(
        'SqlaTable', backref='metrics', foreign_keys=[table_id])
    # Free-form SQL expression that computes the metric.
    expression = Column(Text)
    description = Column(Text)
    is_restricted = Column(Boolean, default=False, nullable=True)
    # d3 number format string used when rendering the metric.
    d3format = Column(String(128))

    @property
    def sqla_col(self):
        """The metric expression as a labeled SQLAlchemy column."""
        name = self.metric_name
        return literal_column(self.expression).label(name)

    @property
    def perm(self):
        """Permission string, or None when the metric has no parent table."""
        return (
            "{parent_name}.[{obj.metric_name}](id:{obj.id})"
        ).format(obj=self,
                 parent_name=self.table.full_name) if self.table else None
class TableColumn(Model, AuditMixinNullable):

    """ORM object for table columns, each table can have multiple columns"""

    __tablename__ = 'table_columns'
    id = Column(Integer, primary_key=True)
    table_id = Column(Integer, ForeignKey('tables.id'))
    table = relationship(
        'SqlaTable', backref='columns', foreign_keys=[table_id])
    column_name = Column(String(255))
    verbose_name = Column(String(1024))
    is_dttm = Column(Boolean, default=False)
    is_active = Column(Boolean, default=True)
    type = Column(String(32), default='')
    # Flags driving which aggregate metrics fetch_metadata() derives and
    # how the column can be used in the explore view.
    groupby = Column(Boolean, default=False)
    count_distinct = Column(Boolean, default=False)
    sum = Column(Boolean, default=False)
    max = Column(Boolean, default=False)
    min = Column(Boolean, default=False)
    filterable = Column(Boolean, default=False)
    # Optional SQL expression; when empty the raw column is used.
    expression = Column(Text, default='')
    description = Column(Text, default='')
    # strftime pattern, or the sentinels 'epoch_s'/'epoch_ms'.
    python_date_format = Column(String(255))
    # Optional SQL template with a '{}' placeholder for the timestamp.
    database_expression = Column(String(255))

    # Substrings matched against upper-cased `type` to classify the column.
    num_types = ('DOUBLE', 'FLOAT', 'INT', 'BIGINT', 'LONG')
    date_types = ('DATE', 'TIME')
    str_types = ('VARCHAR', 'STRING', 'CHAR')

    def __repr__(self):
        return self.column_name

    @property
    def isnum(self):
        """True when the column type looks numeric."""
        return any([t in self.type.upper() for t in self.num_types])

    @property
    def is_time(self):
        """True when the column type looks temporal."""
        return any([t in self.type.upper() for t in self.date_types])

    @property
    def is_string(self):
        """True when the column type looks like text."""
        return any([t in self.type.upper() for t in self.str_types])

    @property
    def sqla_col(self):
        """Labeled SQLAlchemy column: raw column or custom expression."""
        name = self.column_name
        if not self.expression:
            col = column(self.column_name).label(name)
        else:
            col = literal_column(self.expression).label(name)
        return col

    def dttm_sql_literal(self, dttm):
        """Convert datetime object to string

        If database_expression is empty, the internal dttm
        will be parsed as the string with the pattern that
        the user inputted (python_date_format)
        If database_expression is not empty, the internal dttm
        will be parsed as the sql sentence for the database to convert
        """
        tf = self.python_date_format or '%Y-%m-%d %H:%M:%S.%f'
        if self.database_expression:
            return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
        elif tf == 'epoch_s':
            return str((dttm - datetime(1970, 1, 1)).total_seconds())
        elif tf == 'epoch_ms':
            return str((dttm - datetime(1970, 1, 1)).total_seconds()*1000.0)
        else:
            default = "'{}'".format(dttm.strftime(tf))
            iso = dttm.isoformat()
            # Dialect-specific literal forms; unknown backends fall through
            # to the plain quoted strftime value.
            d = {
                'mssql': "CONVERT(DATETIME, '{}', 126)".format(iso),  # untested
                'mysql': default,
                'oracle':
                    """TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')""".format(
                        dttm.isoformat()),
                'presto': default,
                'sqlite': default,
            }
            for k, v in d.items():
                if self.table.database.sqlalchemy_uri.startswith(k):
                    return v
            return default
class DruidCluster(Model, AuditMixinNullable):

    """ORM object referencing the Druid clusters"""

    __tablename__ = 'clusters'
    id = Column(Integer, primary_key=True)
    cluster_name = Column(String(250), unique=True)
    # The coordinator serves metadata (datasource list, version).
    coordinator_host = Column(String(255))
    coordinator_port = Column(Integer)
    coordinator_endpoint = Column(
        String(255), default='druid/coordinator/v1/metadata')
    # The broker is the query endpoint used by the pydruid client.
    broker_host = Column(String(255))
    broker_port = Column(Integer)
    broker_endpoint = Column(String(255), default='druid/v2')
    metadata_last_refreshed = Column(DateTime)

    def __repr__(self):
        return self.cluster_name

    def get_pydruid_client(self):
        """Build a PyDruid client pointed at this cluster's broker."""
        cli = PyDruid(
            "http://{0}:{1}/".format(self.broker_host, self.broker_port),
            self.broker_endpoint)
        return cli

    def get_datasources(self):
        """Fetch the list of datasource names from the coordinator."""
        endpoint = (
            "http://{obj.coordinator_host}:{obj.coordinator_port}/"
            "{obj.coordinator_endpoint}/datasources"
        ).format(obj=self)

        return json.loads(requests.get(endpoint).text)

    def get_druid_version(self):
        """Druid version string from the coordinator's /status endpoint."""
        endpoint = (
            "http://{obj.coordinator_host}:{obj.coordinator_port}/status"
        ).format(obj=self)
        return json.loads(requests.get(endpoint).text)['version']

    def refresh_datasources(self):
        """Sync every non-blacklisted datasource into the Caravel db."""
        # NOTE(review): druid_version is assigned as a plain attribute, not a
        # mapped Column on this model -- confirm whether it is persisted.
        self.druid_version = self.get_druid_version()
        for datasource in self.get_datasources():
            if datasource not in config.get('DRUID_DATA_SOURCE_BLACKLIST'):
                DruidDatasource.sync_to_db(datasource, self)

    @property
    def perm(self):
        """Permission string for this cluster."""
        return "[{obj.cluster_name}].(id:{obj.id})".format(obj=self)
class DruidDatasource(Model, AuditMixinNullable, Queryable):

    """ORM object referencing Druid datasources (tables)"""

    type = "druid"

    baselink = "druiddatasourcemodelview"

    __tablename__ = 'datasources'
    id = Column(Integer, primary_key=True)
    datasource_name = Column(String(255), unique=True)
    is_featured = Column(Boolean, default=False)
    # Hidden datasources are kept in the db but not listed in the UI.
    is_hidden = Column(Boolean, default=False)
    description = Column(Text)
    default_endpoint = Column(Text)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    owner = relationship('User', backref='datasources', foreign_keys=[user_id])
    # Linked by cluster *name* rather than the surrogate id.
    cluster_name = Column(
        String(250), ForeignKey('clusters.cluster_name'))
    cluster = relationship(
        'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
    offset = Column(Integer, default=0)
    cache_timeout = Column(Integer)
@property
def metrics_combo(self):
    """(metric name, verbose name) pairs sorted by verbose name."""
    pairs = [(m.metric_name, m.verbose_name) for m in self.metrics]
    pairs.sort(key=lambda pair: pair[1])
    return pairs
@property
def num_cols(self):
    """Names of the columns classified as numeric."""
    numeric_names = []
    for col in self.columns:
        if col.isnum:
            numeric_names.append(col.column_name)
    return numeric_names
@property
def name(self):
    # Generic alias used by code that handles any datasource type.
    return self.datasource_name
@property
def perm(self):
    """Permission string for this Druid datasource."""
    return (
        "[{obj.cluster_name}].[{obj.datasource_name}]"
        "(id:{obj.id})").format(obj=self)
@property
def link(self):
    """HTML anchor to this datasource, with the display name escaped.

    Interpolate the values *before* wrapping in Markup, matching the
    pattern used by Slice.slice_link and datasource_link; calling
    ``.format()`` on the Markup object itself would additionally
    HTML-escape ``self.url``.
    """
    name = escape(self.datasource_name)
    return Markup('<a href="{self.url}">{name}</a>'.format(**locals()))
@property
def full_name(self):
    """Fully-qualified ``[cluster].[datasource]`` name."""
    return (
        "[{obj.cluster_name}]."
        "[{obj.datasource_name}]").format(obj=self)
def __repr__(self):
    # Shown in FAB list views and logs.
    return self.datasource_name
@renders('datasource_name')
def datasource_link(self):
    """HTML anchor to the explore view (rendered in FAB list views)."""
    url = "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
    name = escape(self.datasource_name)
    return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_metric_obj(self, metric_name):
    """JSON definition of the named metric (IndexError when missing)."""
    matches = [
        metric.json_obj
        for metric in self.metrics
        if metric.metric_name == metric_name
    ]
    return matches[0]
@staticmethod
def version_higher(v1, v2):
    """is v1 higher than v2

    >>> DruidDatasource.version_higher('0.8.2', '0.9.1')
    False
    >>> DruidDatasource.version_higher('0.8.2', '0.6.1')
    True
    >>> DruidDatasource.version_higher('0.8.2', '0.8.2')
    False
    >>> DruidDatasource.version_higher('0.8.2', '0.9.BETA')
    False
    >>> DruidDatasource.version_higher('0.8.2', '0.9')
    False
    """
    def to_int(part):
        # Non-numeric components (e.g. 'BETA') count as 0.
        try:
            return int(part)
        except (TypeError, ValueError):
            return 0

    left = [to_int(p) for p in v1.split('.')]
    right = [to_int(p) for p in v2.split('.')]
    # Pad/truncate to (major, minor, patch).
    left = (left + [0, 0, 0])[:3]
    right = (right + [0, 0, 0])[:3]
    # Lexicographic list comparison is exactly the original chained
    # major/minor/patch check.
    return left > right
def latest_metadata(self):
    """Returns segment metadata from the latest segment"""
    client = self.cluster.get_pydruid_client()
    results = client.time_boundary(datasource=self.datasource_name)
    # No time boundary means no data; implicitly return None.
    if not results:
        return
    max_time = results[0]['result']['maxTime']
    max_time = parse(max_time)
    # Query segmentMetadata for 7 days back. However, due to a bug,
    # we need to set this interval to more than 1 day ago to exclude
    # realtime segments, which trigged a bug (fixed in druid 0.8.2).
    # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
    start = (0 if self.version_higher(self.cluster.druid_version, '0.8.2') else 1)
    intervals = (max_time - timedelta(days=7)).isoformat() + '/'
    intervals += (max_time - timedelta(days=start)).isoformat()
    segment_metadata = client.segment_metadata(
        datasource=self.datasource_name,
        intervals=intervals)
    if segment_metadata:
        # Use the column map of the last segment entry returned.
        return segment_metadata[-1]['columns']
def generate_metrics(self):
    """Ask every column of this datasource to (re)create its metrics."""
    for datasource_column in self.columns:
        datasource_column.generate_metrics()
@classmethod
def sync_to_db_from_config(cls, druid_config, user, cluster):
    """Merges the ds config from druid_config into one stored in the db."""
    session = db.session()
    datasource = (
        session.query(DruidDatasource)
        .filter_by(
            datasource_name=druid_config['name'])
    ).first()
    # Create a new datasource.
    if not datasource:
        datasource = DruidDatasource(
            datasource_name=druid_config['name'],
            cluster=cluster,
            owner=user,
            changed_by_fk=user.id,
            created_by_fk=user.id,
        )
        session.add(datasource)

    # Create any dimension columns that are not in the db yet.
    dimensions = druid_config['dimensions']
    for dim in dimensions:
        col_obj = (
            session.query(DruidColumn)
            .filter_by(
                datasource_name=druid_config['name'],
                column_name=dim)
        ).first()
        if not col_obj:
            col_obj = DruidColumn(
                datasource_name=druid_config['name'],
                column_name=dim,
                groupby=True,
                filterable=True,
                # TODO: fetch type from Hive.
                type="STRING",
                datasource=datasource
            )
            session.add(col_obj)
    # Import Druid metrics
    for metric_spec in druid_config["metrics_spec"]:
        metric_name = metric_spec["name"]
        metric_type = metric_spec["type"]
        metric_json = json.dumps(metric_spec)

        if metric_type == "count":
            # Druid 'count' aggregations become longSum over the count
            # column when re-aggregating pre-aggregated data.
            metric_type = "longSum"
            metric_json = json.dumps({
                "type": "longSum",
                "name": metric_name,
                "fieldName": metric_name,
            })

        metric_obj = (
            session.query(DruidMetric)
            .filter_by(
                datasource_name=druid_config['name'],
                metric_name=metric_name)
        ).first()
        if not metric_obj:
            metric_obj = DruidMetric(
                metric_name=metric_name,
                metric_type=metric_type,
                verbose_name="%s(%s)" % (metric_type, metric_name),
                datasource=datasource,
                json=metric_json,
                description=(
                    "Imported from the airolap config dir for %s" %
                    druid_config['name']),
            )
            session.add(metric_obj)
    session.commit()
@classmethod
def sync_to_db(cls, name, cluster):
    """Fetches metadata for that datasource and merges the Caravel db"""
    logging.info("Syncing Druid datasource [{}]".format(name))
    session = get_session()
    datasource = session.query(cls).filter_by(datasource_name=name).first()
    if not datasource:
        datasource = cls(datasource_name=name)
        session.add(datasource)
        flasher("Adding new datasource [{}]".format(name), "success")
    else:
        flasher("Refreshing datasource [{}]".format(name), "info")
    session.flush()
    datasource.cluster = cluster
    session.flush()

    cols = datasource.latest_metadata()
    # latest_metadata() returns None when the datasource has no data.
    if not cols:
        return
    for col in cols:
        col_obj = (
            session
            .query(DruidColumn)
            .filter_by(datasource_name=name, column_name=col)
            .first()
        )
        datatype = cols[col]['type']
        if not col_obj:
            col_obj = DruidColumn(datasource_name=name, column_name=col)
            session.add(col_obj)
        # STRING dimensions are groupable/filterable; sketch types
        # support approximate count-distinct.
        if datatype == "STRING":
            col_obj.groupby = True
            col_obj.filterable = True
        if datatype == "hyperUnique" or datatype == "thetaSketch":
            col_obj.count_distinct = True
        if col_obj:
            col_obj.type = cols[col]['type']
        session.flush()
        col_obj.datasource = datasource
        col_obj.generate_metrics()
        session.flush()
def query(  # druid
        self, groupby, metrics,
        granularity,
        from_dttm, to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=None,
        row_limit=None,
        inner_from_dttm=None, inner_to_dttm=None,
        orderby=None,
        extras=None,  # noqa
        select=None,  # noqa
        columns=None, ):
    """Runs a query against Druid and returns a dataframe.

    This query interface is common to SqlAlchemy and Druid

    Note: ``orderby``, ``select`` and ``columns`` are accepted for
    interface parity with the SQLA implementation but are unused here.
    """
    # TODO refactor into using a TBD Query object
    qry_start_dttm = datetime.now()
    inner_from_dttm = inner_from_dttm or from_dttm
    inner_to_dttm = inner_to_dttm or to_dttm

    # add tzinfo to native datetime with config
    from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
    to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))

    query_str = ""
    metrics_dict = {m.metric_name: m for m in self.metrics}
    all_metrics = []
    post_aggs = {}

    def recursive_get_fields(_conf):
        # Collect the underlying field names a post-aggregation
        # depends on, recursing into nested arithmetic expressions.
        _fields = _conf.get('fields', [])
        field_names = []
        for _f in _fields:
            _type = _f.get('type')
            if _type in ['fieldAccess', 'hyperUniqueCardinality']:
                field_names.append(_f.get('fieldName'))
            elif _type == 'arithmetic':
                field_names += recursive_get_fields(_f)
        return list(set(field_names))

    # Split requested metrics into plain aggregations and post-aggs;
    # post-aggs also pull in the aggregations they reference.
    for metric_name in metrics:
        metric = metrics_dict[metric_name]
        if metric.metric_type != 'postagg':
            all_metrics.append(metric_name)
        else:
            conf = metric.json_obj
            all_metrics += recursive_get_fields(conf)
            all_metrics += conf.get('fieldNames', [])
            if conf.get('type') == 'javascript':
                post_aggs[metric_name] = JavascriptPostAggregator(
                    name=conf.get('name'),
                    field_names=conf.get('fieldNames'),
                    function=conf.get('function'))
            else:
                post_aggs[metric_name] = Postaggregator(
                    conf.get('fn', "/"),
                    conf.get('fields', []),
                    conf.get('name', ''))

    aggregations = {
        m.metric_name: m.json_obj
        for m in self.metrics
        if m.metric_name in all_metrics
    }

    # Enforce per-metric access control for restricted metrics.
    rejected_metrics = [
        m.metric_name for m in self.metrics
        if m.is_restricted and
        m.metric_name in aggregations.keys() and
        not sm.has_access('metric_access', m.perm)
    ]

    if rejected_metrics:
        raise MetricPermException(
            "Access to the metrics denied: " + ', '.join(rejected_metrics)
        )

    granularity = granularity or "all"
    if granularity != "all":
        # Human-readable delta (e.g. "1 hour") -> milliseconds.
        granularity = utils.parse_human_timedelta(
            granularity).total_seconds() * 1000
    if not isinstance(granularity, string_types):
        granularity = {"type": "duration", "duration": granularity}
        origin = extras.get('druid_time_origin')
        if origin:
            dttm = utils.parse_human_datetime(origin)
            granularity['origin'] = dttm.isoformat()

    qry = dict(
        datasource=self.datasource_name,
        dimensions=groupby,
        aggregations=aggregations,
        granularity=granularity,
        post_aggregations=post_aggs,
        intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
    )
    filters = self.get_filters(filter)
    if filters:
        qry['filter'] = filters

    having_filters = self.get_having_filters(extras.get('having_druid'))
    if having_filters:
        qry['having'] = having_filters

    client = self.cluster.get_pydruid_client()
    orig_filters = filters
    if timeseries_limit and is_timeseries:
        # Limit on the number of timeseries, doing a two-phases query
        # Phase 1 finds the top-N dimension value combinations; phase 2
        # restricts the real query to those combinations.
        pre_qry = deepcopy(qry)
        pre_qry['granularity'] = "all"
        pre_qry['limit_spec'] = {
            "type": "default",
            "limit": timeseries_limit,
            'intervals': (
                inner_from_dttm.isoformat() + '/' +
                inner_to_dttm.isoformat()),
            "columns": [{
                "dimension": metrics[0] if metrics else self.metrics[0],
                "direction": "descending",
            }],
        }
        client.groupby(**pre_qry)
        query_str += "// Two phase query\n// Phase 1\n"
        query_str += json.dumps(
            client.query_builder.last_query.query_dict, indent=2) + "\n"
        query_str += "//\nPhase 2 (built based on phase one's results)\n"
        df = client.export_pandas()
        if df is not None and not df.empty:
            dims = qry['dimensions']
            filters = []
            for unused, row in df.iterrows():
                fields = []
                for dim in dims:
                    f = Dimension(dim) == row[dim]
                    fields.append(f)
                if len(fields) > 1:
                    filt = Filter(type="and", fields=fields)
                    filters.append(filt)
                elif fields:
                    filters.append(fields[0])

            if filters:
                ff = Filter(type="or", fields=filters)
                if not orig_filters:
                    qry['filter'] = ff
                else:
                    qry['filter'] = Filter(type="and", fields=[
                        ff,
                        orig_filters])
            qry['limit_spec'] = None
    if row_limit:
        qry['limit_spec'] = {
            "type": "default",
            "limit": row_limit,
            "columns": [{
                "dimension": metrics[0] if metrics else self.metrics[0],
                "direction": "descending",
            }],
        }
    client.groupby(**qry)
    query_str += json.dumps(
        client.query_builder.last_query.query_dict, indent=2)
    df = client.export_pandas()
    if df is None or df.size == 0:
        raise Exception(_("No data was returned."))

    # Druid always returns a timestamp column; drop it for
    # non-timeseries "all"-granularity results.
    if (
            not is_timeseries and
            granularity == "all" and
            'timestamp' in df.columns):
        del df['timestamp']

    # Reordering columns
    cols = []
    if 'timestamp' in df.columns:
        cols += ['timestamp']
    cols += [col for col in groupby if col in df.columns]
    cols += [col for col in metrics if col in df.columns]
    df = df[cols]
    return QueryResult(
        df=df,
        query=query_str,
        duration=datetime.now() - qry_start_dttm)
@staticmethod
def get_filters(raw_filters):
    """Build a pydruid filter tree from ``(col, op, value)`` triples.

    Supported operators: ``==``, ``!=``, ``in``, ``not in`` and
    ``regex``. Conditions are AND-ed together; ``in`` lists with
    multiple (possibly quoted) values become an OR of equalities.

    :param raw_filters: iterable of ``(column, operator, value)``
        triples, or None/empty for no filtering
    :returns: a pydruid ``Filter`` tree, or None when nothing applies
    """
    filters = None
    # Tolerate None (e.g. when the caller passes filter=None).
    for col, op, eq in raw_filters or []:
        cond = None
        if op == '==':
            cond = Dimension(col) == eq
        elif op == '!=':
            cond = ~(Dimension(col) == eq)
        elif op in ('in', 'not in'):
            fields = []
            # Distinguish quoted values with regular value types
            splitted = FillterPattern.split(eq)[1::2]
            values = [v.replace("'", '') for v in splitted]
            if len(values) > 1:
                for s in values:
                    s = s.strip()
                    fields.append(Dimension(col) == s)
                cond = Filter(type="or", fields=fields)
            else:
                cond = Dimension(col) == eq
            if op == 'not in':
                cond = ~cond
        elif op == 'regex':
            cond = Filter(type="regex", pattern=eq, dimension=col)
        if cond is None:
            # Unknown operator: skip instead of AND-ing a None into
            # the filter tree (which breaks pydruid serialization).
            continue
        if filters:
            filters = Filter(type="and", fields=[
                cond,
                filters
            ])
        else:
            filters = cond
    return filters
def _get_having_obj(self, col, op, eq):
    """Translate one having triple into a pydruid having condition.

    ``==`` on a real column becomes a dimension selector; any other
    case is a comparison against the aggregation of that name.
    Returns None for unsupported operators.
    """
    if op == '==':
        if col in self.column_names:
            return DimSelector(dimension=col, value=eq)
        return Aggregation(col) == eq
    if op == '>':
        return Aggregation(col) > eq
    if op == '<':
        return Aggregation(col) < eq
    return None
def get_having_filters(self, raw_filters):
    """Combine having triples into one AND-ed pydruid having filter.

    Operators without a direct pydruid equivalent (``!=``, ``>=``,
    ``<=``) are expressed by negating their complement.

    :param raw_filters: iterable of ``(column, operator, value)``
        triples, or None/empty for no having clause
    :returns: combined having condition, or None when nothing applies
    """
    filters = None
    reversed_op_map = {
        '!=': '==',
        '>=': '<',
        '<=': '>'
    }
    # Tolerate None (e.g. extras.get('having_druid') missing).
    for col, op, eq in raw_filters or []:
        cond = None
        if op in ['==', '>', '<']:
            cond = self._get_having_obj(col, op, eq)
        elif op in reversed_op_map:
            cond = ~self._get_having_obj(col, reversed_op_map[op], eq)

        if filters:
            filters = filters & cond
        else:
            filters = cond
    return filters
class Log(Model):

    """ORM object used to log Caravel actions to the database"""

    __tablename__ = 'logs'
    id = Column(Integer, primary_key=True)
    # Name of the view function that was invoked.
    action = Column(String(512))
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    dashboard_id = Column(Integer)
    slice_id = Column(Integer)
    # JSON-serialized request arguments.
    json = Column(Text)
    user = relationship('User', backref='logs', foreign_keys=[user_id])
    dttm = Column(DateTime, default=func.now())
    # Pass the callable itself (not its result): date.today() here would
    # freeze the import-time date for every row inserted afterwards.
    dt = Column(Date, default=date.today)

    @classmethod
    def log_this(cls, f):
        """Decorator to log user actions"""
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            user_id = None
            if g.user:
                user_id = g.user.get_id()
            d = request.args.to_dict()
            d.update(kwargs)
            slice_id = d.get('slice_id', 0)
            try:
                slice_id = int(slice_id) if slice_id else 0
            except ValueError:
                slice_id = 0
            params = ""
            try:
                params = json.dumps(d)
            except Exception:
                # Logging must never break the wrapped view; fall back
                # to an empty payload for unserializable arguments.
                pass
            log = cls(
                action=f.__name__,
                json=params,
                dashboard_id=d.get('dashboard_id') or None,
                slice_id=slice_id,
                user_id=user_id)
            db.session.add(log)
            db.session.commit()
            return f(*args, **kwargs)
        return wrapper
class DruidMetric(Model, AuditMixinNullable):

    """ORM object referencing Druid metrics for a datasource"""

    __tablename__ = 'metrics'
    id = Column(Integer, primary_key=True)
    # Identifier used to reference the metric in queries.
    metric_name = Column(String(512))
    # Human-readable label shown in the UI.
    verbose_name = Column(String(1024))
    # Druid aggregator type (e.g. 'longSum'), or 'postagg'.
    metric_type = Column(String(32))
    datasource_name = Column(
        String(255),
        ForeignKey('datasources.datasource_name'))
    # Setting enable_typechecks=False disables polymorphic inheritance.
    datasource = relationship('DruidDatasource', backref='metrics',
                              enable_typechecks=False)
    # Raw JSON definition of the Druid (post-)aggregation.
    json = Column(Text)
    description = Column(Text)
    # When True, querying this metric requires 'metric_access' permission.
    is_restricted = Column(Boolean, default=False, nullable=True)
    # d3 number format string used when rendering values.
    d3format = Column(String(128))

    @property
    def json_obj(self):
        """Parsed ``json`` column; empty dict if missing or invalid."""
        try:
            obj = json.loads(self.json)
        except Exception:
            obj = {}
        return obj

    @property
    def perm(self):
        """Permission string for this metric, or None when orphaned."""
        return (
            "{parent_name}.[{obj.metric_name}](id:{obj.id})"
        ).format(obj=self,
                 parent_name=self.datasource.full_name
                 ) if self.datasource else None
class DruidColumn(Model, AuditMixinNullable):

    """ORM model for storing Druid datasource column metadata"""

    __tablename__ = 'columns'
    id = Column(Integer, primary_key=True)
    datasource_name = Column(
        String(255),
        ForeignKey('datasources.datasource_name'))
    # Setting enable_typechecks=False disables polymorphic inheritance.
    datasource = relationship('DruidDatasource', backref='columns',
                              enable_typechecks=False)
    column_name = Column(String(255))
    is_active = Column(Boolean, default=True)
    type = Column(String(32))
    # Capability flags controlling which metrics get auto-generated.
    groupby = Column(Boolean, default=False)
    count_distinct = Column(Boolean, default=False)
    sum = Column(Boolean, default=False)
    max = Column(Boolean, default=False)
    min = Column(Boolean, default=False)
    filterable = Column(Boolean, default=False)
    description = Column(Text)

    def __repr__(self):
        return self.column_name

    @property
    def isnum(self):
        """Whether the Druid type is numeric (sum/min/max make sense)."""
        return self.type in ('LONG', 'DOUBLE', 'FLOAT', 'INT')

    def generate_metrics(self):
        """Generate metrics based on the column metadata

        Builds the standard COUNT / SUM / MIN / MAX / COUNT DISTINCT
        metrics allowed by this column's capability flags, then persists
        only the ones that do not already exist for this datasource.
        """
        M = DruidMetric  # noqa
        metrics = []
        metrics.append(DruidMetric(
            metric_name='count',
            verbose_name='COUNT(*)',
            metric_type='count',
            json=json.dumps({'type': 'count', 'name': 'count'})
        ))
        # Somehow we need to reassign this for UDAFs
        if self.type in ('DOUBLE', 'FLOAT'):
            corrected_type = 'DOUBLE'
        else:
            corrected_type = self.type

        if self.sum and self.isnum:
            mt = corrected_type.lower() + 'Sum'
            name = 'sum__' + self.column_name
            metrics.append(DruidMetric(
                metric_name=name,
                metric_type='sum',
                verbose_name='SUM({})'.format(self.column_name),
                json=json.dumps({
                    'type': mt, 'name': name, 'fieldName': self.column_name})
            ))
        if self.min and self.isnum:
            mt = corrected_type.lower() + 'Min'
            name = 'min__' + self.column_name
            metrics.append(DruidMetric(
                metric_name=name,
                metric_type='min',
                verbose_name='MIN({})'.format(self.column_name),
                json=json.dumps({
                    'type': mt, 'name': name, 'fieldName': self.column_name})
            ))
        if self.max and self.isnum:
            mt = corrected_type.lower() + 'Max'
            name = 'max__' + self.column_name
            metrics.append(DruidMetric(
                metric_name=name,
                metric_type='max',
                verbose_name='MAX({})'.format(self.column_name),
                json=json.dumps({
                    'type': mt, 'name': name, 'fieldName': self.column_name})
            ))
        if self.count_distinct:
            name = 'count_distinct__' + self.column_name
            if self.type == 'hyperUnique' or self.type == 'thetaSketch':
                # Sketch columns use their own aggregator type directly.
                metrics.append(DruidMetric(
                    metric_name=name,
                    verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
                    metric_type=self.type,
                    json=json.dumps({
                        'type': self.type,
                        'name': name,
                        'fieldName': self.column_name
                    })
                ))
            else:
                # Plain columns fall back to Druid's cardinality aggregator.
                # (Dropped the unused `mt = 'count_distinct'` assignment.)
                metrics.append(DruidMetric(
                    metric_name=name,
                    verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
                    metric_type='count_distinct',
                    json=json.dumps({
                        'type': 'cardinality',
                        'name': name,
                        'fieldNames': [self.column_name]})
                ))
        session = get_session()
        new_metrics = []
        for metric in metrics:
            m = (
                session.query(M)
                .filter(M.metric_name == metric.metric_name)
                .filter(M.datasource_name == self.datasource_name)
                # NOTE(review): DruidCluster is not joined here, so this
                # predicate effectively cross-joins the clusters table;
                # presumably meant to scope by this datasource's cluster.
                # Preserved as-is -- TODO confirm and add the join.
                .filter(DruidCluster.cluster_name == self.datasource.cluster_name)
                .first()
            )
            metric.datasource_name = self.datasource_name
            if not m:
                new_metrics.append(metric)
                session.add(metric)
                session.flush()

        utils.init_metrics_perm(caravel, new_metrics)
class FavStar(Model):

    """Records that a user has 'starred' an object.

    The target is referenced generically by class name and id
    (presumably dashboards/slices -- confirm against callers).
    """

    __tablename__ = 'favstar'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    # Class name of the starred object (generic reference, no FK).
    class_name = Column(String(50))
    # Primary key of the starred object in its own table.
    obj_id = Column(Integer)
    dttm = Column(DateTime, default=func.now())
class QueryStatus:

    """Enumeration of lifecycle states for a SQL Lab ``Query``."""

    CANCELLED = 'cancelled'
    FAILED = 'failed'
    PENDING = 'pending'
    RUNNING = 'running'
    SCHEDULED = 'scheduled'
    SUCCESS = 'success'
    TIMED_OUT = 'timed_out'
class Query(Model):

    """ORM model for SQL query"""

    __tablename__ = 'query'
    id = Column(Integer, primary_key=True)
    # Client-generated id used by the SQL Lab frontend.
    client_id = Column(String(11), unique=True)
    database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)

    # Store the tmp table into the DB only if the user asks for it.
    tmp_table_name = Column(String(256))
    user_id = Column(
        Integer, ForeignKey('ab_user.id'), nullable=True)

    # One of the QueryStatus constants.
    status = Column(String(16), default=QueryStatus.PENDING)
    tab_name = Column(String(256))
    sql_editor_id = Column(String(256))
    schema = Column(String(256))
    sql = Column(Text)
    # Query to retrieve the results,
    # used only in case of select_as_cta_used is true.
    select_sql = Column(Text)
    executed_sql = Column(Text)
    # Could be configured in the caravel config.
    limit = Column(Integer)
    limit_used = Column(Boolean, default=False)
    select_as_cta = Column(Boolean)
    select_as_cta_used = Column(Boolean, default=False)

    progress = Column(Integer, default=0)  # 1..100
    # # of rows in the result set or rows modified.
    rows = Column(Integer)
    error_message = Column(Text)

    # Using Numeric in place of DateTime for sub-second precision
    # stored as seconds since epoch, allowing for milliseconds
    start_time = Column(Numeric(precision=3))
    end_time = Column(Numeric(precision=3))
    changed_on = Column(
        DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True)

    database = relationship(
        'Database', foreign_keys=[database_id], backref='queries')

    __table_args__ = (
        sqla.Index('ti_user_id_changed_on', user_id, changed_on),
    )

    def to_dict(self):
        """Serialize this query for the SQL Lab frontend (camelCase keys)."""
        return {
            'changedOn': self.changed_on,
            'changed_on': self.changed_on.isoformat(),
            'dbId': self.database_id,
            'endDttm': self.end_time,
            'errorMessage': self.error_message,
            'executedSql': self.executed_sql,
            'id': self.client_id,
            'limit': self.limit,
            'progress': self.progress,
            'rows': self.rows,
            'schema': self.schema,
            'ctas': self.select_as_cta,
            'serverId': self.id,
            'sql': self.sql,
            'sqlEditorId': self.sql_editor_id,
            'startDttm': self.start_time,
            'state': self.status.lower(),
            'tab': self.tab_name,
            'tempTable': self.tmp_table_name,
            'userId': self.user_id,
        }

    @property
    def name(self):
        """Table-name slug derived from tab name and current timestamp."""
        ts = datetime.now().isoformat()
        # Strip separators: '2016-01-01T12:34:56.78' -> '20160101T123456'.
        ts = ts.replace('-', '').replace(':', '').split('.')[0]
        tab = self.tab_name.replace(' ', '_').lower() if self.tab_name else 'notab'
        tab = re.sub(r'\W+', '', tab)
        return "sqllab_{tab}_{ts}".format(**locals())
class DatasourceAccessRequest(Model, AuditMixinNullable):

    """ORM model for the access requests for datasources and dbs."""

    __tablename__ = 'access_request'
    id = Column(Integer, primary_key=True)
    # Generic reference (no FK): id + type identify the datasource.
    datasource_id = Column(Integer)
    datasource_type = Column(String(200))

    # Roles that must never be granted or extended through this flow.
    ROLES_BLACKLIST = set(['Admin', 'Alpha', 'Gamma', 'Public'])

    @property
    def cls_model(self):
        # ORM class registered for this datasource_type.
        return SourceRegistry.sources[self.datasource_type]

    @property
    def username(self):
        return self.creator()

    @property
    def datasource(self):
        return self.get_datasource

    @datasource.getter
    @utils.memoized
    def get_datasource(self):
        """Look up (and memoize) the referenced datasource object."""
        ds = db.session.query(self.cls_model).filter_by(
            id=self.datasource_id).first()
        return ds

    @property
    def datasource_link(self):
        return self.datasource.link

    @property
    def roles_with_datasource(self):
        """HTML list of 'grant' links for roles already holding access."""
        action_list = ''
        pv = sm.find_permission_view_menu(
            'datasource_access', self.datasource.perm)
        for r in pv.role:
            if r.name in self.ROLES_BLACKLIST:
                continue
            url = (
                '/caravel/approve?datasource_type={self.datasource_type}&'
                'datasource_id={self.datasource_id}&'
                'created_by={self.created_by.username}&role_to_grant={r.name}'
                .format(**locals())
            )
            href = '<a href="{}">Grant {} Role</a>'.format(url, r.name)
            action_list = action_list + '<li>' + href + '</li>'
        return '<ul>' + action_list + '</ul>'

    @property
    def user_roles(self):
        """HTML list of 'extend' links for each of the requester's roles."""
        action_list = ''
        for r in self.created_by.roles:
            url = (
                '/caravel/approve?datasource_type={self.datasource_type}&'
                'datasource_id={self.datasource_id}&'
                'created_by={self.created_by.username}&role_to_extend={r.name}'
                .format(**locals())
            )
            href = '<a href="{}">Extend {} Role</a>'.format(url, r.name)
            # Blacklisted roles are shown but not extendable.
            if r.name in self.ROLES_BLACKLIST:
                href = "{} Role".format(r.name)
            action_list = action_list + '<li>' + href + '</li>'
        return '<ul>' + action_list + '</ul>'
class EchartMapType(Model):

    """
    the map tile file object!
    """

    __tablename__ = "echart_map_type"
    id = Column(Integer, primary_key=True)
    # Uploaded map tile file (flask-appbuilder FileColumn).
    file = Column(FileColumn, nullable=False)
    map_name = Column(String(150))

    def download(self):
        # HTML link to download the stored file via the model view.
        return Markup(
            '<a href="' + url_for('EchartMapTypeModelView.download', filename=str(self.file)) + '">Download</a>')

    def file_name(self):
        # Original (pre-upload) name of the stored file.
        return get_file_original_name(str(self.file))
@property
def map_url(self):
return url_for('EchartMapTypeModelView.download', filename=str(self.file)) | {
"repo_name": "wbsljh/caravel",
"path": "caravel/models.py",
"copies": "1",
"size": "73950",
"license": "apache-2.0",
"hash": -1222798785848183600,
"line_mean": 34.0312648034,
"line_max": 113,
"alpha_frac": 0.5446382691,
"autogenerated": false,
"ratio": 3.975165295920013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019803565020012,
"avg_score": null,
"num_lines": null
} |
"""A collection of ORM sqlalchemy models for Caravel"""
from copy import deepcopy, copy
from collections import namedtuple
from datetime import timedelta, datetime, date
import functools
import json
import logging
from six import string_types
import sqlparse
import requests
from dateutil.parser import parse
from flask import flash, request, g
from flask.ext.appbuilder import Model
from flask.ext.appbuilder.models.mixins import AuditMixin
import pandas as pd
import humanize
from pydruid import client
from pydruid.utils.filters import Dimension, Filter
import sqlalchemy as sqla
from sqlalchemy import (
Column, Integer, String, ForeignKey, Text, Boolean, DateTime, Date,
Table, create_engine, MetaData, desc, select, and_, func)
from sqlalchemy.engine import reflection
from sqlalchemy.orm import relationship
from sqlalchemy.sql import table, literal_column, text, column
from sqlalchemy.sql.elements import ColumnClause
from sqlalchemy_utils import EncryptedType
from caravel import app, db, get_session, utils
from caravel.viz import viz_types
from sqlalchemy.ext.declarative import declared_attr
config = app.config
# Lightweight container for query results handed back to the viz layer.
# The typename must match the variable name ('QueryResult', not the
# literal 'namedtuple') so repr() and pickling identify it correctly.
QueryResult = namedtuple('QueryResult', ['df', 'query', 'duration'])
class AuditMixinNullable(AuditMixin):

    """Altering the AuditMixin to use nullable fields

    Allows creating objects programmatically outside of CRUD
    """

    created_on = Column(DateTime, default=datetime.now, nullable=True)
    changed_on = Column(
        DateTime, default=datetime.now,
        onupdate=datetime.now, nullable=True)

    @declared_attr
    def created_by_fk(cls):
        return Column(Integer, ForeignKey('ab_user.id'),
                      default=cls.get_user_id, nullable=True)

    @declared_attr
    def changed_by_fk(cls):
        return Column(
            Integer, ForeignKey('ab_user.id'),
            default=cls.get_user_id, onupdate=cls.get_user_id, nullable=True)

    @property
    def created_by_(self):
        return '{}'.format(self.created_by or '')

    @property  # noqa
    def changed_by_(self):
        return '{}'.format(self.changed_by or '')

    @property
    def modified(self):
        """Humanized 'last changed' delta, e.g. '3 hours ago'."""
        s = humanize.naturaltime(datetime.now() - self.changed_on)
        # Close the <span> that was opened (was the mismatched </nobr>).
        return '<span class="no-wrap">{}</span>'.format(s)

    @property
    def icons(self):
        """Datasource icon link; expects datasource_edit_url/datasource."""
        return """
        <a
                href="{self.datasource_edit_url}"
            data-toggle="tooltip"
            title="{self.datasource}">
            <i class="fa fa-database"></i>
        </a>
        """.format(**locals())
class Url(Model, AuditMixinNullable):

    """Used for the short url feature"""

    __tablename__ = 'url'
    id = Column(Integer, primary_key=True)
    # Full target URL the short id redirects to.
    url = Column(Text)
class CssTemplate(Model, AuditMixinNullable):

    """CSS templates for dashboards"""

    __tablename__ = 'css_templates'
    id = Column(Integer, primary_key=True)
    template_name = Column(String(250))
    # Raw CSS applied on top of the dashboard styles.
    css = Column(Text, default='')
class Slice(Model, AuditMixinNullable):

    """A slice is essentially a report or a view on data"""

    __tablename__ = 'slices'
    id = Column(Integer, primary_key=True)
    slice_name = Column(String(250))
    # A slice points at either a Druid datasource or a SQL table.
    druid_datasource_id = Column(Integer, ForeignKey('datasources.id'))
    table_id = Column(Integer, ForeignKey('tables.id'))
    datasource_type = Column(String(200))
    datasource_name = Column(String(2000))
    viz_type = Column(String(250))
    # JSON-encoded form data driving the visualization.
    params = Column(Text)
    description = Column(Text)
    cache_timeout = Column(Integer)

    table = relationship(
        'SqlaTable', foreign_keys=[table_id], backref='slices')
    druid_datasource = relationship(
        'DruidDatasource', foreign_keys=[druid_datasource_id], backref='slices')

    def __repr__(self):
        return self.slice_name

    @property
    def datasource(self):
        # Whichever datasource is set; table wins if both are present.
        return self.table or self.druid_datasource

    @property
    def datasource_link(self):
        if self.table:
            return self.table.link
        elif self.druid_datasource:
            return self.druid_datasource.link

    @property
    def datasource_edit_url(self):
        if self.table:
            return self.table.url
        elif self.druid_datasource:
            return self.druid_datasource.url

    @property
    @utils.memoized
    def viz(self):
        """Instantiate (and memoize) the viz object from stored params."""
        d = json.loads(self.params)
        viz = viz_types[self.viz_type](
            self.datasource,
            form_data=d)
        return viz

    @property
    def description_markeddown(self):
        return utils.markdown(self.description)

    @property
    def datasource_id(self):
        return self.table_id or self.druid_datasource_id

    @property
    def data(self):
        """The viz's data payload, tagged with this slice's id."""
        d = self.viz.data
        d['slice_id'] = self.id
        return d

    @property
    def json_data(self):
        return json.dumps(self.data)

    @property
    def slice_url(self):
        """Defines the url to access the slice"""
        try:
            slice_params = json.loads(self.params)
        except Exception as e:
            # Corrupt params should not break link rendering.
            logging.exception(e)
            slice_params = {}
        slice_params['slice_id'] = self.id
        slice_params['slice_name'] = self.slice_name
        from werkzeug.urls import Href
        href = Href(
            "/caravel/explore/{self.datasource_type}/"
            "{self.datasource_id}/".format(self=self))
        return href(slice_params)

    @property
    def edit_url(self):
        return "/slicemodelview/edit/{}".format(self.id)

    @property
    def slice_link(self):
        url = self.slice_url
        return '<a href="{url}">{self.slice_name}</a>'.format(
            url=url, self=self)
# Many-to-many association table linking dashboards to the slices
# they contain.
dashboard_slices = Table(
    'dashboard_slices', Model.metadata,
    Column('id', Integer, primary_key=True),
    Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
    Column('slice_id', Integer, ForeignKey('slices.id')),
)
class Dashboard(Model, AuditMixinNullable):

    """The dashboard object!"""

    __tablename__ = 'dashboards'
    id = Column(Integer, primary_key=True)
    dashboard_title = Column(String(500))
    # Serialized slice layout for the dashboard grid.
    position_json = Column(Text)
    description = Column(Text)
    # Custom CSS applied on top of the dashboard.
    css = Column(Text)
    json_metadata = Column(Text)
    # Unique URL slug, used in place of the numeric id when set.
    slug = Column(String(255), unique=True)
    slices = relationship(
        'Slice', secondary=dashboard_slices, backref='dashboards')

    def __repr__(self):
        return self.dashboard_title

    @property
    def url(self):
        return "/caravel/dashboard/{}/".format(self.slug or self.id)

    @property
    def metadata_dejson(self):
        """Parsed ``json_metadata``; empty dict when unset."""
        if self.json_metadata:
            return json.loads(self.json_metadata)
        else:
            return {}

    def dashboard_link(self):
        return '<a href="{self.url}">{self.dashboard_title}</a>'.format(self=self)

    @property
    def json_data(self):
        """JSON payload consumed by the dashboard frontend."""
        d = {
            'id': self.id,
            'metadata': self.metadata_dejson,
            'dashboard_title': self.dashboard_title,
            'slug': self.slug,
            'slices': [slc.data for slc in self.slices],
        }
        return json.dumps(d)
class Queryable(object):

    """A common interface to objects that are queryable (tables and datasources)"""

    @property
    def column_names(self):
        """Alphabetically sorted names of all columns."""
        return sorted(col.column_name for col in self.columns)

    @property
    def main_dttm_col(self):
        """Default datetime column name."""
        return "timestamp"

    @property
    def groupby_column_names(self):
        """Sorted names of columns usable as group-bys."""
        return sorted(col.column_name for col in self.columns if col.groupby)

    @property
    def filterable_column_names(self):
        """Sorted names of columns usable in filters."""
        return sorted(col.column_name for col in self.columns if col.filterable)

    @property
    def dttm_cols(self):
        """Datetime columns; none by default, subclasses override."""
        return []
class Database(Model, AuditMixinNullable):

    """An ORM object that stores Database related information"""

    __tablename__ = 'dbs'
    id = Column(Integer, primary_key=True)
    database_name = Column(String(250), unique=True)
    sqlalchemy_uri = Column(String(1024))
    # Connection password, stored encrypted with the app SECRET_KEY.
    password = Column(EncryptedType(String(1024), config.get('SECRET_KEY')))
    cache_timeout = Column(Integer)

    def __repr__(self):
        return self.database_name

    def get_sqla_engine(self):
        """Create a SQLAlchemy engine from the decrypted URI."""
        return create_engine(self.sqlalchemy_uri_decrypted)

    def safe_sqlalchemy_uri(self):
        # NOTE(review): despite the name, no masking happens here --
        # the stored URI is returned as-is; confirm passwords can't leak.
        return self.sqlalchemy_uri

    def grains(self):
        """Defines time granularity database-specific expressions.

        The idea here is to make it easy for users to change the time grain
        form a datetime (maybe the source grain is arbitrary timestamps, daily
        or 5 minutes increments) to another, "truncated" datetime. Since
        each database has slightly different but similar datetime functions,
        this allows a mapping between database engines and actual functions.
        """
        Grain = namedtuple('Grain', 'name function')
        db_time_grains = {
            'presto': (
                Grain('Time Column', '{col}'),
                Grain('week', "date_trunc('week', CAST({col} AS DATE))"),
                Grain('month', "date_trunc('month', CAST({col} AS DATE))"),
                Grain("week_ending_saturday", "date_add('day', 5, "
                      "date_trunc('week', date_add('day', 1, CAST({col} AS DATE))))"),
                Grain("week_start_sunday", "date_add('day', -1, "
                      "date_trunc('week', date_add('day', 1, CAST({col} AS DATE))))")
            ),
            'mysql': (
                Grain('Time Column', '{col}'),
                Grain('day', 'DATE({col})'),
                Grain('week', 'DATE_SUB({col}, INTERVAL DAYOFWEEK({col}) - 1 DAY)'),
                Grain('month', 'DATE_SUB({col}, INTERVAL DAYOFMONTH({col}) - 1 DAY)'),
            ),
        }
        # Match on the URI scheme prefix; returns None for other engines.
        for db_type, grains in db_time_grains.items():
            if self.sqlalchemy_uri.startswith(db_type):
                return grains

    def grains_dict(self):
        """Map grain name -> Grain tuple for this database's engine."""
        return {grain.name: grain for grain in self.grains()}

    def get_table(self, table_name):
        """Reflect a table definition from the target database."""
        meta = MetaData()
        return Table(
            table_name, meta,
            autoload=True,
            autoload_with=self.get_sqla_engine())

    def get_columns(self, table_name):
        """Inspect and return the column descriptions of a table."""
        engine = self.get_sqla_engine()
        insp = reflection.Inspector.from_engine(engine)
        return insp.get_columns(table_name)

    @property
    def sqlalchemy_uri_decrypted(self):
        # Re-inject the decrypted password into the stored URI.
        conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
        conn.password = self.password
        return str(conn)

    @property
    def sql_url(self):
        return '/caravel/sql/{}/'.format(self.id)

    @property
    def sql_link(self):
        return '<a href="{}">SQL</a>'.format(self.sql_url)
class SqlaTable(Model, Queryable, AuditMixinNullable):

    """An ORM object for SqlAlchemy table references"""

    type = "table"

    __tablename__ = 'tables'
    id = Column(Integer, primary_key=True)
    table_name = Column(String(250), unique=True)
    # Default datetime column used for time filtering/grouping.
    main_dttm_col = Column(String(250))
    description = Column(Text)
    default_endpoint = Column(Text)
    database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
    is_featured = Column(Boolean, default=False)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    owner = relationship('User', backref='tables', foreign_keys=[user_id])
    database = relationship(
        'Database', backref='tables', foreign_keys=[database_id])
    # presumably a timestamp offset applied to this table -- TODO confirm
    offset = Column(Integer, default=0)
    cache_timeout = Column(Integer)
    baselink = "tablemodelview"

    def __repr__(self):
        return self.table_name
@property
def description_markeddown(self):
    return utils.markdown(self.description)

@property
def url(self):
    return '/tablemodelview/edit/{}'.format(self.id)

@property
def link(self):
    return '<a href="{self.url}">{self.table_name}</a>'.format(**locals())

@property
def perm(self):
    """Permission string identifying this table."""
    return (
        "[{self.database}].[{self.table_name}]"
        "(id:{self.id})").format(self=self)

@property
def full_name(self):
    return "[{self.database}].[{self.table_name}]".format(self=self)

@property
def dttm_cols(self):
    # All datetime-flagged columns, always including the main one.
    l = [c.column_name for c in self.columns if c.is_dttm]
    if self.main_dttm_col not in l:
        l.append(self.main_dttm_col)
    return l

@property
def any_dttm_col(self):
    # First available datetime column, or None.
    cols = self.dttm_cols
    if cols:
        return cols[0]

@property
def html(self):
    """HTML table describing the columns (field, type)."""
    t = ((c.column_name, c.type) for c in self.columns)
    df = pd.DataFrame(t)
    df.columns = ['field', 'type']
    return df.to_html(
        index=False,
        classes=(
            "dataframe table table-striped table-bordered "
            "table-condensed"))

@property
def name(self):
    return self.table_name

@property
def table_link(self):
    url = "/caravel/explore/{self.type}/{self.id}/".format(self=self)
    return '<a href="{url}">{self.table_name}</a>'.format(
        url=url, self=self)

@property
def metrics_combo(self):
    # (name, label) pairs sorted by label, for select widgets.
    return sorted(
        [
            (m.metric_name, m.verbose_name or m.metric_name)
            for m in self.metrics],
        key=lambda x: x[1])

@property
def sql_url(self):
    return self.database.sql_url + "?table_name=" + str(self.table_name)

@property
def sql_link(self):
    return '<a href="{}">SQL</a>'.format(self.sql_url)
def query(  # sqla
        self, groupby, metrics,
        granularity,
        from_dttm, to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=15, row_limit=None,
        inner_from_dttm=None, inner_to_dttm=None,
        extras=None,
        columns=None):
    """Querying any sqla table from this common interface"""
    # For backward compatibility
    if granularity not in self.dttm_cols:
        granularity = self.main_dttm_col

    cols = {col.column_name: col for col in self.columns}
    qry_start_dttm = datetime.now()

    if not granularity and is_timeseries:
        raise Exception(
            "Datetime column not provided as part table configuration "
            "and is required by this type of chart")

    # Requested metrics, each labeled by its metric name.
    metrics_exprs = [
        literal_column(m.expression).label(m.metric_name)
        for m in self.metrics if m.metric_name in metrics]

    # First metric drives ordering (top-N series selection).
    if metrics:
        main_metric_expr = literal_column([
            m.expression for m in self.metrics
            if m.metric_name == metrics[0]][0])
    else:
        main_metric_expr = literal_column("COUNT(*)")

    select_exprs = []
    groupby_exprs = []

    if groupby:
        select_exprs = []
        inner_select_exprs = []
        inner_groupby_exprs = []
        for s in groupby:
            col = cols[s]
            expr = col.expression
            # Inner aliases get a '__' prefix so the outer query can
            # join the top-N subquery on them.
            if expr:
                outer = literal_column(expr).label(s)
                inner = literal_column(expr).label('__' + s)
            else:
                outer = column(s).label(s)
                inner = column(s).label('__' + s)

            groupby_exprs.append(outer)
            select_exprs.append(outer)
            inner_groupby_exprs.append(inner)
            inner_select_exprs.append(inner)
    elif columns:
        # Raw column mode: no aggregation at all.
        for s in columns:
            select_exprs.append(s)
        metrics_exprs = []

    if granularity:
        dttm_expr = cols[granularity].expression or granularity
        timestamp = literal_column(dttm_expr).label('timestamp')

        # Transforming time grain into an expression based on configuration
        time_grain_sqla = extras.get('time_grain_sqla')
        if time_grain_sqla:
            udf = self.database.grains_dict().get(time_grain_sqla, '{col}')
            timestamp_grain = literal_column(
                udf.function.format(col=dttm_expr)).label('timestamp')
        else:
            timestamp_grain = timestamp

        if is_timeseries:
            select_exprs += [timestamp_grain]
            groupby_exprs += [timestamp_grain]

        tf = '%Y-%m-%d %H:%M:%S.%f'
        time_filter = [
            timestamp >= from_dttm.strftime(tf),
            timestamp <= to_dttm.strftime(tf),
        ]
        # Inner window may differ for the top-N series subquery.
        inner_time_filter = copy(time_filter)
        if inner_from_dttm:
            inner_time_filter[0] = timestamp >= inner_from_dttm.strftime(tf)
        if inner_to_dttm:
            inner_time_filter[1] = timestamp <= inner_to_dttm.strftime(tf)

    select_exprs += metrics_exprs
    qry = select(select_exprs)
    from_clause = table(self.table_name)
    if not columns:
        qry = qry.group_by(*groupby_exprs)

    where_clause_and = []
    having_clause_and = []
    # NOTE(review): iterating `filter` raises if it is None (the
    # default) -- presumably callers always pass a list; confirm.
    for col, op, eq in filter:
        col_obj = cols[col]
        if op in ('in', 'not in'):
            values = eq.split(",")
            if col_obj.expression:
                cond = ColumnClause(
                    col_obj.expression, is_literal=True).in_(values)
            else:
                cond = column(col).in_(values)
            if op == 'not in':
                cond = ~cond
            where_clause_and.append(cond)
    if extras and 'where' in extras:
        where_clause_and += [text(extras['where'])]
    if extras and 'having' in extras:
        having_clause_and += [text(extras['having'])]
    # NOTE(review): where_clause_and is only applied when granularity
    # is set; without granularity the filters appear dropped -- confirm.
    if granularity:
        qry = qry.where(and_(*(time_filter + where_clause_and)))
    qry = qry.having(and_(*having_clause_and))
    if groupby:
        qry = qry.order_by(desc(main_metric_expr))
    qry = qry.limit(row_limit)

    if timeseries_limit and groupby:
        # Top-N series: restrict the outer query to the dimension
        # combinations returned by this inner aggregate subquery.
        subq = select(inner_select_exprs)
        subq = subq.select_from(table(self.table_name))
        subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
        subq = subq.group_by(*inner_groupby_exprs)
        subq = subq.order_by(desc(main_metric_expr))
        subq = subq.limit(timeseries_limit)
        on_clause = []
        for i, gb in enumerate(groupby):
            on_clause.append(
                groupby_exprs[i] == column("__" + gb))

        from_clause = from_clause.join(subq.alias(), and_(*on_clause))

    qry = qry.select_from(from_clause)

    engine = self.database.get_sqla_engine()
    sql = "{}".format(
        qry.compile(engine, compile_kwargs={"literal_binds": True}))
    df = pd.read_sql_query(
        sql=sql,
        con=engine
    )
    sql = sqlparse.format(sql, reindent=True)
    return QueryResult(
        df=df, duration=datetime.now() - qry_start_dttm, query=sql)
def fetch_metadata(self):
    """Fetch column metadata from the physical table and merge it in.

    Reflects the table through the database connection, creates or
    updates a ``TableColumn`` row per physical column, derives default
    aggregate ``SqlMetric`` entries (sum/min/max/count distinct) for
    flagged columns, and picks a default datetime column when none is
    set. Flashes an error and returns early if reflection fails.
    """
    # The original code called get_table() once unguarded before the
    # try block, which raised before the except could flash a message.
    try:
        table = self.database.get_table(self.table_name)
    except Exception as e:
        flash(str(e))
        flash(
            "Table doesn't seem to exist in the specified database, "
            "couldn't fetch column information", "danger")
        return
    TC = TableColumn  # noqa shortcut to class
    M = SqlMetric  # noqa
    metrics = []
    any_date_col = None
    for col in table.columns:
        try:
            datatype = str(col.type)
        except Exception:
            # Some dialects raise when a type can't be stringified.
            datatype = "UNKNOWN"
        dbcol = (
            db.session
            .query(TC)
            .filter(TC.table == self)
            .filter(TC.column_name == col.name)
            .first()
        )
        db.session.flush()
        if not dbcol:
            dbcol = TableColumn(column_name=col.name)
        num_types = ('DOUBLE', 'FLOAT', 'INT', 'BIGINT', 'LONG')
        datatype = datatype.upper()
        if datatype.startswith(('VARCHAR', 'STRING')):
            # String-ish columns make sensible groupby/filter targets.
            dbcol.groupby = True
            dbcol.filterable = True
        elif any(t in datatype for t in num_types):
            dbcol.sum = True
        db.session.merge(self)
        self.columns.append(dbcol)
        if not any_date_col and 'date' in datatype.lower():
            any_date_col = col.name
        # Quote the column name according to the target db's dialect.
        quoted = "{}".format(
            column(dbcol.column_name).compile(dialect=db.engine.dialect))
        if dbcol.sum:
            metrics.append(M(
                metric_name='sum__' + dbcol.column_name,
                verbose_name='sum__' + dbcol.column_name,
                metric_type='sum',
                expression="SUM({})".format(quoted)
            ))
        if dbcol.max:
            metrics.append(M(
                metric_name='max__' + dbcol.column_name,
                verbose_name='max__' + dbcol.column_name,
                metric_type='max',
                expression="MAX({})".format(quoted)
            ))
        if dbcol.min:
            metrics.append(M(
                metric_name='min__' + dbcol.column_name,
                verbose_name='min__' + dbcol.column_name,
                metric_type='min',
                expression="MIN({})".format(quoted)
            ))
        if dbcol.count_distinct:
            metrics.append(M(
                metric_name='count_distinct__' + dbcol.column_name,
                verbose_name='count_distinct__' + dbcol.column_name,
                metric_type='count_distinct',
                expression="COUNT(DISTINCT {})".format(quoted)
            ))
        dbcol.type = datatype
        db.session.merge(self)
        db.session.commit()
    metrics.append(M(
        metric_name='count',
        verbose_name='COUNT(*)',
        metric_type='count',
        expression="COUNT(*)"
    ))
    for metric in metrics:
        m = (
            db.session.query(M)
            .filter(M.metric_name == metric.metric_name)
            .filter(M.table_id == self.id)
            .first()
        )
        metric.table_id = self.id
        # Only insert metrics that don't already exist for this table.
        if not m:
            db.session.add(metric)
        db.session.commit()
    if not self.main_dttm_col:
        self.main_dttm_col = any_date_col
class SqlMetric(Model, AuditMixinNullable):

    """ORM object for metrics, each table can have multiple metrics"""

    __tablename__ = 'sql_metrics'
    id = Column(Integer, primary_key=True)
    metric_name = Column(String(512))
    verbose_name = Column(String(1024))
    # e.g. 'sum', 'min', 'max', 'count_distinct' (see fetch_metadata)
    metric_type = Column(String(32))
    table_id = Column(Integer, ForeignKey('tables.id'))
    table = relationship(
        'SqlaTable', backref='metrics', foreign_keys=[table_id])
    # SQL expression computing the metric, e.g. "SUM(col)"
    expression = Column(Text)
    description = Column(Text)
class TableColumn(Model, AuditMixinNullable):

    """ORM object for table columns, each table can have multiple columns"""

    __tablename__ = 'table_columns'
    id = Column(Integer, primary_key=True)
    table_id = Column(Integer, ForeignKey('tables.id'))
    table = relationship(
        'SqlaTable', backref='columns', foreign_keys=[table_id])
    column_name = Column(String(256))
    is_dttm = Column(Boolean, default=False)
    is_active = Column(Boolean, default=True)
    type = Column(String(32), default='')
    # Flags controlling how the column may be used in queries:
    groupby = Column(Boolean, default=False)
    count_distinct = Column(Boolean, default=False)
    sum = Column(Boolean, default=False)
    max = Column(Boolean, default=False)
    min = Column(Boolean, default=False)
    filterable = Column(Boolean, default=False)
    # Optional SQL expression overriding the raw column reference.
    expression = Column(Text, default='')
    description = Column(Text, default='')

    def __repr__(self):
        return self.column_name

    @property
    def isnum(self):
        """Whether the declared type string looks numeric."""
        types = ('LONG', 'DOUBLE', 'FLOAT', 'BIGINT', 'INT')
        return any([t in self.type.upper() for t in types])
class DruidCluster(Model, AuditMixinNullable):

    """ORM object referencing the Druid clusters"""

    __tablename__ = 'clusters'
    id = Column(Integer, primary_key=True)
    cluster_name = Column(String(250), unique=True)
    coordinator_host = Column(String(256))
    coordinator_port = Column(Integer)
    coordinator_endpoint = Column(
        String(256), default='druid/coordinator/v1/metadata')
    broker_host = Column(String(256))
    broker_port = Column(Integer)
    broker_endpoint = Column(String(256), default='druid/v2')
    metadata_last_refreshed = Column(DateTime)

    def __repr__(self):
        return self.cluster_name

    def get_pydruid_client(self):
        """Return a pydruid client pointed at this cluster's broker."""
        cli = client.PyDruid(
            "http://{0}:{1}/".format(self.broker_host, self.broker_port),
            self.broker_endpoint)
        return cli

    def refresh_datasources(self):
        """Sync every datasource listed by the coordinator into the db."""
        endpoint = (
            "http://{self.coordinator_host}:{self.coordinator_port}/"
            "{self.coordinator_endpoint}/datasources"
        ).format(self=self)
        # NOTE(review): no timeout or error handling on this HTTP call —
        # an unreachable coordinator raises straight through.
        datasources = json.loads(requests.get(endpoint).text)
        for datasource in datasources:
            DruidDatasource.sync_to_db(datasource, self)
class DruidDatasource(Model, AuditMixinNullable, Queryable):

    """ORM object referencing Druid datasources (tables)"""

    type = "druid"
    baselink = "datasourcemodelview"

    __tablename__ = 'datasources'
    id = Column(Integer, primary_key=True)
    datasource_name = Column(String(250), unique=True)
    is_featured = Column(Boolean, default=False)
    is_hidden = Column(Boolean, default=False)
    description = Column(Text)
    default_endpoint = Column(Text)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    owner = relationship('User', backref='datasources', foreign_keys=[user_id])
    cluster_name = Column(
        String(250), ForeignKey('clusters.cluster_name'))
    cluster = relationship(
        'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
    offset = Column(Integer, default=0)
    cache_timeout = Column(Integer)

    @property
    def metrics_combo(self):
        """(metric_name, verbose_name) pairs sorted by verbose name."""
        return sorted(
            [(m.metric_name, m.verbose_name) for m in self.metrics],
            key=lambda x: x[1])

    @property
    def name(self):
        return self.datasource_name

    @property
    def perm(self):
        """Permission string used by the security layer."""
        return (
            "[{self.cluster_name}].[{self.datasource_name}]"
            "(id:{self.id})").format(self=self)

    @property
    def url(self):
        return '/datasourcemodelview/edit/{}'.format(self.id)

    @property
    def link(self):
        """HTML anchor pointing at the datasource edit page."""
        return (
            '<a href="{self.url}">'
            '{self.datasource_name}</a>').format(**locals())

    @property
    def full_name(self):
        return (
            "[{self.cluster_name}]."
            "[{self.datasource_name}]").format(self=self)

    def __repr__(self):
        return self.datasource_name

    @property
    def datasource_link(self):
        """HTML anchor pointing at the explore view for this datasource."""
        url = "/caravel/explore/{self.type}/{self.id}/".format(self=self)
        return '<a href="{url}">{self.datasource_name}</a>'.format(
            url=url, self=self)

    def get_metric_obj(self, metric_name):
        """Return the json_obj of the metric with that name.

        NOTE(review): raises IndexError if no metric matches — presumably
        callers guarantee existence; confirm.
        """
        return [
            m.json_obj for m in self.metrics
            if m.metric_name == metric_name
        ][0]

    def latest_metadata(self):
        """Returns segment metadata from the latest segment"""
        client = self.cluster.get_pydruid_client()
        results = client.time_boundary(datasource=self.datasource_name)
        if not results:
            return
        max_time = results[0]['result']['maxTime']
        max_time = parse(max_time)
        # Query segmentMetadata for 7 days back. However, due to a bug,
        # we need to set this interval to more than 1 day ago to exclude
        # realtime segments, which trigged a bug (fixed in druid 0.8.2).
        # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
        intervals = (max_time - timedelta(days=7)).isoformat() + '/'
        intervals += (max_time - timedelta(days=1)).isoformat()
        segment_metadata = client.segment_metadata(
            datasource=self.datasource_name,
            intervals=intervals)
        if segment_metadata:
            return segment_metadata[-1]['columns']

    def generate_metrics(self):
        """Regenerate the default metrics of every column."""
        for col in self.columns:
            col.generate_metrics()

    @classmethod
    def sync_to_db(cls, name, cluster):
        """Fetches metadata for that datasource and merges the Caravel db"""
        print("Syncing Druid datasource [{}]".format(name))
        session = get_session()
        datasource = session.query(cls).filter_by(datasource_name=name).first()
        if not datasource:
            datasource = cls(datasource_name=name)
            session.add(datasource)
            flash("Adding new datasource [{}]".format(name), "success")
        else:
            flash("Refreshing datasource [{}]".format(name), "info")
        datasource.cluster = cluster
        cols = datasource.latest_metadata()
        if not cols:
            return
        for col in cols:
            col_obj = (
                session
                .query(DruidColumn)
                .filter_by(datasource_name=name, column_name=col)
                .first()
            )
            datatype = cols[col]['type']
            if not col_obj:
                col_obj = DruidColumn(datasource_name=name, column_name=col)
                session.add(col_obj)
            # String dimensions become groupable/filterable by default.
            if datatype == "STRING":
                col_obj.groupby = True
                col_obj.filterable = True
            if col_obj:
                col_obj.type = cols[col]['type']
            col_obj.datasource = datasource
            col_obj.generate_metrics()

    def query(
            self, groupby, metrics,
            granularity,
            from_dttm, to_dttm,
            filter=None,  # noqa
            is_timeseries=True,
            timeseries_limit=None,
            row_limit=None,
            inner_from_dttm=None, inner_to_dttm=None,
            extras=None,  # noqa
            select=None,):  # noqa
        """Runs a query against Druid and returns a dataframe.

        This query interface is common to SqlAlchemy and Druid

        NOTE(review): ``filter`` defaults to None but is iterated below —
        presumably callers always pass a list; confirm.
        """
        # TODO refactor into using a TBD Query object
        qry_start_dttm = datetime.now()
        inner_from_dttm = inner_from_dttm or from_dttm
        inner_to_dttm = inner_to_dttm or to_dttm

        # add tzinfo to native datetime with config
        from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
        to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))

        query_str = ""
        # Restrict aggregations to the metrics actually requested.
        aggregations = {
            m.metric_name: m.json_obj
            for m in self.metrics if m.metric_name in metrics
        }
        granularity = granularity or "all"
        if granularity != "all":
            # Convert a human timedelta ("1 hour") to milliseconds.
            granularity = utils.parse_human_timedelta(
                granularity).total_seconds() * 1000
        if not isinstance(granularity, string_types):
            granularity = {"type": "duration", "duration": granularity}
        qry = dict(
            datasource=self.datasource_name,
            dimensions=groupby,
            aggregations=aggregations,
            granularity=granularity,
            intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
        )
        # Fold the (col, op, eq) filter triples into one AND-ed Filter.
        filters = None
        for col, op, eq in filter:
            cond = None
            if op == '==':
                cond = Dimension(col) == eq
            elif op == '!=':
                cond = ~(Dimension(col) == eq)
            elif op in ('in', 'not in'):
                fields = []
                # ``eq`` is a comma-separated list of values.
                splitted = eq.split(',')
                if len(splitted) > 1:
                    for s in eq.split(','):
                        s = s.strip()
                        fields.append(Filter.build_filter(Dimension(col) == s))
                    cond = Filter(type="or", fields=fields)
                else:
                    cond = Dimension(col) == eq
                if op == 'not in':
                    cond = ~cond
            if filters:
                filters = Filter(type="and", fields=[
                    Filter.build_filter(cond),
                    Filter.build_filter(filters)
                ])
            else:
                filters = cond
        if filters:
            qry['filter'] = filters

        client = self.cluster.get_pydruid_client()
        orig_filters = filters
        if timeseries_limit and is_timeseries:
            # Limit on the number of timeseries, doing a two-phases query
            # Phase 1: find the top ``timeseries_limit`` dimension tuples.
            pre_qry = deepcopy(qry)
            pre_qry['granularity'] = "all"
            pre_qry['limit_spec'] = {
                "type": "default",
                "limit": timeseries_limit,
                'intervals': (
                    inner_from_dttm.isoformat() + '/' +
                    inner_to_dttm.isoformat()),
                "columns": [{
                    # NOTE(review): the fallback is a DruidMetric object,
                    # not a metric name — confirm pydruid accepts it.
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }],
            }
            client.groupby(**pre_qry)
            query_str += "// Two phase query\n// Phase 1\n"
            query_str += json.dumps(client.query_dict, indent=2) + "\n"
            query_str += "//\nPhase 2 (built based on phase one's results)\n"
            df = client.export_pandas()
            if df is not None and not df.empty:
                # Phase 2: restrict the real query to the winning tuples.
                dims = qry['dimensions']
                filters = []
                for _, row in df.iterrows():
                    fields = []
                    for dim in dims:
                        f = Filter.build_filter(Dimension(dim) == row[dim])
                        fields.append(f)
                    if len(fields) > 1:
                        filt = Filter(type="and", fields=fields)
                        filters.append(Filter.build_filter(filt))
                    elif fields:
                        filters.append(fields[0])
                if filters:
                    ff = Filter(type="or", fields=filters)
                    if not orig_filters:
                        qry['filter'] = ff
                    else:
                        qry['filter'] = Filter(type="and", fields=[
                            Filter.build_filter(ff),
                            Filter.build_filter(orig_filters)])
                qry['limit_spec'] = None
        if row_limit:
            qry['limit_spec'] = {
                "type": "default",
                "limit": row_limit,
                "columns": [{
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }],
            }
        client.groupby(**qry)
        query_str += json.dumps(client.query_dict, indent=2)
        df = client.export_pandas()
        if df is None or df.size == 0:
            raise Exception("No data was returned.")

        # For non-timeseries "all"-granularity results the timestamp
        # column carries no information; drop it.
        if (
                not is_timeseries and
                granularity == "all" and
                'timestamp' in df.columns):
            del df['timestamp']

        # Reordering columns
        cols = []
        if 'timestamp' in df.columns:
            cols += ['timestamp']
        cols += [col for col in groupby if col in df.columns]
        cols += [col for col in metrics if col in df.columns]
        cols += [col for col in df.columns if col not in cols]
        df = df[cols]
        return QueryResult(
            df=df,
            query=query_str,
            duration=datetime.now() - qry_start_dttm)
class Log(Model):

    """ORM object used to log Caravel actions to the database"""

    __tablename__ = 'logs'
    id = Column(Integer, primary_key=True)
    action = Column(String(512))
    # Original code declared user_id twice; the duplicate is removed.
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    dashboard_id = Column(Integer)
    slice_id = Column(Integer)
    json = Column(Text)
    user = relationship('User', backref='logs', foreign_keys=[user_id])
    dttm = Column(DateTime, default=func.now())
    # Pass the callable so the date is evaluated per insert;
    # ``date.today()`` would freeze the import-time date as the default.
    dt = Column(Date, default=date.today)

    @classmethod
    def log_this(cls, f):
        """Decorator to log user actions"""
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            user_id = None
            if g.user:
                user_id = g.user.id
            # Merge URL query args with the view's kwargs for the log row.
            d = request.args.to_dict()
            d.update(kwargs)
            log = cls(
                action=f.__name__,
                json=json.dumps(d),
                dashboard_id=d.get('dashboard_id') or None,
                slice_id=d.get('slice_id') or None,
                user_id=user_id)
            db.session.add(log)
            db.session.commit()
            return f(*args, **kwargs)
        return wrapper
class DruidMetric(Model, AuditMixinNullable):

    """ORM object referencing Druid metrics for a datasource"""

    __tablename__ = 'metrics'
    id = Column(Integer, primary_key=True)
    metric_name = Column(String(512))
    verbose_name = Column(String(1024))
    # e.g. 'count', 'sum', 'min', 'max', 'count_distinct'
    metric_type = Column(String(32))
    datasource_name = Column(
        String(250),
        ForeignKey('datasources.datasource_name'))
    # Setting enable_typechecks=False disables polymorphic inheritance.
    datasource = relationship('DruidDatasource', backref='metrics',
                              enable_typechecks=False)
    # JSON-encoded Druid aggregation spec for this metric.
    json = Column(Text)
    description = Column(Text)

    @property
    def json_obj(self):
        """The parsed aggregation spec; empty dict if unparseable."""
        try:
            obj = json.loads(self.json)
        except Exception:
            obj = {}
        return obj
class DruidColumn(Model, AuditMixinNullable):

    """ORM model for storing Druid datasource column metadata"""

    __tablename__ = 'columns'
    id = Column(Integer, primary_key=True)
    datasource_name = Column(
        String(250),
        ForeignKey('datasources.datasource_name'))
    # Setting enable_typechecks=False disables polymorphic inheritance.
    datasource = relationship('DruidDatasource', backref='columns',
                              enable_typechecks=False)
    column_name = Column(String(256))
    is_active = Column(Boolean, default=True)
    type = Column(String(32))
    # Flags controlling which default metrics generate_metrics() builds:
    groupby = Column(Boolean, default=False)
    count_distinct = Column(Boolean, default=False)
    sum = Column(Boolean, default=False)
    max = Column(Boolean, default=False)
    min = Column(Boolean, default=False)
    filterable = Column(Boolean, default=False)
    description = Column(Text)

    def __repr__(self):
        return self.column_name

    @property
    def isnum(self):
        """Whether the Druid column type is numeric."""
        return self.type in ('LONG', 'DOUBLE', 'FLOAT')

    def generate_metrics(self):
        """Generate metrics based on the column metadata"""
        M = DruidMetric  # noqa
        metrics = []
        # COUNT(*) is always available regardless of column flags.
        metrics.append(DruidMetric(
            metric_name='count',
            verbose_name='COUNT(*)',
            metric_type='count',
            json=json.dumps({'type': 'count', 'name': 'count'})
        ))
        # Somehow we need to reassign this for UDAFs
        if self.type in ('DOUBLE', 'FLOAT'):
            corrected_type = 'DOUBLE'
        else:
            corrected_type = self.type
        if self.sum and self.isnum:
            # Druid aggregator names look like 'doubleSum'/'longSum'.
            mt = corrected_type.lower() + 'Sum'
            name = 'sum__' + self.column_name
            metrics.append(DruidMetric(
                metric_name=name,
                metric_type='sum',
                verbose_name='SUM({})'.format(self.column_name),
                json=json.dumps({
                    'type': mt, 'name': name, 'fieldName': self.column_name})
            ))
        if self.min and self.isnum:
            mt = corrected_type.lower() + 'Min'
            name = 'min__' + self.column_name
            metrics.append(DruidMetric(
                metric_name=name,
                metric_type='min',
                verbose_name='MIN({})'.format(self.column_name),
                json=json.dumps({
                    'type': mt, 'name': name, 'fieldName': self.column_name})
            ))
        if self.max and self.isnum:
            mt = corrected_type.lower() + 'Max'
            name = 'max__' + self.column_name
            metrics.append(DruidMetric(
                metric_name=name,
                metric_type='max',
                verbose_name='MAX({})'.format(self.column_name),
                json=json.dumps({
                    'type': mt, 'name': name, 'fieldName': self.column_name})
            ))
        if self.count_distinct:
            mt = 'count_distinct'
            name = 'count_distinct__' + self.column_name
            metrics.append(DruidMetric(
                metric_name=name,
                verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
                metric_type='count_distinct',
                json=json.dumps({
                    'type': 'cardinality',
                    'name': name,
                    'fieldNames': [self.column_name]})
            ))
        session = get_session()
        # Only insert metrics that don't already exist for this
        # datasource/cluster combination.
        for metric in metrics:
            m = (
                session.query(M)
                .filter(M.metric_name == metric.metric_name)
                .filter(M.datasource_name == self.datasource_name)
                .filter(DruidCluster.cluster_name == self.datasource.cluster_name)
                .first()
            )
            metric.datasource_name = self.datasource_name
            if not m:
                session.add(metric)
                session.commit()
class FavStar(Model):

    """ORM model recording a user's "favorite star" on an object.

    ``class_name``/``obj_id`` generically reference the starred object
    (presumably slices or dashboards — confirm against callers).
    """

    __tablename__ = 'favstar'

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    class_name = Column(String(50))
    obj_id = Column(Integer)
    dttm = Column(DateTime, default=func.now())
| {
"repo_name": "thebucknerlife/caravel",
"path": "caravel/models.py",
"copies": "1",
"size": "42278",
"license": "apache-2.0",
"hash": -5922760950915400000,
"line_mean": 32.795363709,
"line_max": 86,
"alpha_frac": 0.5552769762,
"autogenerated": false,
"ratio": 4.04032874617737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.509560572237737,
"avg_score": null,
"num_lines": null
} |
"""A collection of ORM sqlalchemy models for SQL Lab"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import re
from flask import Markup
from flask_appbuilder import Model
from future.standard_library import install_aliases
import sqlalchemy as sqla
from sqlalchemy import (
Boolean, Column, DateTime, ForeignKey, Integer, Numeric, String, Text,
)
from sqlalchemy.orm import backref, relationship
from superset import sm
from superset.models.helpers import AuditMixinNullable
from superset.utils import QueryStatus
install_aliases()
class Query(Model):

    """ORM model for SQL query"""

    __tablename__ = 'query'
    id = Column(Integer, primary_key=True)
    client_id = Column(String(11), unique=True, nullable=False)
    database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
    # Store the tmp table into the DB only if the user asks for it.
    tmp_table_name = Column(String(256))
    user_id = Column(Integer, ForeignKey('ab_user.id'), nullable=True)
    status = Column(String(16), default=QueryStatus.PENDING)
    tab_name = Column(String(256))
    sql_editor_id = Column(String(256))
    schema = Column(String(256))
    sql = Column(Text)
    # Query to retrieve the results,
    # used only in case of select_as_cta_used is true.
    select_sql = Column(Text)
    executed_sql = Column(Text)
    # Could be configured in the superset config.
    limit = Column(Integer)
    limit_used = Column(Boolean, default=False)
    select_as_cta = Column(Boolean)
    select_as_cta_used = Column(Boolean, default=False)
    progress = Column(Integer, default=0)  # 1..100
    # # of rows in the result set or rows modified.
    rows = Column(Integer)
    error_message = Column(Text)
    # key used to store the results in the results backend
    results_key = Column(String(64), index=True)
    # Using Numeric in place of DateTime for sub-second precision
    # stored as seconds since epoch, allowing for milliseconds
    start_time = Column(Numeric(precision=20, scale=6))
    start_running_time = Column(Numeric(precision=20, scale=6))
    end_time = Column(Numeric(precision=20, scale=6))
    end_result_backend_time = Column(Numeric(precision=20, scale=6))
    tracking_url = Column(Text)
    changed_on = Column(
        DateTime,
        default=datetime.utcnow,
        onupdate=datetime.utcnow,
        nullable=True)
    database = relationship(
        'Database',
        foreign_keys=[database_id],
        backref=backref('queries', cascade='all, delete-orphan'))
    user = relationship(sm.user_model, foreign_keys=[user_id])
    __table_args__ = (
        sqla.Index('ti_user_id_changed_on', user_id, changed_on),
    )

    @property
    def limit_reached(self):
        """True when a row limit was applied and the result hit it."""
        return self.rows == self.limit if self.limit_used else False

    def to_dict(self):
        """Serialize the query for the SQL Lab frontend."""
        return {
            'changedOn': self.changed_on,
            'changed_on': self.changed_on.isoformat(),
            'dbId': self.database_id,
            'db': self.database.database_name,
            'endDttm': self.end_time,
            'errorMessage': self.error_message,
            'executedSql': self.executed_sql,
            'id': self.client_id,
            'limit': self.limit,
            'progress': self.progress,
            'rows': self.rows,
            'schema': self.schema,
            'ctas': self.select_as_cta,
            'serverId': self.id,
            'sql': self.sql,
            'sqlEditorId': self.sql_editor_id,
            'startDttm': self.start_time,
            'state': self.status.lower(),
            'tab': self.tab_name,
            'tempTable': self.tmp_table_name,
            'userId': self.user_id,
            'user': self.user.username,
            'limit_reached': self.limit_reached,
            'resultsKey': self.results_key,
            'trackingUrl': self.tracking_url,
        }

    @property
    def name(self):
        """A sanitized 'sqllab_<tab>_<timestamp>' identifier for the query."""
        ts = datetime.now().isoformat()
        # Strip separators so the timestamp is identifier-safe.
        ts = ts.replace('-', '').replace(':', '').split('.')[0]
        tab = (self.tab_name.replace(' ', '_').lower()
               if self.tab_name else 'notab')
        tab = re.sub(r'\W+', '', tab)
        return 'sqllab_{tab}_{ts}'.format(**locals())
class SavedQuery(Model, AuditMixinNullable):

    """ORM model for a SQL query saved by a user in SQL Lab"""

    __tablename__ = 'saved_query'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('ab_user.id'), nullable=True)
    db_id = Column(Integer, ForeignKey('dbs.id'), nullable=True)
    schema = Column(String(128))
    label = Column(String(256))
    description = Column(Text)
    sql = Column(Text)
    user = relationship(
        sm.user_model,
        backref=backref('saved_queries', cascade='all, delete-orphan'),
        foreign_keys=[user_id])
    database = relationship(
        'Database',
        foreign_keys=[db_id],
        backref=backref('saved_queries', cascade='all, delete-orphan'))

    @property
    def pop_tab_link(self):
        """Link icon that opens this saved query in a SQL Lab tab."""
        return Markup("""
            <a href="/superset/sqllab?savedQueryId={self.id}">
                <i class="fa fa-link"></i>
            </a>
        """.format(**locals()))
| {
"repo_name": "alanmcruickshank/superset-dev",
"path": "superset/models/sql_lab.py",
"copies": "1",
"size": "5286",
"license": "apache-2.0",
"hash": -5829946089397358000,
"line_mean": 33.3246753247,
"line_max": 74,
"alpha_frac": 0.6208853575,
"autogenerated": false,
"ratio": 3.7838224767358626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9904707834235862,
"avg_score": 0,
"num_lines": 154
} |
"""A collection of ORM sqlalchemy models for Superset"""
#-*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import json
import logging
import numpy
import pickle
import textwrap
from future.standard_library import install_aliases
from copy import copy
from datetime import datetime, date
import pandas as pd
import sqlalchemy as sqla
from sqlalchemy.engine.url import make_url
from sqlalchemy.orm import subqueryload
from flask import escape, g, Markup, request
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from sqlalchemy import (
Column, Integer, String, ForeignKey, Text, Boolean,
DateTime, Date, Table,
create_engine, MetaData, select
)
from sqlalchemy.orm import relationship
from sqlalchemy.orm.session import make_transient
from sqlalchemy.sql import text
from sqlalchemy.sql.expression import TextAsFrom
from sqlalchemy_utils import EncryptedType
from superset import app, db, db_engine_specs, utils, sm
from superset.connectors.connector_registry import ConnectorRegistry
from superset.viz import viz_types
from superset.models.helpers import AuditMixinNullable, ImportMixin, set_perm
install_aliases()
from urllib import parse # noqa
config = app.config
stats_logger = config.get('STATS_LOGGER')
metadata = Model.metadata # pylint: disable=no-member
def set_related_perm(mapper, connection, target):  # noqa
    """Copy the datasource's perm string onto the target before a write."""
    datasource_id = target.datasource_id
    if not datasource_id:
        return
    model = target.cls_model
    datasource = (
        db.session.query(model)
        .filter_by(id=int(datasource_id))
        .first()
    )
    if datasource:
        target.perm = datasource.perm
class Url(Model, AuditMixinNullable):

    """Used for the short url feature"""

    __tablename__ = 'url'
    id = Column(Integer, primary_key=True)
    # The full URL the short link expands to.
    url = Column(Text)
class KeyValue(Model):

    """Used for any type of key-value store"""

    __tablename__ = 'keyvalue'
    # The auto-increment id serves as the key.
    id = Column(Integer, primary_key=True)
    value = Column(Text, nullable=False)
class CssTemplate(Model, AuditMixinNullable):

    """CSS templates for dashboards"""

    __tablename__ = 'css_templates'
    id = Column(Integer, primary_key=True)
    template_name = Column(String(250))
    css = Column(Text, default='')
# Many-to-many association between slices and their owners.
slice_user = Table('slice_user', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('user_id', Integer, ForeignKey('ab_user.id')),
                   Column('slice_id', Integer, ForeignKey('slices.id'))
                   )
class Slice(Model, AuditMixinNullable, ImportMixin):

    """A slice is essentially a report or a view on data"""

    __tablename__ = 'slices'
    id = Column(Integer, primary_key=True)
    slice_name = Column(String(250))
    datasource_id = Column(Integer)
    datasource_type = Column(String(200))
    datasource_name = Column(String(2000))
    viz_type = Column(String(250))
    # JSON-encoded visualization parameters.
    params = Column(Text)
    description = Column(Text)
    cache_timeout = Column(Integer)
    perm = Column(String(1000))
    owners = relationship(sm.user_model, secondary=slice_user)

    # Fields serialized on export/import.
    export_fields = ('slice_name', 'datasource_type', 'datasource_name',
                     'viz_type', 'params', 'cache_timeout')

    def __repr__(self):
        return self.slice_name

    @property
    def cls_model(self):
        """The connector model class backing this slice's datasource."""
        return ConnectorRegistry.sources[self.datasource_type]

    @property
    def datasource(self):
        return self.get_datasource

    @datasource.getter
    @utils.memoized
    def get_datasource(self):
        # Memoized so the datasource is looked up at most once per slice.
        return (
            db.session.query(self.cls_model)
            .filter_by(id=self.datasource_id)
            .first()
        )

    @renders('datasource_name')
    def datasource_link(self):
        # pylint: disable=no-member
        datasource = self.datasource
        return datasource.link if datasource else None

    @property
    def datasource_edit_url(self):
        # pylint: disable=no-member
        datasource = self.datasource
        return datasource.url if datasource else None

    @property
    @utils.memoized
    def viz(self):
        """Visualization object instantiated from the stored params."""
        d = json.loads(self.params)
        viz_class = viz_types[self.viz_type]
        # pylint: disable=no-member
        return viz_class(self.datasource, form_data=d)

    @property
    def description_markeddown(self):
        return utils.markdown(self.description)

    @property
    def data(self):
        """Data used to render slice in templates"""
        d = {}
        self.token = ''
        try:
            d = self.viz.data
            self.token = d.get('token')
        except Exception as e:
            # Surface the error to the frontend rather than failing.
            logging.exception(e)
            d['error'] = str(e)
        return {
            'datasource': self.datasource_name,
            'description': self.description,
            'description_markeddown': self.description_markeddown,
            'edit_url': self.edit_url,
            'form_data': self.form_data,
            'slice_id': self.id,
            'slice_name': self.slice_name,
            'slice_url': self.slice_url,
        }

    @property
    def json_data(self):
        return json.dumps(self.data)

    @property
    def form_data(self):
        """Stored params augmented with slice/viz/datasource identifiers."""
        form_data = json.loads(self.params)
        form_data.update({
            'slice_id': self.id,
            'viz_type': self.viz_type,
            'datasource': str(self.datasource_id) + '__' + self.datasource_type
        })
        if self.cache_timeout:
            form_data['cache_timeout'] = self.cache_timeout
        return form_data

    @property
    def slice_url(self):
        """Defines the url to access the slice"""
        return (
            "/superset/explore/{obj.datasource_type}/"
            "{obj.datasource_id}/?form_data={params}".format(
                obj=self, params=parse.quote(json.dumps(self.form_data))))

    @property
    def slice_id_url(self):
        return (
            "/superset/{slc.datasource_type}/{slc.datasource_id}/{slc.id}/"
        ).format(slc=self)

    @property
    def edit_url(self):
        return "/slicemodelview/edit/{}".format(self.id)

    @property
    def slice_link(self):
        """HTML anchor pointing at the explore view for this slice."""
        url = self.slice_url
        name = escape(self.slice_name)
        return Markup('<a href="{url}">{name}</a>'.format(**locals()))

    def get_viz(self, url_params_multidict=None):
        """Creates :py:class:viz.BaseViz object from the url_params_multidict.

        NOTE(review): ``url_params_multidict`` is never read in this body —
        the viz is built from ``self.params`` only; confirm intent.

        :param werkzeug.datastructures.MultiDict url_params_multidict:
            Contains the visualization params, they override the self.params
            stored in the database
        :return: object of the 'viz_type' type that is taken from the
            url_params_multidict or self.params.
        :rtype: :py:class:viz.BaseViz
        """
        slice_params = json.loads(self.params)
        slice_params['slice_id'] = self.id
        slice_params['json'] = "false"
        slice_params['slice_name'] = self.slice_name
        slice_params['viz_type'] = self.viz_type if self.viz_type else "table"
        return viz_types[slice_params.get('viz_type')](
            self.datasource,
            form_data=slice_params,
        )

    @classmethod
    def import_obj(cls, slc_to_import, import_time=None):
        """Inserts or overrides slc in the database.

        remote_id and import_time fields in params_dict are set to track the
        slice origin and ensure correct overrides for multiple imports.
        Slice.perm is used to find the datasources and connect them.
        """
        session = db.session
        make_transient(slc_to_import)
        slc_to_import.dashboards = []
        slc_to_import.alter_params(
            remote_id=slc_to_import.id, import_time=import_time)

        # find if the slice was already imported
        slc_to_override = None
        for slc in session.query(Slice).all():
            if ('remote_id' in slc.params_dict and
                    slc.params_dict['remote_id'] == slc_to_import.id):
                slc_to_override = slc

        slc_to_import = slc_to_import.copy()
        params = slc_to_import.params_dict
        # Re-resolve the datasource id against the local instance.
        slc_to_import.datasource_id = ConnectorRegistry.get_datasource_by_name(
            session, slc_to_import.datasource_type, params['datasource_name'],
            params['schema'], params['database_name']).id
        if slc_to_override:
            slc_to_override.override(slc_to_import)
            session.flush()
            return slc_to_override.id
        session.add(slc_to_import)
        logging.info('Final slice: {}'.format(slc_to_import.to_json()))
        session.flush()
        return slc_to_import.id
# Keep Slice.perm in sync with its datasource's perm on every write.
sqla.event.listen(Slice, 'before_insert', set_related_perm)
sqla.event.listen(Slice, 'before_update', set_related_perm)
# Many-to-many association between dashboards and the slices they contain.
dashboard_slices = Table(
    'dashboard_slices', metadata,
    Column('id', Integer, primary_key=True),
    Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
    Column('slice_id', Integer, ForeignKey('slices.id')),
)

# Many-to-many association between dashboards and their owners.
dashboard_user = Table(
    'dashboard_user', metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey('ab_user.id')),
    Column('dashboard_id', Integer, ForeignKey('dashboards.id'))
)
class Dashboard(Model, AuditMixinNullable, ImportMixin):
    """The dashboard object!"""
    __tablename__ = 'dashboards'
    id = Column(Integer, primary_key=True)
    dashboard_title = Column(String(500))
    # Grid layout of the slices, serialized as a JSON array of positions.
    position_json = Column(Text)
    description = Column(Text)
    css = Column(Text)
    # Free-form JSON bag (filters, refresh settings, import bookkeeping, ...).
    json_metadata = Column(Text)
    slug = Column(String(255), unique=True)
    slices = relationship(
        'Slice', secondary=dashboard_slices, backref='dashboards')
    owners = relationship(sm.user_model, secondary=dashboard_user)
    # Fields serialized by ImportMixin for dashboard export/import.
    export_fields = ('dashboard_title', 'position_json', 'json_metadata',
                     'description', 'css', 'slug')
    def __repr__(self):
        return self.dashboard_title
    @property
    def table_names(self):
        """Comma-separated full names of all datasources used by this dash."""
        # pylint: disable=no-member
        return ", ".join(
            {"{}".format(s.datasource.full_name) for s in self.slices})
    @property
    def url(self):
        return "/superset/dashboard/{}/".format(self.slug or self.id)
    @property
    def datasources(self):
        return {slc.datasource for slc in self.slices}
    @property
    def sqla_metadata(self):
        # NOTE(review): Dashboard defines no get_sqla_engine(); this property
        # looks broken/dead -- confirm before relying on it. Also note that
        # MetaData.reflect() returns None.
        # pylint: disable=no-member
        metadata = MetaData(bind=self.get_sqla_engine())
        return metadata.reflect()
    def dashboard_link(self):
        """HTML anchor linking to this dashboard (title is escaped)."""
        title = escape(self.dashboard_title)
        return Markup(
            '<a href="{self.url}">{title}</a>'.format(**locals()))
    @property
    def data(self):
        """Dict used to render the dashboard in templates / the API."""
        positions = self.position_json
        if positions:
            positions = json.loads(positions)
        return {
            'id': self.id,
            'metadata': self.params_dict,
            'css': self.css,
            'dashboard_title': self.dashboard_title,
            'slug': self.slug,
            'slices': [slc.data for slc in self.slices],
            'position_json': positions,
        }
    @property
    def params(self):
        # ImportMixin expects a ``params`` attribute; alias it to
        # json_metadata for this model.
        return self.json_metadata
    @params.setter
    def params(self, value):
        self.json_metadata = value
    @property
    def position_array(self):
        """``position_json`` parsed to a list; empty list when unset."""
        if self.position_json:
            return json.loads(self.position_json)
        return []
    @classmethod
    def import_obj(cls, dashboard_to_import, import_time=None):
        """Imports the dashboard from the object to the database.

        Once dashboard is imported, json_metadata field is extended and stores
        remote_id and import_time. It helps to decide if the dashboard has to
        be overridden or just copied over. Slices that belong to this
        dashboard will be wired to existing tables. This function can be used
        to import/export dashboards between multiple superset instances.
        Audit metadata isn't copied over.
        """
        def alter_positions(dashboard, old_to_new_slc_id_dict):
            """ Updates slice_ids in the position json.

            Sample position json:
            [{
                "col": 5,
                "row": 10,
                "size_x": 4,
                "size_y": 2,
                "slice_id": "3610"
            }]
            """
            position_array = dashboard.position_array
            for position in position_array:
                if 'slice_id' not in position:
                    continue
                old_slice_id = int(position['slice_id'])
                if old_slice_id in old_to_new_slc_id_dict:
                    # slice_id is stored as a string in the position json
                    position['slice_id'] = '{}'.format(
                        old_to_new_slc_id_dict[old_slice_id])
            dashboard.position_json = json.dumps(position_array)
        logging.info('Started import of the dashboard: {}'
                     .format(dashboard_to_import.to_json()))
        session = db.session
        logging.info('Dashboard has {} slices'
                     .format(len(dashboard_to_import.slices)))
        # copy slices object as Slice.import_slice will mutate the slice
        # and will remove the existing dashboard - slice association
        slices = copy(dashboard_to_import.slices)
        old_to_new_slc_id_dict = {}
        new_filter_immune_slices = []
        new_expanded_slices = {}
        i_params_dict = dashboard_to_import.params_dict
        for slc in slices:
            logging.info('Importing slice {} from the dashboard: {}'.format(
                slc.to_json(), dashboard_to_import.dashboard_title))
            new_slc_id = Slice.import_obj(slc, import_time=import_time)
            old_to_new_slc_id_dict[slc.id] = new_slc_id
            # update json metadata that deals with slice ids
            new_slc_id_str = '{}'.format(new_slc_id)
            old_slc_id_str = '{}'.format(slc.id)
            if ('filter_immune_slices' in i_params_dict and
                    old_slc_id_str in i_params_dict['filter_immune_slices']):
                new_filter_immune_slices.append(new_slc_id_str)
            if ('expanded_slices' in i_params_dict and
                    old_slc_id_str in i_params_dict['expanded_slices']):
                new_expanded_slices[new_slc_id_str] = (
                    i_params_dict['expanded_slices'][old_slc_id_str])
        # override the dashboard
        existing_dashboard = None
        for dash in session.query(Dashboard).all():
            if ('remote_id' in dash.params_dict and
                    dash.params_dict['remote_id'] ==
                    dashboard_to_import.id):
                existing_dashboard = dash
        dashboard_to_import.id = None
        alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
        dashboard_to_import.alter_params(import_time=import_time)
        if new_expanded_slices:
            dashboard_to_import.alter_params(
                expanded_slices=new_expanded_slices)
        if new_filter_immune_slices:
            dashboard_to_import.alter_params(
                filter_immune_slices=new_filter_immune_slices)
        new_slices = session.query(Slice).filter(
            Slice.id.in_(old_to_new_slc_id_dict.values())).all()
        if existing_dashboard:
            existing_dashboard.override(dashboard_to_import)
            existing_dashboard.slices = new_slices
            session.flush()
            return existing_dashboard.id
        else:
            # session.add(dashboard_to_import) causes sqlachemy failures
            # related to the attached users / slices. Creating new object
            # allows to avoid conflicts in the sql alchemy state.
            copied_dash = dashboard_to_import.copy()
            copied_dash.slices = new_slices
            session.add(copied_dash)
            session.flush()
            return copied_dash.id
    @classmethod
    def export_dashboards(cls, dashboard_ids):
        """Pickles the given dashboards plus their datasources for export."""
        copied_dashboards = []
        datasource_ids = set()
        for dashboard_id in dashboard_ids:
            # make sure that dashboard_id is an integer
            dashboard_id = int(dashboard_id)
            copied_dashboard = (
                db.session.query(Dashboard)
                .options(subqueryload(Dashboard.slices))
                .filter_by(id=dashboard_id).first()
            )
            make_transient(copied_dashboard)
            for slc in copied_dashboard.slices:
                datasource_ids.add((slc.datasource_id, slc.datasource_type))
                # add extra params for the import
                slc.alter_params(
                    remote_id=slc.id,
                    datasource_name=slc.datasource.name,
                    # NOTE(review): schema is set to the datasource *name*;
                    # looks like it should be the datasource schema -- confirm.
                    schema=slc.datasource.name,
                    database_name=slc.datasource.database.name,
                )
            copied_dashboard.alter_params(remote_id=dashboard_id)
            copied_dashboards.append(copied_dashboard)
        eager_datasources = []
        # NOTE: loop vars are really (datasource_id, datasource_type)
        for dashboard_id, dashboard_type in datasource_ids:
            eager_datasource = ConnectorRegistry.get_eager_datasource(
                db.session, dashboard_type, dashboard_id)
            eager_datasource.alter_params(
                remote_id=eager_datasource.id,
                database_name=eager_datasource.database.name,
            )
            make_transient(eager_datasource)
            eager_datasources.append(eager_datasource)
        return pickle.dumps({
            'dashboards': copied_dashboards,
            'datasources': eager_datasources,
        })
class Database(Model, AuditMixinNullable):
    """An ORM object that stores Database related information"""
    __tablename__ = 'dbs'
    type = "table"
    id = Column(Integer, primary_key=True)
    verbose_name = Column(String(250), unique=True)
    # short unique name, used in permissions
    database_name = Column(String(250), unique=True)
    # the stored URI has its password masked; see set_sqlalchemy_uri()
    sqlalchemy_uri = Column(String(1024))
    # real password, encrypted at rest with the app SECRET_KEY
    password = Column(EncryptedType(String(1024), config.get('SECRET_KEY')))
    cache_timeout = Column(Integer)
    select_as_create_table_as = Column(Boolean, default=False)
    expose_in_sqllab = Column(Boolean, default=False)
    allow_run_sync = Column(Boolean, default=True)
    allow_run_async = Column(Boolean, default=False)
    allow_ctas = Column(Boolean, default=False)
    allow_dml = Column(Boolean, default=False)
    force_ctas_schema = Column(String(250))
    # free-form JSON with engine/metadata kwargs; parsed by get_extra()
    extra = Column(Text, default=textwrap.dedent("""\
    {
        "metadata_params": {},
        "engine_params": {}
    }
    """))
    perm = Column(String(1000))
    def __repr__(self):
        return self.verbose_name if self.verbose_name else self.database_name
    @property
    def name(self):
        """Display name: verbose_name when set, else database_name."""
        return self.verbose_name if self.verbose_name else self.database_name
    @property
    def unique_name(self):
        return self.database_name
    @property
    def backend(self):
        """SQLAlchemy backend name (e.g. ``mysql``) parsed from the URI."""
        url = make_url(self.sqlalchemy_uri_decrypted)
        return url.get_backend_name()
    def set_sqlalchemy_uri(self, uri):
        """Stores *uri* with the password replaced by a mask; the real
        password goes to the encrypted ``password`` column."""
        password_mask = "X" * 10
        conn = sqla.engine.url.make_url(uri)
        if conn.password != password_mask:
            # do not over-write the password with the password mask
            self.password = conn.password
        conn.password = password_mask if conn.password else None
        self.sqlalchemy_uri = str(conn)  # hides the password
    def get_sqla_engine(self, schema=None):
        """Creates a SQLAlchemy engine for this database, optionally
        adjusted to *schema* by the engine spec."""
        extra = self.get_extra()
        uri = make_url(self.sqlalchemy_uri_decrypted)
        params = extra.get('engine_params', {})
        uri = self.db_engine_spec.adjust_database_uri(uri, schema)
        return create_engine(uri, **params)
    def get_reserved_words(self):
        return self.get_sqla_engine().dialect.preparer.reserved_words
    def get_quoter(self):
        return self.get_sqla_engine().dialect.identifier_preparer.quote
    def get_df(self, sql, schema):
        """Runs *sql* and returns a pandas DataFrame; list/dict cells are
        JSON-serialized so they render consistently."""
        sql = sql.strip().strip(';')
        eng = self.get_sqla_engine(schema=schema)
        df = pd.read_sql(sql, eng)
        def needs_conversion(df_series):
            # only object columns whose first value is a container need it
            if df_series.empty:
                return False
            if isinstance(df_series[0], (list, dict)):
                return True
            return False
        for k, v in df.dtypes.iteritems():
            if v.type == numpy.object_ and needs_conversion(df[k]):
                df[k] = df[k].apply(utils.json_dumps_w_dates)
        return df
    def compile_sqla_query(self, qry, schema=None):
        """Renders *qry* to a SQL string with literal values inlined."""
        eng = self.get_sqla_engine(schema=schema)
        compiled = qry.compile(eng, compile_kwargs={"literal_binds": True})
        return '{}'.format(compiled)
    def select_star(
            self, table_name, schema=None, limit=100, show_cols=False,
            indent=True, latest_partition=True):
        """Generates a ``select *`` statement in the proper dialect"""
        return self.db_engine_spec.select_star(
            self, table_name, schema=schema, limit=limit, show_cols=show_cols,
            indent=indent, latest_partition=latest_partition)
    def wrap_sql_limit(self, sql, limit=1000):
        """Wraps raw *sql* in an outer query limited to *limit* rows."""
        qry = (
            select('*')
            .select_from(
                TextAsFrom(text(sql), ['*'])
                .alias('inner_qry')
            ).limit(limit)
        )
        return self.compile_sqla_query(qry)
    def safe_sqlalchemy_uri(self):
        # the stored URI already has its password masked
        return self.sqlalchemy_uri
    @property
    def inspector(self):
        engine = self.get_sqla_engine()
        return sqla.inspect(engine)
    def all_table_names(self, schema=None, force=False):
        if not schema:
            # no schema given: delegate to the engine spec (may be cached)
            tables_dict = self.db_engine_spec.fetch_result_sets(
                self, 'table', force=force)
            return tables_dict.get("", [])
        return sorted(
            self.db_engine_spec.get_table_names(schema, self.inspector))
    def all_view_names(self, schema=None, force=False):
        if not schema:
            views_dict = self.db_engine_spec.fetch_result_sets(
                self, 'view', force=force)
            return views_dict.get("", [])
        views = []
        try:
            views = self.inspector.get_view_names(schema)
        except Exception:
            # best effort: some dialects can't enumerate views
            pass
        return views
    def all_schema_names(self):
        return sorted(self.inspector.get_schema_names())
    @property
    def db_engine_spec(self):
        """Engine-specific behaviors; falls back to BaseEngineSpec."""
        return db_engine_specs.engines.get(
            self.backend, db_engine_specs.BaseEngineSpec)
    def grains(self):
        """Defines time granularity database-specific expressions.

        The idea here is to make it easy for users to change the time grain
        from a datetime (maybe the source grain is arbitrary timestamps, daily
        or 5 minutes increments) to another, "truncated" datetime. Since
        each database has slightly different but similar datetime functions,
        this allows a mapping between database engines and actual functions.
        """
        return self.db_engine_spec.time_grains
    def grains_dict(self):
        return {grain.name: grain for grain in self.grains()}
    def get_extra(self):
        """Parses the ``extra`` JSON column; returns {} on any error."""
        extra = {}
        if self.extra:
            try:
                extra = json.loads(self.extra)
            except Exception as e:
                logging.error(e)
        return extra
    def get_table(self, table_name, schema=None):
        """Reflects *table_name* into a SQLAlchemy Table object."""
        extra = self.get_extra()
        meta = MetaData(**extra.get('metadata_params', {}))
        return Table(
            table_name, meta,
            schema=schema or None,
            autoload=True,
            autoload_with=self.get_sqla_engine())
    def get_columns(self, table_name, schema=None):
        return self.inspector.get_columns(table_name, schema)
    def get_indexes(self, table_name, schema=None):
        return self.inspector.get_indexes(table_name, schema)
    def get_pk_constraint(self, table_name, schema=None):
        return self.inspector.get_pk_constraint(table_name, schema)
    def get_foreign_keys(self, table_name, schema=None):
        return self.inspector.get_foreign_keys(table_name, schema)
    @property
    def sqlalchemy_uri_decrypted(self):
        """The stored URI with the real (decrypted) password restored."""
        conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
        conn.password = self.password
        return str(conn)
    @property
    def sql_url(self):
        return '/superset/sql/{}/'.format(self.id)
    def get_perm(self):
        return (
            u"[{obj.database_name}].(id:{obj.id})").format(obj=self)
# Keep Database.perm in sync on every write (set_perm from models.helpers).
sqla.event.listen(Database, 'after_insert', set_perm)
sqla.event.listen(Database, 'after_update', set_perm)
class Log(Model):
    """ORM object used to log Superset actions to the database"""
    __tablename__ = 'logs'
    id = Column(Integer, primary_key=True)
    action = Column(String(512))
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    dashboard_id = Column(Integer)
    slice_id = Column(Integer)
    json = Column(Text)
    user = relationship(sm.user_model, backref='logs', foreign_keys=[user_id])
    dttm = Column(DateTime, default=datetime.utcnow)
    # Pass the callable, not its result: ``date.today()`` would be evaluated
    # once at import time and freeze the default date for the whole process.
    dt = Column(Date, default=date.today)
    duration_ms = Column(Integer)
    referrer = Column(String(1024))
    @classmethod
    def log_this(cls, f):
        """Decorator to log user actions"""
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            start_dttm = datetime.now()
            user_id = None
            if g.user:
                user_id = g.user.get_id()
            # merge GET args, POST form data and view kwargs into one payload
            d = request.args.to_dict()
            post_data = request.form or {}
            d.update(post_data)
            d.update(kwargs)
            slice_id = d.get('slice_id', 0)
            try:
                slice_id = int(slice_id) if slice_id else 0
            except ValueError:
                slice_id = 0
            params = ""
            try:
                params = json.dumps(d)
            except Exception:
                # best effort only: never fail the request because the
                # payload isn't JSON-serializable (was a bare ``except:``,
                # which also swallowed KeyboardInterrupt/SystemExit)
                pass
            stats_logger.incr(f.__name__)
            value = f(*args, **kwargs)
            sesh = db.session()
            log = cls(
                action=f.__name__,
                json=params,
                dashboard_id=d.get('dashboard_id') or None,
                slice_id=slice_id,
                duration_ms=(
                    datetime.now() - start_dttm).total_seconds() * 1000,
                referrer=request.referrer[:1000] if request.referrer else None,
                user_id=user_id)
            sesh.add(log)
            sesh.commit()
            return value
        return wrapper
class FavStar(Model):
    """Records a user's "favorite star" on an object (slice or dashboard)."""
    __tablename__ = 'favstar'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    # class_name + obj_id form a loose polymorphic reference to the starred
    # object (no DB-level foreign key)
    class_name = Column(String(50))
    obj_id = Column(Integer)
    dttm = Column(DateTime, default=datetime.utcnow)
class DatasourceAccessRequest(Model, AuditMixinNullable):
    """ORM model for the access requests for datasources and dbs."""
    __tablename__ = 'access_request'
    id = Column(Integer, primary_key=True)
    datasource_id = Column(Integer)
    datasource_type = Column(String(200))
    # roles that must never be offered for granting/extension
    ROLES_BLACKLIST = set(config.get('ROBOT_PERMISSION_ROLES', []))
    @property
    def cls_model(self):
        """Connector model class matching ``datasource_type``."""
        return ConnectorRegistry.sources[self.datasource_type]
    @property
    def username(self):
        return self.creator()
    @property
    def datasource(self):
        return self.get_datasource
    @datasource.getter
    @utils.memoized
    def get_datasource(self):
        # memoized: the datasource is fetched from the DB at most once
        # pylint: disable=no-member
        ds = db.session.query(self.cls_model).filter_by(
            id=self.datasource_id).first()
        return ds
    @property
    def datasource_link(self):
        return self.datasource.link  # pylint: disable=no-member
    @property
    def roles_with_datasource(self):
        """HTML <ul> of "Grant ... Role" links, one per role that already
        has access to the datasource (blacklisted roles are skipped)."""
        action_list = ''
        perm = self.datasource.perm  # pylint: disable=no-member
        pv = sm.find_permission_view_menu('datasource_access', perm)
        for r in pv.role:
            if r.name in self.ROLES_BLACKLIST:
                continue
            url = (
                '/superset/approve?datasource_type={self.datasource_type}&'
                'datasource_id={self.datasource_id}&'
                'created_by={self.created_by.username}&role_to_grant={r.name}'
                .format(**locals())
            )
            href = '<a href="{}">Grant {} Role</a>'.format(url, r.name)
            action_list = action_list + '<li>' + href + '</li>'
        return '<ul>' + action_list + '</ul>'
    @property
    def user_roles(self):
        """HTML <ul> of "Extend ... Role" links for the requesting user's
        roles; blacklisted roles render as plain text."""
        action_list = ''
        for r in self.created_by.roles:  # pylint: disable=no-member
            url = (
                '/superset/approve?datasource_type={self.datasource_type}&'
                'datasource_id={self.datasource_id}&'
                'created_by={self.created_by.username}&role_to_extend={r.name}'
                .format(**locals())
            )
            href = '<a href="{}">Extend {} Role</a>'.format(url, r.name)
            if r.name in self.ROLES_BLACKLIST:
                href = "{} Role".format(r.name)
            action_list = action_list + '<li>' + href + '</li>'
        return '<ul>' + action_list + '</ul>'
| {
"repo_name": "BRD-CD/superset",
"path": "superset/models/core.py",
"copies": "1",
"size": "29305",
"license": "apache-2.0",
"hash": 2701986638010991600,
"line_mean": 33.4359576968,
"line_max": 79,
"alpha_frac": 0.5978501962,
"autogenerated": false,
"ratio": 3.926705078386708,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001316060862374662,
"num_lines": 851
} |
"""A collection of ORM sqlalchemy models for Superset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from copy import copy, deepcopy
from datetime import date, datetime
import functools
import json
import logging
import pickle
import textwrap
from flask import escape, g, Markup, request
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from future.standard_library import install_aliases
import numpy
import pandas as pd
import sqlalchemy as sqla
from sqlalchemy import (
Boolean, Column, create_engine, Date, DateTime, ForeignKey, Integer,
MetaData, select, String, Table, Text,
)
from sqlalchemy.engine import url
from sqlalchemy.engine.url import make_url
from sqlalchemy.orm import relationship, subqueryload
from sqlalchemy.orm.session import make_transient
from sqlalchemy.pool import NullPool
from sqlalchemy.sql import text
from sqlalchemy.sql.expression import TextAsFrom
from sqlalchemy_utils import EncryptedType
from superset import app, db, db_engine_specs, sm, utils
from superset.connectors.connector_registry import ConnectorRegistry
from superset.models.helpers import AuditMixinNullable, ImportMixin, set_perm
from superset.viz import viz_types
install_aliases()
from urllib import parse # noqa
config = app.config
stats_logger = config.get('STATS_LOGGER')
metadata = Model.metadata # pylint: disable=no-member
PASSWORD_MASK = 'X' * 10
def set_related_perm(mapper, connection, target):  # noqa
    """SQLAlchemy hook: copy the datasource's ``perm`` onto *target*.

    Looks up the connector model referenced by ``target.datasource_id``
    and, when found, mirrors its ``perm`` string on the target row.
    """
    datasource_id = target.datasource_id
    if not datasource_id:
        return
    model = target.cls_model
    matched = db.session.query(model).filter_by(id=int(datasource_id)).first()
    if matched:
        target.perm = matched.perm
class Url(Model, AuditMixinNullable):
    """Used for the short url feature"""
    __tablename__ = 'url'
    id = Column(Integer, primary_key=True)
    # full target URL that the short link (keyed by id) redirects to
    url = Column(Text)
class KeyValue(Model):
    """Used for any type of key-value store"""
    __tablename__ = 'keyvalue'
    id = Column(Integer, primary_key=True)
    # opaque payload; the row id serves as the key
    value = Column(Text, nullable=False)
class CssTemplate(Model, AuditMixinNullable):
    """CSS templates for dashboards"""
    __tablename__ = 'css_templates'
    id = Column(Integer, primary_key=True)
    template_name = Column(String(250))
    css = Column(Text, default='')
# Association table linking slices to their owner users (many-to-many).
slice_user = Table('slice_user', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('user_id', Integer, ForeignKey('ab_user.id')),
                   Column('slice_id', Integer, ForeignKey('slices.id')))
class Slice(Model, AuditMixinNullable, ImportMixin):
    """A slice is essentially a report or a view on data"""
    __tablename__ = 'slices'
    id = Column(Integer, primary_key=True)
    slice_name = Column(String(250))
    # datasource is referenced by (type, id); resolved via ConnectorRegistry
    datasource_id = Column(Integer)
    datasource_type = Column(String(200))
    datasource_name = Column(String(2000))
    viz_type = Column(String(250))
    # JSON-encoded form data driving the visualization
    params = Column(Text)
    description = Column(Text)
    cache_timeout = Column(Integer)
    perm = Column(String(1000))
    owners = relationship(sm.user_model, secondary=slice_user)
    # fields serialized by ImportMixin for slice export/import
    export_fields = ('slice_name', 'datasource_type', 'datasource_name',
                     'viz_type', 'params', 'cache_timeout')
    def __repr__(self):
        return self.slice_name
    @property
    def cls_model(self):
        """Connector model class matching ``datasource_type``."""
        return ConnectorRegistry.sources[self.datasource_type]
    @property
    def datasource(self):
        return self.get_datasource
    def clone(self):
        """Returns a new, unsaved Slice with the same definition."""
        return Slice(
            slice_name=self.slice_name,
            datasource_id=self.datasource_id,
            datasource_type=self.datasource_type,
            datasource_name=self.datasource_name,
            viz_type=self.viz_type,
            params=self.params,
            description=self.description,
            cache_timeout=self.cache_timeout)
    @datasource.getter
    @utils.memoized
    def get_datasource(self):
        # memoized: hits the DB at most once per instance
        return (
            db.session.query(self.cls_model)
            .filter_by(id=self.datasource_id)
            .first()
        )
    @renders('datasource_name')
    def datasource_link(self):
        # pylint: disable=no-member
        datasource = self.datasource
        return datasource.link if datasource else None
    @property
    def datasource_edit_url(self):
        # pylint: disable=no-member
        datasource = self.datasource
        return datasource.url if datasource else None
    @property
    @utils.memoized
    def viz(self):
        """BaseViz instance built from ``params`` (memoized)."""
        d = json.loads(self.params)
        viz_class = viz_types[self.viz_type]
        # pylint: disable=no-member
        return viz_class(self.datasource, form_data=d)
    @property
    def description_markeddown(self):
        return utils.markdown(self.description)
    @property
    def data(self):
        """Data used to render slice in templates"""
        d = {}
        self.token = ''
        try:
            d = self.viz.data
            self.token = d.get('token')
        except Exception as e:
            # surface viz errors to the client instead of failing the page
            logging.exception(e)
            d['error'] = str(e)
        return {
            'datasource': self.datasource_name,
            'description': self.description,
            'description_markeddown': self.description_markeddown,
            'edit_url': self.edit_url,
            'form_data': self.form_data,
            'slice_id': self.id,
            'slice_name': self.slice_name,
            'slice_url': self.slice_url,
        }
    @property
    def json_data(self):
        return json.dumps(self.data)
    @property
    def form_data(self):
        """``params`` parsed to a dict and augmented with slice/viz/
        datasource identifiers; identifiers only when params is malformed."""
        form_data = {}
        try:
            form_data = json.loads(self.params)
        except Exception as e:
            logging.error("Malformed json in slice's params")
            logging.exception(e)
        form_data.update({
            'slice_id': self.id,
            'viz_type': self.viz_type,
            'datasource': str(self.datasource_id) + '__' + self.datasource_type,
        })
        if self.cache_timeout:
            form_data['cache_timeout'] = self.cache_timeout
        return form_data
    @property
    def slice_url(self):
        """Defines the url to access the slice"""
        return (
            '/superset/explore/{obj.datasource_type}/'
            '{obj.datasource_id}/?form_data={params}'.format(
                obj=self, params=parse.quote(json.dumps(self.form_data))))
    @property
    def slice_id_url(self):
        return (
            '/superset/{slc.datasource_type}/{slc.datasource_id}/{slc.id}/'
        ).format(slc=self)
    @property
    def edit_url(self):
        return '/slicemodelview/edit/{}'.format(self.id)
    @property
    def slice_link(self):
        """HTML anchor to the slice (name is escaped)."""
        url = self.slice_url
        name = escape(self.slice_name)
        return Markup('<a href="{url}">{name}</a>'.format(**locals()))
    def get_viz(self):
        """Creates :py:class:viz.BaseViz object from the url_params_multidict.

        :return: object of the 'viz_type' type that is taken from the
            url_params_multidict or self.params.
        :rtype: :py:class:viz.BaseViz
        """
        slice_params = json.loads(self.params)
        slice_params['slice_id'] = self.id
        slice_params['json'] = 'false'
        slice_params['slice_name'] = self.slice_name
        slice_params['viz_type'] = self.viz_type if self.viz_type else 'table'
        return viz_types[slice_params.get('viz_type')](
            self.datasource,
            form_data=slice_params,
        )
    @classmethod
    def import_obj(cls, slc_to_import, import_time=None):
        """Inserts or overrides slc in the database.

        remote_id and import_time fields in params_dict are set to track the
        slice origin and ensure correct overrides for multiple imports.
        Slice.perm is used to find the datasources and connect them.
        """
        session = db.session
        make_transient(slc_to_import)
        slc_to_import.dashboards = []
        slc_to_import.alter_params(
            remote_id=slc_to_import.id, import_time=import_time)
        # find if the slice was already imported
        slc_to_override = None
        for slc in session.query(Slice).all():
            if ('remote_id' in slc.params_dict and
                    slc.params_dict['remote_id'] == slc_to_import.id):
                slc_to_override = slc
        slc_to_import = slc_to_import.copy()
        params = slc_to_import.params_dict
        # re-point the slice at the matching datasource on this instance
        slc_to_import.datasource_id = ConnectorRegistry.get_datasource_by_name(
            session, slc_to_import.datasource_type, params['datasource_name'],
            params['schema'], params['database_name']).id
        if slc_to_override:
            slc_to_override.override(slc_to_import)
            session.flush()
            return slc_to_override.id
        session.add(slc_to_import)
        logging.info('Final slice: {}'.format(slc_to_import.to_json()))
        session.flush()
        return slc_to_import.id
# Keep Slice.perm in sync with its datasource's perm on every write.
sqla.event.listen(Slice, 'before_insert', set_related_perm)
sqla.event.listen(Slice, 'before_update', set_related_perm)
# Association table linking dashboards to slices (many-to-many).
dashboard_slices = Table(
    'dashboard_slices', metadata,
    Column('id', Integer, primary_key=True),
    Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
    Column('slice_id', Integer, ForeignKey('slices.id')),
)
# Association table linking dashboards to their owner users (many-to-many).
dashboard_user = Table(
    'dashboard_user', metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey('ab_user.id')),
    Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
)
class Dashboard(Model, AuditMixinNullable, ImportMixin):
    """The dashboard object!"""
    __tablename__ = 'dashboards'
    id = Column(Integer, primary_key=True)
    dashboard_title = Column(String(500))
    # Grid layout of the slices, serialized as a JSON array of positions.
    position_json = Column(Text)
    description = Column(Text)
    css = Column(Text)
    # Free-form JSON bag (default_filters, refresh settings, import info...).
    json_metadata = Column(Text)
    slug = Column(String(255), unique=True)
    slices = relationship(
        'Slice', secondary=dashboard_slices, backref='dashboards')
    owners = relationship(sm.user_model, secondary=dashboard_user)
    # Fields serialized by ImportMixin for dashboard export/import.
    export_fields = ('dashboard_title', 'position_json', 'json_metadata',
                     'description', 'css', 'slug')
    def __repr__(self):
        return self.dashboard_title
    @property
    def table_names(self):
        """Comma-separated full names of all datasources used by this dash."""
        # pylint: disable=no-member
        return ', '.join(
            {'{}'.format(s.datasource.full_name) for s in self.slices})
    @property
    def url(self):
        """Dashboard URL, carrying default_filters as preselect_filters."""
        if self.json_metadata:
            # add default_filters to the preselect_filters of dashboard
            json_metadata = json.loads(self.json_metadata)
            default_filters = json_metadata.get('default_filters')
            if default_filters:
                filters = parse.quote(default_filters.encode('utf8'))
                return '/superset/dashboard/{}/?preselect_filters={}'.format(
                    self.slug or self.id, filters)
        return '/superset/dashboard/{}/'.format(self.slug or self.id)
    @property
    def datasources(self):
        return {slc.datasource for slc in self.slices}
    @property
    def sqla_metadata(self):
        # NOTE(review): Dashboard defines no get_sqla_engine(); this property
        # looks broken/dead -- confirm before relying on it. Also note that
        # MetaData.reflect() returns None.
        # pylint: disable=no-member
        metadata = MetaData(bind=self.get_sqla_engine())
        return metadata.reflect()
    def dashboard_link(self):
        """HTML anchor linking to this dashboard (title is escaped)."""
        title = escape(self.dashboard_title)
        return Markup(
            '<a href="{self.url}">{title}</a>'.format(**locals()))
    @property
    def data(self):
        """Dict used to render the dashboard in templates / the API."""
        positions = self.position_json
        if positions:
            positions = json.loads(positions)
        return {
            'id': self.id,
            'metadata': self.params_dict,
            'css': self.css,
            'dashboard_title': self.dashboard_title,
            'slug': self.slug,
            'slices': [slc.data for slc in self.slices],
            'position_json': positions,
        }
    @property
    def params(self):
        # ImportMixin expects a ``params`` attribute; alias it to
        # json_metadata for this model.
        return self.json_metadata
    @params.setter
    def params(self, value):
        self.json_metadata = value
    @property
    def position_array(self):
        """``position_json`` parsed to a list; empty list when unset."""
        if self.position_json:
            return json.loads(self.position_json)
        return []
    @classmethod
    def import_obj(cls, dashboard_to_import, import_time=None):
        """Imports the dashboard from the object to the database.

        Once dashboard is imported, json_metadata field is extended and stores
        remote_id and import_time. It helps to decide if the dashboard has to
        be overridden or just copied over. Slices that belong to this
        dashboard will be wired to existing tables. This function can be used
        to import/export dashboards between multiple superset instances.
        Audit metadata isn't copied over.
        """
        def alter_positions(dashboard, old_to_new_slc_id_dict):
            """ Updates slice_ids in the position json.

            Sample position json:
            [{
                "col": 5,
                "row": 10,
                "size_x": 4,
                "size_y": 2,
                "slice_id": "3610"
            }]
            """
            position_array = dashboard.position_array
            for position in position_array:
                if 'slice_id' not in position:
                    continue
                old_slice_id = int(position['slice_id'])
                if old_slice_id in old_to_new_slc_id_dict:
                    # slice_id is stored as a string in the position json
                    position['slice_id'] = '{}'.format(
                        old_to_new_slc_id_dict[old_slice_id])
            dashboard.position_json = json.dumps(position_array)
        logging.info('Started import of the dashboard: {}'
                     .format(dashboard_to_import.to_json()))
        session = db.session
        logging.info('Dashboard has {} slices'
                     .format(len(dashboard_to_import.slices)))
        # copy slices object as Slice.import_slice will mutate the slice
        # and will remove the existing dashboard - slice association
        slices = copy(dashboard_to_import.slices)
        old_to_new_slc_id_dict = {}
        new_filter_immune_slices = []
        new_timed_refresh_immune_slices = []
        new_expanded_slices = {}
        i_params_dict = dashboard_to_import.params_dict
        for slc in slices:
            logging.info('Importing slice {} from the dashboard: {}'.format(
                slc.to_json(), dashboard_to_import.dashboard_title))
            new_slc_id = Slice.import_obj(slc, import_time=import_time)
            old_to_new_slc_id_dict[slc.id] = new_slc_id
            # update json metadata that deals with slice ids
            new_slc_id_str = '{}'.format(new_slc_id)
            old_slc_id_str = '{}'.format(slc.id)
            if ('filter_immune_slices' in i_params_dict and
                    old_slc_id_str in i_params_dict['filter_immune_slices']):
                new_filter_immune_slices.append(new_slc_id_str)
            if ('timed_refresh_immune_slices' in i_params_dict and
                    old_slc_id_str in
                    i_params_dict['timed_refresh_immune_slices']):
                new_timed_refresh_immune_slices.append(new_slc_id_str)
            if ('expanded_slices' in i_params_dict and
                    old_slc_id_str in i_params_dict['expanded_slices']):
                new_expanded_slices[new_slc_id_str] = (
                    i_params_dict['expanded_slices'][old_slc_id_str])
        # override the dashboard
        existing_dashboard = None
        for dash in session.query(Dashboard).all():
            if ('remote_id' in dash.params_dict and
                    dash.params_dict['remote_id'] ==
                    dashboard_to_import.id):
                existing_dashboard = dash
        dashboard_to_import.id = None
        alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
        dashboard_to_import.alter_params(import_time=import_time)
        if new_expanded_slices:
            dashboard_to_import.alter_params(
                expanded_slices=new_expanded_slices)
        if new_filter_immune_slices:
            dashboard_to_import.alter_params(
                filter_immune_slices=new_filter_immune_slices)
        if new_timed_refresh_immune_slices:
            dashboard_to_import.alter_params(
                timed_refresh_immune_slices=new_timed_refresh_immune_slices)
        new_slices = session.query(Slice).filter(
            Slice.id.in_(old_to_new_slc_id_dict.values())).all()
        if existing_dashboard:
            existing_dashboard.override(dashboard_to_import)
            existing_dashboard.slices = new_slices
            session.flush()
            return existing_dashboard.id
        else:
            # session.add(dashboard_to_import) causes sqlachemy failures
            # related to the attached users / slices. Creating new object
            # allows to avoid conflicts in the sql alchemy state.
            copied_dash = dashboard_to_import.copy()
            copied_dash.slices = new_slices
            session.add(copied_dash)
            session.flush()
            return copied_dash.id
    @classmethod
    def export_dashboards(cls, dashboard_ids):
        """Pickles the given dashboards plus their datasources for export."""
        copied_dashboards = []
        datasource_ids = set()
        for dashboard_id in dashboard_ids:
            # make sure that dashboard_id is an integer
            dashboard_id = int(dashboard_id)
            copied_dashboard = (
                db.session.query(Dashboard)
                .options(subqueryload(Dashboard.slices))
                .filter_by(id=dashboard_id).first()
            )
            make_transient(copied_dashboard)
            for slc in copied_dashboard.slices:
                datasource_ids.add((slc.datasource_id, slc.datasource_type))
                # add extra params for the import
                slc.alter_params(
                    remote_id=slc.id,
                    datasource_name=slc.datasource.name,
                    # NOTE(review): schema is set to the datasource *name*;
                    # looks like it should be the datasource schema -- confirm.
                    schema=slc.datasource.name,
                    database_name=slc.datasource.database.name,
                )
            copied_dashboard.alter_params(remote_id=dashboard_id)
            copied_dashboards.append(copied_dashboard)
        eager_datasources = []
        # NOTE: loop vars are really (datasource_id, datasource_type)
        for dashboard_id, dashboard_type in datasource_ids:
            eager_datasource = ConnectorRegistry.get_eager_datasource(
                db.session, dashboard_type, dashboard_id)
            eager_datasource.alter_params(
                remote_id=eager_datasource.id,
                database_name=eager_datasource.database.name,
            )
            make_transient(eager_datasource)
            eager_datasources.append(eager_datasource)
        return pickle.dumps({
            'dashboards': copied_dashboards,
            'datasources': eager_datasources,
        })
class Database(Model, AuditMixinNullable):
"""An ORM object that stores Database related information"""
__tablename__ = 'dbs'
type = 'table'
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
# short unique name, used in permissions
database_name = Column(String(250), unique=True)
sqlalchemy_uri = Column(String(1024))
password = Column(EncryptedType(String(1024), config.get('SECRET_KEY')))
cache_timeout = Column(Integer)
select_as_create_table_as = Column(Boolean, default=False)
expose_in_sqllab = Column(Boolean, default=False)
allow_run_sync = Column(Boolean, default=True)
allow_run_async = Column(Boolean, default=False)
allow_ctas = Column(Boolean, default=False)
allow_dml = Column(Boolean, default=False)
force_ctas_schema = Column(String(250))
extra = Column(Text, default=textwrap.dedent("""\
{
"metadata_params": {},
"engine_params": {}
}
"""))
perm = Column(String(1000))
custom_password_store = config.get('SQLALCHEMY_CUSTOM_PASSWORD_STORE')
impersonate_user = Column(Boolean, default=False)
def __repr__(self):
return self.verbose_name if self.verbose_name else self.database_name
@property
def name(self):
return self.verbose_name if self.verbose_name else self.database_name
@property
def unique_name(self):
return self.database_name
@property
def backend(self):
url = make_url(self.sqlalchemy_uri_decrypted)
return url.get_backend_name()
@classmethod
def get_password_masked_url_from_uri(cls, uri):
url = make_url(uri)
return cls.get_password_masked_url(url)
@classmethod
def get_password_masked_url(cls, url):
url_copy = deepcopy(url)
if url_copy.password is not None and url_copy.password != PASSWORD_MASK:
url_copy.password = PASSWORD_MASK
return url_copy
def set_sqlalchemy_uri(self, uri):
conn = sqla.engine.url.make_url(uri.strip())
if conn.password != PASSWORD_MASK and not self.custom_password_store:
# do not over-write the password with the password mask
self.password = conn.password
conn.password = PASSWORD_MASK if conn.password else None
self.sqlalchemy_uri = str(conn) # hides the password
def get_effective_user(self, url, user_name=None):
"""
Get the effective user, especially during impersonation.
:param url: SQL Alchemy URL object
:param user_name: Default username
:return: The effective username
"""
effective_username = None
if self.impersonate_user:
effective_username = url.username
if user_name:
effective_username = user_name
elif (
hasattr(g, 'user') and hasattr(g.user, 'username') and
g.user.username is not None
):
effective_username = g.user.username
return effective_username
def get_sqla_engine(self, schema=None, nullpool=False, user_name=None):
    """Create a SQLAlchemy engine for this database.

    :param schema: optional schema to point the connection at
    :param nullpool: if True, disable connection pooling
    :param user_name: username used when impersonation is enabled
    """
    extra = self.get_extra()
    url = make_url(self.sqlalchemy_uri_decrypted)
    url = self.db_engine_spec.adjust_database_uri(url, schema)
    effective_username = self.get_effective_user(url, user_name)
    # If using MySQL or Presto for example, will set url.username
    # If using Hive, will not do anything yet since that relies on a
    # configuration parameter instead.
    self.db_engine_spec.modify_url_for_impersonation(
        url,
        self.impersonate_user,
        effective_username)
    # Never log the real password.
    masked_url = self.get_password_masked_url(url)
    logging.info('Database.get_sqla_engine(). Masked URL: {0}'.format(masked_url))
    params = extra.get('engine_params', {})
    if nullpool:
        params['poolclass'] = NullPool
    # If using Hive, this will set hive.server2.proxy.user=$effective_username
    configuration = {}
    configuration.update(
        self.db_engine_spec.get_configuration_for_impersonation(
            str(url),
            self.impersonate_user,
            effective_username))
    if configuration:
        params['connect_args'] = {'configuration': configuration}
    return create_engine(url, **params)

def get_reserved_words(self):
    """Reserved words for the engine's SQL dialect."""
    return self.get_sqla_engine().dialect.preparer.reserved_words

def get_quoter(self):
    """Identifier-quoting callable for the engine's dialect."""
    return self.get_sqla_engine().dialect.identifier_preparer.quote
def get_df(self, sql, schema):
    """Run *sql* against *schema* and return the result as a DataFrame."""
    sql = sql.strip().strip(';')
    eng = self.get_sqla_engine(schema=schema)
    df = pd.read_sql(sql, eng)

    def needs_conversion(df_series):
        # Only object columns holding lists/dicts need JSON serialization.
        if df_series.empty:
            return False
        if isinstance(df_series[0], (list, dict)):
            return True
        return False

    # Serialize nested structures so downstream consumers get strings.
    for k, v in df.dtypes.iteritems():
        if v.type == numpy.object_ and needs_conversion(df[k]):
            df[k] = df[k].apply(utils.json_dumps_w_dates)
    return df

def compile_sqla_query(self, qry, schema=None):
    """Render *qry* to a SQL string with literal values inlined."""
    eng = self.get_sqla_engine(schema=schema)
    compiled = qry.compile(eng, compile_kwargs={'literal_binds': True})
    return '{}'.format(compiled)
def select_star(
        self, table_name, schema=None, limit=100, show_cols=False,
        indent=True, latest_partition=True):
    """Generates a ``select *`` statement in the proper dialect"""
    return self.db_engine_spec.select_star(
        self, table_name, schema=schema, limit=limit, show_cols=show_cols,
        indent=indent, latest_partition=latest_partition)

def wrap_sql_limit(self, sql, limit=1000):
    """Wrap *sql* in an outer SELECT with a row LIMIT applied."""
    qry = (
        select('*')
        .select_from(
            TextAsFrom(text(sql), ['*'])
            .alias('inner_qry'),
        ).limit(limit)
    )
    return self.compile_sqla_query(qry)

def safe_sqlalchemy_uri(self):
    # The stored URI already has its password masked; safe to expose.
    return self.sqlalchemy_uri

@property
def inspector(self):
    """A fresh SQLAlchemy inspector bound to a new engine on each access."""
    engine = self.get_sqla_engine()
    return sqla.inspect(engine)
def all_table_names(self, schema=None, force=False):
    """List table names, optionally scoped to *schema*.

    :param force: bypass any caching in fetch_result_sets
    """
    if not schema:
        tables_dict = self.db_engine_spec.fetch_result_sets(
            self, 'table', force=force)
        return tables_dict.get('', [])
    return sorted(
        self.db_engine_spec.get_table_names(schema, self.inspector))

def all_view_names(self, schema=None, force=False):
    """List view names, optionally scoped to *schema*."""
    if not schema:
        views_dict = self.db_engine_spec.fetch_result_sets(
            self, 'view', force=force)
        return views_dict.get('', [])
    views = []
    try:
        views = self.inspector.get_view_names(schema)
    except Exception:
        # Best effort: some engines do not support view reflection.
        pass
    return views

def all_schema_names(self):
    """Sorted list of schema names reachable through this connection."""
    return sorted(self.inspector.get_schema_names())

@property
def db_engine_spec(self):
    # Engine-specific behavior, keyed by backend; falls back to the base spec.
    return db_engine_specs.engines.get(
        self.backend, db_engine_specs.BaseEngineSpec)
@classmethod
def get_db_engine_spec_for_backend(cls, backend):
    """Return the engine spec registered for *backend* (or the base spec)."""
    return db_engine_specs.engines.get(backend, db_engine_specs.BaseEngineSpec)

def grains(self):
    """Defines time granularity database-specific expressions.

    The idea here is to make it easy for users to change the time grain
    form a datetime (maybe the source grain is arbitrary timestamps, daily
    or 5 minutes increments) to another, "truncated" datetime. Since
    each database has slightly different but similar datetime functions,
    this allows a mapping between database engines and actual functions.
    """
    return self.db_engine_spec.time_grains

def grains_dict(self):
    """Convenience lookup of time grains by name."""
    return {grain.name: grain for grain in self.grains()}

def get_extra(self):
    """Parse the ``extra`` JSON column; returns {} on missing/bad JSON."""
    extra = {}
    if self.extra:
        try:
            extra = json.loads(self.extra)
        except Exception as e:
            # Deliberately non-fatal: a malformed blob should not break
            # the Database object; just log and fall back to {}.
            logging.error(e)
    return extra
def get_table(self, table_name, schema=None):
    """Reflect *table_name* into a SQLAlchemy Table object."""
    extra = self.get_extra()
    meta = MetaData(**extra.get('metadata_params', {}))
    return Table(
        table_name, meta,
        schema=schema or None,
        autoload=True,
        autoload_with=self.get_sqla_engine())

# Thin reflection wrappers around the SQLAlchemy inspector.
def get_columns(self, table_name, schema=None):
    return self.inspector.get_columns(table_name, schema)

def get_indexes(self, table_name, schema=None):
    return self.inspector.get_indexes(table_name, schema)

def get_pk_constraint(self, table_name, schema=None):
    return self.inspector.get_pk_constraint(table_name, schema)

def get_foreign_keys(self, table_name, schema=None):
    return self.inspector.get_foreign_keys(table_name, schema)
@property
def sqlalchemy_uri_decrypted(self):
    """The connection URI with the real password substituted back in."""
    conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
    if self.custom_password_store:
        # Pluggable lookup configured via SQLALCHEMY_CUSTOM_PASSWORD_STORE.
        conn.password = self.custom_password_store(conn)
    else:
        conn.password = self.password
    return str(conn)

@property
def sql_url(self):
    """URL of the SQL Lab endpoint for this database."""
    return '/superset/sql/{}/'.format(self.id)

def get_perm(self):
    """Permission string identifying this database row."""
    return (
        '[{obj.database_name}].(id:{obj.id})').format(obj=self)

def has_table(self, table):
    """True if *table* (name + optional schema) exists on this database."""
    engine = self.get_sqla_engine()
    return engine.has_table(
        table.table_name, table.schema or None)

def get_dialect(self):
    """Instantiate the SQLAlchemy dialect for this connection."""
    sqla_url = url.make_url(self.sqlalchemy_uri_decrypted)
    return sqla_url.get_dialect()()
# Keep the permission string in sync whenever a Database row is written.
sqla.event.listen(Database, 'after_insert', set_perm)
sqla.event.listen(Database, 'after_update', set_perm)
class Log(Model):

    """ORM object used to log Superset actions to the database"""

    __tablename__ = 'logs'

    id = Column(Integer, primary_key=True)
    action = Column(String(512))
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    dashboard_id = Column(Integer)
    slice_id = Column(Integer)
    json = Column(Text)
    user = relationship(sm.user_model, backref='logs', foreign_keys=[user_id])
    dttm = Column(DateTime, default=datetime.utcnow)
    # BUG FIX: pass the callable ``date.today`` (evaluated per-insert) rather
    # than ``date.today()``, which froze the date at import time so every row
    # inserted after the process started carried a stale date. This matches
    # the ``dttm`` column's use of the ``datetime.utcnow`` callable above.
    dt = Column(Date, default=date.today)
    duration_ms = Column(Integer)
    referrer = Column(String(1024))

    @classmethod
    def log_this(cls, f):
        """Decorator to log user actions"""
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            start_dttm = datetime.now()
            user_id = None
            if g.user:
                user_id = g.user.get_id()
            # Merge query string, form body and view kwargs into one payload.
            d = request.args.to_dict()
            post_data = request.form or {}
            d.update(post_data)
            d.update(kwargs)
            slice_id = d.get('slice_id')
            try:
                slice_id = int(
                    slice_id or json.loads(d.get('form_data')).get('slice_id'))
            except (ValueError, TypeError):
                # No usable slice id anywhere in the request.
                slice_id = 0
            params = ''
            try:
                params = json.dumps(d)
            except Exception:
                # Payload may hold non-serializable values; log without it.
                pass
            stats_logger.incr(f.__name__)
            value = f(*args, **kwargs)
            # Record the log entry only after the wrapped view succeeded.
            sesh = db.session()
            log = cls(
                action=f.__name__,
                json=params,
                dashboard_id=d.get('dashboard_id'),
                slice_id=slice_id,
                duration_ms=(
                    datetime.now() - start_dttm).total_seconds() * 1000,
                referrer=request.referrer[:1000] if request.referrer else None,
                user_id=user_id)
            sesh.add(log)
            sesh.commit()
            return value
        return wrapper
class FavStar(Model):
    """Generic favorite/star marker linking a user to an object."""
    # NOTE(review): class_name appears to hold the starred object's type and
    # obj_id its primary key -- confirm against callers.

    __tablename__ = 'favstar'

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    class_name = Column(String(50))
    obj_id = Column(Integer)
    dttm = Column(DateTime, default=datetime.utcnow)
class DatasourceAccessRequest(Model, AuditMixinNullable):

    """ORM model for the access requests for datasources and dbs."""

    __tablename__ = 'access_request'

    id = Column(Integer, primary_key=True)
    datasource_id = Column(Integer)
    datasource_type = Column(String(200))

    # Roles never offered in the grant/extend links rendered below.
    ROLES_BLACKLIST = set(config.get('ROBOT_PERMISSION_ROLES', []))

    @property
    def cls_model(self):
        """Connector class registered for this request's datasource type."""
        return ConnectorRegistry.sources[self.datasource_type]

    @property
    def username(self):
        return self.creator()

    @property
    def datasource(self):
        return self.get_datasource

    @datasource.getter
    @utils.memoized
    def get_datasource(self):
        # pylint: disable=no-member
        ds = db.session.query(self.cls_model).filter_by(
            id=self.datasource_id).first()
        return ds

    @property
    def datasource_link(self):
        return self.datasource.link  # pylint: disable=no-member

    @property
    def roles_with_datasource(self):
        """HTML list of links granting access via each eligible role.

        NOTE(review): HTML is assembled by string concatenation without
        escaping -- role names are assumed trusted; confirm.
        """
        action_list = ''
        perm = self.datasource.perm  # pylint: disable=no-member
        pv = sm.find_permission_view_menu('datasource_access', perm)
        for r in pv.role:
            if r.name in self.ROLES_BLACKLIST:
                continue
            url = (
                '/superset/approve?datasource_type={self.datasource_type}&'
                'datasource_id={self.datasource_id}&'
                'created_by={self.created_by.username}&role_to_grant={r.name}'
                .format(**locals())
            )
            href = '<a href="{}">Grant {} Role</a>'.format(url, r.name)
            action_list = action_list + '<li>' + href + '</li>'
        return '<ul>' + action_list + '</ul>'

    @property
    def user_roles(self):
        """HTML list of links extending each of the creator's roles."""
        action_list = ''
        for r in self.created_by.roles:  # pylint: disable=no-member
            url = (
                '/superset/approve?datasource_type={self.datasource_type}&'
                'datasource_id={self.datasource_id}&'
                'created_by={self.created_by.username}&role_to_extend={r.name}'
                .format(**locals())
            )
            href = '<a href="{}">Extend {} Role</a>'.format(url, r.name)
            # Blacklisted roles are shown as plain text, not links.
            if r.name in self.ROLES_BLACKLIST:
                href = '{} Role'.format(r.name)
            action_list = action_list + '<li>' + href + '</li>'
        return '<ul>' + action_list + '</ul>'
| {
"repo_name": "alanmcruickshank/superset-dev",
"path": "superset/models/core.py",
"copies": "1",
"size": "33495",
"license": "apache-2.0",
"hash": -8313337820049135000,
"line_mean": 34.1469045121,
"line_max": 86,
"alpha_frac": 0.5986863711,
"autogenerated": false,
"ratio": 3.944765045342127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5043451416442126,
"avg_score": null,
"num_lines": null
} |
"""A collection of pandas data interfaces to a project instance."""
from __future__ import absolute_import
import os.path as osp
from glob import glob
import warnings
try:
import pandas as pd
except ImportError:
raise ImportError('The pandas package is required for this plugin. '
'Try pip install pandas')
class ProjectOrRunData(pd.DataFrame):
    """
    A representation of data read from either the project, a Run or path.
    """
    # Relative path (within the project dir) backing this data; set by
    # subclasses.
    path = None
    plugin = []

    def __init__(self, projectrunorpath):
        from modelmanager.project import Project
        # init DataFrame
        pd.DataFrame.__init__(self)
        self.name = self.__class__.__name__
        # instantiated with project
        if Project in projectrunorpath.__class__.__mro__:
            self.project = projectrunorpath
            self.read = self.from_project
            if self.path:
                self.path = osp.join(self.project.projectdir, self.path)
        # instantiated with run
        elif hasattr(projectrunorpath, 'files'):
            from django.conf import settings
            self.project = settings.PROJECT
            self.run = projectrunorpath
            self.path = self.find_file()
            self.read = self.from_run
        # instantiated with a plain path string
        elif type(projectrunorpath) == str:
            self.path = projectrunorpath
            self.read = self.from_project
            self.project = None
        else:
            raise IOError('Run includes no saved files.')
        # read file
        if self.path:
            self.from_path(self.path)
        return

    def find_file(self):
        """Locate this dataset's file among the run's saved files."""
        # find file
        fileqs = (self.run.files.filter(tags__contains=self.name) or
                  self.run.files.filter(file__contains=self.name))
        if fileqs.count() > 1:
            print('Found two files for %s, using last!' % self.name)
        elif fileqs.count() == 0:
            raise IOError('No file found for %s!' % self.name)
        fileobj = fileqs.last()
        return fileobj.file.path

    def from_path(self, path, **readkwargs):
        """Re-initialise the DataFrame from *path* via self.read."""
        pd.DataFrame.__init__(self, self.read(path, **readkwargs))
        self.path = path
        return self

    def from_run(self, path, **readkwargs):
        """
        Read data from a run instance with files.
        """
        reader = self.reader_by_ext(path)
        return reader(path, **readkwargs)

    def from_project(self, path, **kw):
        """!Overwrite me!"""
        raise NotImplementedError('Cant read this ProjectOrRunData from '
                                  'project, define a from_project method!')

    def from_gzip(self, path, **readkwargs):
        # Delegate to the reader of the inner extension (e.g. .csv.gz -> csv).
        readkwargs['compression'] = 'gzip'
        return self.reader_by_ext(osp.splitext(path)[0])(path, **readkwargs)

    def reader_by_ext(self, path):
        """
        Return the read method from_* using the self.path extension.
        Raises a NotImplementedError if none found.
        """
        ext = osp.splitext(path)[1][1:]  # no dot
        readmethodname = 'from_' + ext
        if not hasattr(self, readmethodname):
            raise NotImplementedError('No method %s to read file %s defined!' %
                                      (readmethodname, path))
        return getattr(self, readmethodname)
class ReadWriteDataFrame(pd.DataFrame):
    """
    A representation of data read and written to file.

    The ``read`` method has to reinitialise the dataframe. An example use as a
    plugin (instantiated on project instantiation)::

        class ProjectData(ReadWriteData):
            path = 'some/relative/path.csv'

            def read(self, **kw):
                data = pd.read_table(self.path)
                pd.DataFrame.__init__(self, data)
                return

            def write(self, **kw):
                # possible error/consistency checking
                assert len(self.columns) == 2, 'must have only 2 columns'
                self.to_csv(self.path)

        # add to project
        p = swimpy.Project('project/')
        p.settings(ProjectData)
        # access the DataFrame
        p.projectdata
        # or modify and write out again
        p.projectdata.write()

    To defer reading of the dataframe until it is actually accessed, decorate
    the class with a ``@modelmanager.utils.propertyplugin``.
    """
    # File path backing this dataframe; set by subclasses.
    path = None
    plugin = []

    def __init__(self, projectorpath, read=True, **kwargs):
        # init DataFrame
        pd.DataFrame.__init__(self)
        self.name = self.__class__.__name__
        if type(projectorpath) == str:
            self.path, self.project = projectorpath, None
        else:
            # Assume a project instance; resolve path inside its directory.
            self.project = projectorpath
            self.path = osp.join(self.project.projectdir, self.path)
        if read:
            errmsg = self.name + ' file does not exist: ' + self.path
            assert osp.exists(self.path), errmsg
            pd.DataFrame.__init__(self, self.read(**kwargs))
        return

    def __call__(self, data=None, **set):
        """
        Assign read data from file and optionally set and write new values.

        data: <2D-array-like>
            Set entire dataframe.
        **set: <array-like> | <dict>
            Set columns or rows by key. Subset of values can be set by parsing
            a dict. Creates new row if key is neither in columns or index.
        """
        if data is not None:
            pd.DataFrame.__init__(self, data)
            self.write()
        elif set:
            # Refresh from file first so unset values are preserved.
            self.read()
            for k, v in set.items():
                ix = slice(None)
                if type(v) == dict:
                    # dict value: only set the given index labels.
                    ix, v = zip(*v.items())
                if k in self.columns:
                    self.loc[ix, k] = v
                else:
                    # Not a column: treat the key as an index label (row).
                    self.loc[k, ix] = v
            self.write()
        else:
            self.read()
        return self

    def __repr__(self):
        rpr = '<%s: %s >\n' % (self.name, osp.relpath(self.path))
        return rpr + pd.DataFrame.__repr__(self)

    def read(self, **kwargs):
        """
        Override me and return pd.DataFrame.
        """
        raise NotImplementedError('Reading of %s not implemented.' % self.name)

    def write(self, **kwargs):
        """
        Override me. Error checking and writing to file should be done here.
        """
        raise NotImplementedError('Writing of %s not implemented.' % self.name)
class R(object):
    """
    Interface plugin to R using rpy2 geared towards pandas interoperability.

    The plugin makes the R object available as a project instance and sources
    all R source files in the project resourcedir.
    """

    def __init__(self, project=None):
        self.project = project
        self._initialize()
        if project:
            # BUG FIX: the original called self.source_resources(), a method
            # that does not exist (AttributeError); the defined method is
            # _source_resources.
            self._source_resources()
        return

    def _initialize(self):
        """Import rpy2 and enable automatic pandas conversion."""
        try:
            from rpy2.robjects import r, pandas2ri
        except ImportError:
            # BUG FIX: the original did ``raise('...')``, which raises a
            # TypeError (strings are not exceptions); raise a proper
            # ImportError with the same message instead.
            raise ImportError('Cant import rpy2 needed for the R plugin.')
        # activate automatic pandas dataframe conversion
        pandas2ri.activate()
        self._r = r
        self._pandas2ri = pandas2ri
        return

    def _source_resources(self):
        """Source every *.r / *.R file found in the project resourcedir."""
        prd = self.project.resourcedir
        rsrc = sorted(glob(osp.join(prd, '*.r')) + glob(osp.join(prd, '*.R')))
        if len(rsrc) == 0:
            warnings.warn('No R source file found in %s' % prd)
        else:
            for s in rsrc:
                self._r.source(s)
        return

    def to_python(self, obj):
        """Convert a rpy2 object to pandas and python equivalents."""
        return self._pandas2ri.ri2py(obj)

    def __getattr__(self, a):
        # Delegate unknown attributes to the underlying R interpreter.
        return getattr(self._r, a)

    def __call__(self, *args, **kwargs):
        """Call any R string/code."""
        return self._r(*args, **kwargs)
| {
"repo_name": "mwort/modelmanager",
"path": "modelmanager/plugins/pandas.py",
"copies": "1",
"size": "7825",
"license": "bsd-3-clause",
"hash": 2659019043254886000,
"line_mean": 32.7284482759,
"line_max": 79,
"alpha_frac": 0.5640894569,
"autogenerated": false,
"ratio": 4.193461950696677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5257551407596677,
"avg_score": null,
"num_lines": null
} |
"""A collection of python stylers"""
import re
# Month number (1-12) -> full English month name.
month_fullnames = {
    1: "January",
    2: "February",
    3: "March",
    4: "April",
    5: "May",
    6: "June",
    7: "July",
    8: "August",
    9: "September",
    10: "October",
    11: "November",
    12: "December",
}

# Month number (1-12) -> abbreviated month name.
# NOTE(review): September is "Sept" (four letters) despite the "threelets"
# name -- confirm this is intentional before normalizing to "Sep".
month_threelets = {
    1: "Jan",
    2: "Feb",
    3: "Mar",
    4: "Apr",
    5: "May",
    6: "Jun",
    7: "Jul",
    8: "Aug",
    9: "Sept",
    10: "Oct",
    11: "Nov",
    12: "Dec",
}
def sentencecase(sentence):
    """returns a sentence in sentencecase but with text in braces preserved

    Parameters
    ----------
    sentence: str
        The sentence

    Returns
    -------
    The sentence in sentence-case (but preserving any text wrapped in braces)

    Notes
    -----
    tbd or n/a are returned lower case, not sentence case.
    """
    # BUG FIX: the brace pattern was written as a plain string ("\{...\}")
    # whose backslash escapes are invalid in Python 3 (DeprecationWarning,
    # a SyntaxError in future versions); use a raw string.
    braced = r"\{[^{}]+\}"
    # Capture the brace-wrapped spans before capitalize() lower-cases them.
    freezecaps = re.findall(braced, sentence)
    datalow = sentence.capitalize()
    pieces = re.split(braced, datalow)
    title = ""
    # Reversing then pop()-ing yields the frozen spans in document order.
    freezecaps.reverse()
    for word in pieces:
        if len(freezecaps) > 0:
            title = title + word + freezecaps.pop()
        else:
            title = title + word
    # "n/a" and "tbd" stay lower case rather than sentence case.
    if title.lower() == "n/a":
        title = title.lower()
    if title.lower() == "tbd":
        title = title.lower()
    return title
| {
"repo_name": "scopatz/regolith",
"path": "regolith/stylers.py",
"copies": "1",
"size": "1321",
"license": "cc0-1.0",
"hash": 4253193127407000600,
"line_mean": 19.3230769231,
"line_max": 78,
"alpha_frac": 0.5162755488,
"autogenerated": false,
"ratio": 3.31077694235589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43270524911558894,
"avg_score": null,
"num_lines": null
} |
"""A collection of random tools for dealing with dates in Python.
.. deprecated:: 0.19.0
Use pandas.tseries module instead.
"""
# flake8: noqa
import warnings
from pandas.core.tools.datetimes import *
from pandas.tseries.offsets import *
from pandas.tseries.frequencies import *
# Deprecation notice fired at import; stacklevel=2 blames the importer.
warnings.warn("The pandas.core.datetools module is deprecated and will be "
              "removed in a future version. Please use the pandas.tseries "
              "module instead.", FutureWarning, stacklevel=2)

# Legacy camelCase aliases for the common offset objects.
day = DateOffset()
bday = BDay()
businessDay = bday
# Custom business-day offsets may be unsupported; degrade to None.
try:
    cday = CDay()
    customBusinessDay = CustomBusinessDay()
    customBusinessMonthEnd = CBMonthEnd()
    customBusinessMonthBegin = CBMonthBegin()
except NotImplementedError:
    cday = None
    customBusinessDay = None
    customBusinessMonthEnd = None
    customBusinessMonthBegin = None

monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
bmonthBegin = BMonthBegin()
cbmonthEnd = customBusinessMonthEnd
cbmonthBegin = customBusinessMonthBegin
bquarterEnd = BQuarterEnd()
quarterEnd = QuarterEnd()
byearEnd = BYearEnd()
week = Week()

# Functions/offsets to roll dates forward
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)
thisQuarterEnd = QuarterEnd(0)

# Functions to check where a date lies
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
| {
"repo_name": "louispotok/pandas",
"path": "pandas/core/datetools.py",
"copies": "4",
"size": "1494",
"license": "bsd-3-clause",
"hash": 5319722831966819000,
"line_mean": 26.1636363636,
"line_max": 75,
"alpha_frac": 0.7510040161,
"autogenerated": false,
"ratio": 3.4187643020594964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 55
} |
"""A collection of random tools for dealing with dates in Python"""
# flake8: noqa
from pandas.tseries.tools import *
from pandas.tseries.offsets import *
from pandas.tseries.frequencies import *
# Legacy camelCase aliases for the common offset objects.
day = DateOffset()
bday = BDay()
businessDay = bday
# Custom business-day offsets may be unsupported; degrade to None.
try:
    cday = CDay()
    customBusinessDay = CustomBusinessDay()
    customBusinessMonthEnd = CBMonthEnd()
    customBusinessMonthBegin = CBMonthBegin()
except NotImplementedError:
    cday = None
    customBusinessDay = None
    customBusinessMonthEnd = None
    customBusinessMonthBegin = None

monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
bmonthBegin = BMonthBegin()
cbmonthEnd = customBusinessMonthEnd
cbmonthBegin = customBusinessMonthBegin
bquarterEnd = BQuarterEnd()
quarterEnd = QuarterEnd()
byearEnd = BYearEnd()
week = Week()

# Functions/offsets to roll dates forward
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)
thisQuarterEnd = QuarterEnd(0)

# Functions to check where a date lies
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
| {
"repo_name": "BigDataforYou/movie_recommendation_workshop_1",
"path": "big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/core/datetools.py",
"copies": "2",
"size": "1190",
"license": "mit",
"hash": 4299396511536654000,
"line_mean": 25.4444444444,
"line_max": 67,
"alpha_frac": 0.7672268908,
"autogenerated": false,
"ratio": 3.278236914600551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045463805400551,
"avg_score": null,
"num_lines": null
} |
"""A collection of random tools for dealing with dates in Python"""
# flake8: noqa
import warnings
from pandas.core.tools.datetimes import *
from pandas.tseries.offsets import *
from pandas.tseries.frequencies import *
# Deprecation notice fired at import; stacklevel=2 blames the importer.
warnings.warn("The pandas.core.datetools module is deprecated and will be "
              "removed in a future version. Please use the pandas.tseries "
              "module instead.", FutureWarning, stacklevel=2)

# Legacy camelCase aliases for the common offset objects.
day = DateOffset()
bday = BDay()
businessDay = bday
# Custom business-day offsets may be unsupported; degrade to None.
try:
    cday = CDay()
    customBusinessDay = CustomBusinessDay()
    customBusinessMonthEnd = CBMonthEnd()
    customBusinessMonthBegin = CBMonthBegin()
except NotImplementedError:
    cday = None
    customBusinessDay = None
    customBusinessMonthEnd = None
    customBusinessMonthBegin = None

monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
bmonthBegin = BMonthBegin()
cbmonthEnd = customBusinessMonthEnd
cbmonthBegin = customBusinessMonthBegin
bquarterEnd = BQuarterEnd()
quarterEnd = QuarterEnd()
byearEnd = BYearEnd()
week = Week()

# Functions/offsets to roll dates forward
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)
thisQuarterEnd = QuarterEnd(0)

# Functions to check where a date lies
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
| {
"repo_name": "jmmease/pandas",
"path": "pandas/core/datetools.py",
"copies": "16",
"size": "1429",
"license": "bsd-3-clause",
"hash": -4846069132785568000,
"line_mean": 27.0196078431,
"line_max": 75,
"alpha_frac": 0.755073478,
"autogenerated": false,
"ratio": 3.4350961538461537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A collection of random tools for dealing with dates in Python"""
# flake8: noqa
import warnings
from pandas.tseries.tools import *
from pandas.tseries.offsets import *
from pandas.tseries.frequencies import *
# Deprecation notice fired at import; stacklevel=2 blames the importer.
warnings.warn("The pandas.core.datetools module is deprecated and will be "
              "removed in a future version. Please use the pandas.tseries "
              "module instead.", FutureWarning, stacklevel=2)

# Legacy camelCase aliases for the common offset objects.
day = DateOffset()
bday = BDay()
businessDay = bday
# Custom business-day offsets may be unsupported; degrade to None.
try:
    cday = CDay()
    customBusinessDay = CustomBusinessDay()
    customBusinessMonthEnd = CBMonthEnd()
    customBusinessMonthBegin = CBMonthBegin()
except NotImplementedError:
    cday = None
    customBusinessDay = None
    customBusinessMonthEnd = None
    customBusinessMonthBegin = None

monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
bmonthBegin = BMonthBegin()
cbmonthEnd = customBusinessMonthEnd
cbmonthBegin = customBusinessMonthBegin
bquarterEnd = BQuarterEnd()
quarterEnd = QuarterEnd()
byearEnd = BYearEnd()
week = Week()

# Functions/offsets to roll dates forward
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)
thisQuarterEnd = QuarterEnd(0)

# Functions to check where a date lies
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
| {
"repo_name": "andyraib/data-storage",
"path": "python_scripts/env/lib/python3.6/site-packages/pandas/core/datetools.py",
"copies": "7",
"size": "1422",
"license": "apache-2.0",
"hash": -3057795233297330700,
"line_mean": 26.8823529412,
"line_max": 75,
"alpha_frac": 0.7545710267,
"autogenerated": false,
"ratio": 3.4265060240963856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 51
} |
"""A collection of random tools for dealing with dates in Python"""
from pandas.tseries.tools import *
from pandas.tseries.offsets import *
from pandas.tseries.frequencies import *
# Legacy camelCase aliases for the common offset objects.
day = DateOffset()
bday = BDay()
businessDay = bday
# Custom business-day offsets may be unsupported; degrade to None.
try:
    cday = CDay()
    customBusinessDay = CustomBusinessDay()
    customBusinessMonthEnd = CBMonthEnd()
    customBusinessMonthBegin = CBMonthBegin()
except NotImplementedError:
    cday = None
    customBusinessDay = None
    customBusinessMonthEnd = None
    customBusinessMonthBegin = None

monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
bmonthBegin = BMonthBegin()
cbmonthEnd = customBusinessMonthEnd
cbmonthBegin = customBusinessMonthBegin
bquarterEnd = BQuarterEnd()
quarterEnd = QuarterEnd()
byearEnd = BYearEnd()
week = Week()

# Functions/offsets to roll dates forward
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)
thisQuarterEnd = QuarterEnd(0)

# Functions to check where a date lies
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
def _resolve_offset(freq, kwds):
    """Resolve the deprecated 'timeRule'/'offset' keywords into an offset.

    Falls back to *freq* when neither deprecated keyword is present and
    emits a FutureWarning when one of them is used.
    """
    if 'timeRule' in kwds or 'offset' in kwds:
        # 'timeRule' wins over 'offset' when both are given.
        offset = kwds.get('offset', None)
        offset = kwds.get('timeRule', offset)
        if isinstance(offset, compat.string_types):
            offset = getOffset(offset)
        warn = True
    else:
        offset = freq
        warn = False

    if warn:
        import warnings
        warnings.warn("'timeRule' and 'offset' parameters are deprecated,"
                      " please use 'freq' instead",
                      FutureWarning)

    return offset
| {
"repo_name": "sauloal/cnidaria",
"path": "scripts/venv/lib/python2.7/site-packages/pandas/core/datetools.py",
"copies": "6",
"size": "1729",
"license": "mit",
"hash": -7689920604292522000,
"line_mean": 26.4444444444,
"line_max": 74,
"alpha_frac": 0.7004048583,
"autogenerated": false,
"ratio": 3.6020833333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 63
} |
"""A collection of random tools for dealing with dates in Python"""
from datetime import datetime, timedelta
from dateutil import parser
from dateutil.relativedelta import relativedelta
import calendar
#-------------------------------------------------------------------------------
# Miscellaneous date functions
def format(dt):
    """Returns date in YYYYMMDD format."""
    return '{:%Y%m%d}'.format(dt)
# Day zero of the OLE automation date system used by Excel.
OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)


def ole2datetime(oledt):
    """function for converting excel date to normal date format"""
    val = float(oledt)
    # Excel has a bug where it thinks the date 2/29/1900 exists
    # we just reject any date before 3/1/1900.
    if val < 61:
        # BUG FIX: raise the specific ValueError (still a subclass of
        # Exception, so existing callers keep working) instead of the
        # overly generic bare Exception.
        raise ValueError("Value is outside of acceptable range: %s " % val)
    return OLE_TIME_ZERO + timedelta(days=val)
def to_datetime(arg):
    """Attempts to convert arg to datetime"""
    # Pass through values that are already fine as-is.
    if isinstance(arg, datetime) or arg is None:
        return arg
    try:
        return parser.parse(arg)
    except Exception:
        # Unparseable input is returned unchanged.
        return arg
def normalize_date(dt):
    """Return *dt* with its time-of-day fields reset to midnight."""
    year, month, day = dt.year, dt.month, dt.day
    return datetime(year, month, day)
#-------------------------------------------------------------------------------
# DateOffset
class DateOffset(object):
    """
    Standard kind of date increment used for a date range.

    Works exactly like relativedelta in terms of the keyword args you
    pass in, use of the keyword n is discouraged-- you would be better
    off specifying n in the keywords you use, but regardless it is
    there for you. n is needed for DateOffset subclasses.

    DateOffets work as follows. Each offset specify a set of dates
    that conform to the DateOffset. For example, Bday defines this
    set to be the set of dates that are weekdays (M-F). To test if a
    date is in the set of a DateOffset dateOffset we can use the
    onOffset method: dateOffset.onOffset(date).

    If a date is not on a valid date, the rollback and rollforward
    methods can be used to roll the date to the nearest valid date
    before/after the date.

    DateOffsets can be created to move dates forward a given number of
    valid dates. For example, Bday(2) can be added to a date to move
    it two business days forward. If the date does not start on a
    valid date, first it is moved to a valid date. Thus psedo code
    is:

        def __add__(date):
            date = rollback(date)  # does nothing is date is valid
            return date + <n number of periods>

    When a date offset is created for a negitive number of periods,
    the date is first rolled forward. The pseudo code is:

        def __add__(date):
            date = rollforward(date)  # does nothing is date is valid
            return date + <n number of periods>

    Zero presents a problem. Should it roll forward or back? We
    arbitrarily have it rollforward:

        date + BDay(0) == BDay.rollforward(date)

    Since 0 is a bit weird, we suggest avoiding its use.
    """
    # For some offsets, want to drop the time information off the
    # first date
    _normalizeFirst = False

    def __init__(self, n=1, **kwds):
        self.n = int(n)
        self.kwds = kwds

    def apply(self, other):
        """Shift *other* by this offset and return the result."""
        if len(self.kwds) > 0:
            # relativedelta keywords cannot be scaled, so apply them |n| times.
            if self.n > 0:
                for i in xrange(self.n):
                    other = other + relativedelta(**self.kwds)
            else:
                for i in xrange(-self.n):
                    other = other - relativedelta(**self.kwds)
            return other
        else:
            # No keywords: interpret n as a plain calendar-day count.
            return other + timedelta(self.n)

    def isAnchored(self):
        return (self.n == 1)

    def copy(self):
        return self.__class__(self.n, **self.kwds)

    def _params(self):
        # Identity tuple used by __eq__/__hash__: class plus every
        # attribute except the raw 'kwds' dict.
        attrs = sorted((item for item in self.__dict__.iteritems()
                        if item[0] != 'kwds'))
        params = tuple([str(self.__class__)] + attrs)
        return params

    def __repr__(self):
        className = getattr(self, '_outputName', type(self).__name__)
        exclude = set(['n', 'inc'])
        attrs = []
        for attr in self.__dict__:
            # Skip private attributes and an empty kwds dict.
            if ((attr == 'kwds' and len(self.kwds) == 0)
                    or attr.startswith('_')):
                continue
            if attr not in exclude:
                attrs.append('='.join((attr, repr(getattr(self, attr)))))
        if abs(self.n) != 1:
            plural = 's'
        else:
            plural = ''
        out = '<%s ' % self.n + className + plural
        if attrs:
            out += ': ' + ', '.join(attrs)
        out += '>'
        return out

    def __eq__(self, other):
        return self._params() == other._params()

    def __hash__(self):
        return hash(self._params())

    def __call__(self, other):
        return self.apply(other)

    def __add__(self, other):
        return self.apply(other)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        if isinstance(other, datetime):
            raise TypeError('Cannot subtract datetime from offset!')
        elif type(other) == type(self):
            return self.__class__(self.n - other.n, **self.kwds)
        else:  # pragma: no cover
            raise TypeError('Cannot subtract %s from %s'
                            % (type(other), type(self)))

    def __rsub__(self, other):
        # date - offset == date + (-offset)
        return self.__class__(-self.n, **self.kwds) + other

    def __mul__(self, someInt):
        return self.__class__(n=someInt * self.n, **self.kwds)

    def __rmul__(self, someInt):
        return self.__mul__(someInt)

    def __neg__(self):
        return self.__class__(-self.n, **self.kwds)

    def rollback(self, someDate):
        """Roll provided date backward to next offset only if not on offset"""
        if self._normalizeFirst:
            someDate = normalize_date(someDate)
        if not self.onOffset(someDate):
            someDate = someDate - self.__class__(1, **self.kwds)
        return someDate

    def rollforward(self, someDate):
        """Roll provided date forward to next offset only if not on offset"""
        if self._normalizeFirst:
            someDate = normalize_date(someDate)
        if not self.onOffset(someDate):
            someDate = someDate + self.__class__(1, **self.kwds)
        return someDate

    def onOffset(self, someDate):
        # Default (slow) method for determining if some date is a
        # member of the DateRange generated by this offset. Subclasses
        # may have this re-implemented in a nicer way.
        return someDate == ((someDate + self) - self)
class BDay(DateOffset):
    """
    DateOffset subclass representing possibly n business days
    """
    # Dates are normalized (time-of-day handling) before rolling.
    _normalizeFirst = True
    _outputName = 'BusinessDay'

    def __init__(self, n=1, **kwds):
        # n: number of business days to step.
        # kwds may carry 'offset' (a timedelta applied after stepping)
        # and 'normalize' (drop the time-of-day from results).
        self.n = int(n)
        self.kwds = kwds
        self.offset = kwds.get('offset', timedelta(0))
        self.normalize = kwds.get('normalize', True)

    def __repr__(self):
        # Renders e.g. "<2 BusinessDays>" or "<1 BusinessDay: offset=...>".
        className = getattr(self, '_outputName', self.__class__.__name__)
        attrs = []
        if self.offset:
            attrs = ['offset=%s' % repr(self.offset)]
        if abs(self.n) != 1:
            plural = 's'
        else:
            plural = ''
        out = '<%s ' % self.n + className + plural
        if attrs:
            out += ': ' + ', '.join(attrs)
        out += '>'
        return out

    def isAnchored(self):
        # Only a single-step BDay defines a fixed recurring schedule.
        return (self.n == 1)

    def apply(self, other):
        """Step ``other`` by ``self.n`` business days (or fold a timedelta)."""
        if isinstance(other, datetime):
            n = self.n
            # n == 0 on a weekend still rolls forward to the next weekday.
            if n == 0 and other.weekday() > 4:
                n = 1
            result = other
            # Walk one calendar day at a time in the sign of n, decrementing
            # the remaining count only when we land on a weekday.
            while n != 0:
                k = n // abs(n)
                result = result + timedelta(k)
                if result.weekday() < 5:
                    n -= k
            if self.normalize:
                # Drop the time-of-day component.
                result = datetime(result.year, result.month, result.day)
            if self.offset:
                result = result + self.offset
            return result
        elif isinstance(other, (timedelta, Tick)):
            # Combining with a plain delta folds it into the trailing offset.
            return BDay(self.n, offset=self.offset + other,
                        normalize=self.normalize)
        else:
            raise Exception('Only know how to combine business day with '
                            'datetime or timedelta!')

    @classmethod
    def onOffset(cls, someDate):
        # Monday=0 .. Friday=4 are business days.
        return someDate.weekday() < 5
class MonthEnd(DateOffset):
    _normalizeFirst = True
    # NOTE(review): this string follows a class attribute, so it is a bare
    # statement rather than the class __doc__; left in place to preserve
    # runtime attributes exactly.
    """DateOffset of one month end"""

    def apply(self, other):
        """Move ``other`` to a month-end boundary ``n`` months away."""
        n = self.n
        _, nDaysInMonth = calendar.monthrange(other.year, other.month)
        if other.day != nDaysInMonth:
            # Not on a month end: snap back to the previous month end first;
            # for backward steps that snap already counts as one step.
            other = other + relativedelta(months=-1, day=31)
            if n <= 0:
                n = n + 1
        other = other + relativedelta(months=n, day=31)
        return other

    @classmethod
    def onOffset(cls, someDate):
        __junk, nDaysInMonth = calendar.monthrange(someDate.year,
                                                   someDate.month)
        return someDate.day == nDaysInMonth
class BMonthEnd(DateOffset):
    """DateOffset increments between business EOM dates"""
    _outputName = 'BusinessMonthEnd'
    _normalizeFirst = True

    def isAnchored(self):
        # Only the single-step form defines a fixed recurring schedule.
        return (self.n == 1)

    def apply(self, other):
        """Move ``other`` to the last business day of a month n steps away."""
        n = self.n
        wkday, nDaysInMonth = calendar.monthrange(other.year, other.month)
        # Last weekday of the month: back off from the last calendar day by
        # however far it overshoots Friday.
        lastBDay = nDaysInMonth - max(((wkday + nDaysInMonth - 1) % 7) - 4, 0)
        # Adjust the step count when we are strictly before/after the
        # month's last business day, so the current month counts correctly.
        if n > 0 and not other.day >= lastBDay:
            n = n - 1
        elif n <= 0 and other.day > lastBDay:
            n = n + 1
        other = other + relativedelta(months=n, day=31)
        # If the calendar month end is a weekend, back up to Friday.
        if other.weekday() > 4:
            other = other - BDay()
        return other
class Week(DateOffset):
    """
    weekday
    0: Mondays
    1: Tuedays
    2: Wednesdays
    3: Thursdays
    4: Fridays
    5: Saturdays
    6: Sundays
    """
    _normalizeFirst = True

    def __init__(self, n=1, **kwds):
        # weekday is optional: without it the offset is a plain 7-day step;
        # with it, dates are first aligned to that weekday.
        self.n = n
        self.weekday = kwds.get('weekday', None)
        if self.weekday is not None:
            if self.weekday < 0 or self.weekday > 6:
                raise Exception('Day must be 0<=day<=6, got %d' %
                                self.weekday)
        self.inc = timedelta(weeks=1)
        self.kwds = kwds

    def isAnchored(self):
        # Anchored only when pinned to a specific weekday, single step.
        return (self.n == 1 and self.weekday is not None)

    def apply(self, other):
        """Step ``other`` by ``n`` weeks, aligning to ``weekday`` if set."""
        if self.weekday is None:
            return other + self.n * self.inc
        if self.n > 0:
            k = self.n
            otherDay = other.weekday()
            if otherDay != self.weekday:
                # Roll forward to the target weekday; that roll consumes
                # one of the forward steps.
                other = other + timedelta((self.weekday - otherDay) % 7)
                k = k - 1
            for i in xrange(k):  # Python 2 range
                other = other + self.inc
        else:
            k = self.n
            otherDay = other.weekday()
            if otherDay != self.weekday:
                # Roll forward to the target weekday first, then step back.
                other = other + timedelta((self.weekday - otherDay) % 7)
            for i in xrange(-k):
                other = other - self.inc
        return other

    def onOffset(self, someDate):
        return someDate.weekday() == self.weekday
class BQuarterEnd(DateOffset):
    """DateOffset increments between business Quarter dates
    startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
    startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
    startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
    """
    _outputName = 'BusinessQuarterEnd'
    _normalizeFirst = True

    def __init__(self, n=1, **kwds):
        self.n = n
        # Quarter anchor month, 1..3 (see class docstring examples).
        self.startingMonth = kwds.get('startingMonth', 3)
        if self.startingMonth < 1 or self.startingMonth > 3:
            raise Exception('Start month must be 1<=day<=3, got %d'
                            % self.startingMonth)
        self.offset = BMonthEnd(3)
        self.kwds = kwds

    def isAnchored(self):
        return (self.n == 1 and self.startingMonth is not None)

    def apply(self, other):
        """Move ``other`` to the last business day of a quarter n steps away."""
        n = self.n
        wkday, nDaysInMonth = calendar.monthrange(other.year, other.month)
        # Last weekday of the current month.
        lastBDay = nDaysInMonth - max(((wkday + nDaysInMonth - 1) % 7) - 4, 0)
        # Months remaining until the quarter's anchor month (0 if we are in it).
        monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
        if monthsToGo == 3:
            monthsToGo = 0
        # Count the current quarter as a step where appropriate.
        if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):
            n = n - 1
        elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
            n = n + 1
        other = other + relativedelta(months=monthsToGo + 3*n, day=31)
        # If the calendar quarter end is a weekend, back up to Friday.
        if other.weekday() > 4:
            other = other - BDay()
        return other

    def onOffset(self, someDate):
        modMonth = (someDate.month - self.startingMonth) % 3
        return BMonthEnd().onOffset(someDate) and modMonth == 0
class BYearEnd(DateOffset):
    """DateOffset increments between business EOM dates"""
    _outputName = 'BusinessYearEnd'
    _normalizeFirst = True

    def __init__(self, n=1, **kwds):
        # Anchor month of the "year end" (defaults to December).
        self.month = kwds.get('month', 12)
        if self.month < 1 or self.month > 12:
            raise Exception('Month must go from 1 to 12')
        DateOffset.__init__(self, n=n, **kwds)

    def apply(self, other):
        """Move ``other`` to the last business day of the anchor month,
        ``n`` years away."""
        n = self.n
        if self._normalizeFirst:
            other = normalize_date(other)
        wkday, nDaysInMonth = calendar.monthrange(other.year, self.month)
        # Last weekday of the anchor month in other's year.
        lastBDay = nDaysInMonth - max(((wkday + nDaysInMonth - 1) % 7) - 4, 0)
        years = n
        # Count the current year as a step when other has not yet passed
        # (resp. has already passed) this year's business year end.
        if n > 0:
            if (other.month < self.month or
                    (other.month == self.month and other.day < lastBDay)):
                years -= 1
        elif n <= 0:
            if (other.month > self.month or
                    (other.month == self.month and other.day > lastBDay)):
                years += 1
        other = other + relativedelta(years=years)
        _, days_in_month = calendar.monthrange(other.year, self.month)
        result = datetime(other.year, self.month, days_in_month)
        # If the calendar month end is a weekend, back up to Friday.
        if result.weekday() > 4:
            result = result - BDay()
        return result
class YearEnd(DateOffset):
    """DateOffset increments between calendar year ends"""
    _normalizeFirst = True

    def apply(self, other):
        """Move ``other`` to a December 31st, ``n`` years away."""
        n = self.n
        if not (other.month == 12 and other.day == 31):
            # Snap back to the previous Dec 31; for backward steps that
            # rewind already counts as one step.
            other = datetime(other.year - 1, 12, 31)
            if n <= 0:
                n += 1
        return other + relativedelta(years=n)

    @classmethod
    def onOffset(cls, someDate):
        return (someDate.month, someDate.day) == (12, 31)
class YearBegin(DateOffset):
    """DateOffset increments between calendar year begin dates"""
    _normalizeFirst = True

    def apply(self, other):
        """Move ``other`` to a January 1st, ``n`` years away."""
        n = self.n
        if (other.month, other.day) != (1, 1):
            # Snap back to this year's Jan 1; for backward steps that
            # rewind already counts as one step.
            other = datetime(other.year, 1, 1)
            if n <= 0:
                n += 1
        return other + relativedelta(years=n, day=1)

    @classmethod
    def onOffset(cls, someDate):
        return (someDate.month, someDate.day) == (1, 1)
#-------------------------------------------------------------------------------
# Ticks
class Tick(DateOffset):
    # Ticks are pure time deltas; no calendar normalization is needed.
    _normalizeFirst = False
    # Cache slot for the computed delta (filled lazily per instance).
    _delta = None
    # One unit of this tick; subclasses override (default: 1 millisecond).
    _inc = timedelta(microseconds=1000)

    @property
    def delta(self):
        # Lazily computed n * _inc, cached on the instance.
        if self._delta is None:
            self._delta = self.n * self._inc
        return self._delta

    def apply(self, other):
        """Shift a datetime/timedelta by this tick, or merge with a same-type
        Tick by adding the counts.

        NOTE(review): any other input type falls through and returns None
        silently -- confirm whether that is ever reachable before relying
        on it.
        """
        if isinstance(other, (datetime, timedelta)):
            return other + self.delta
        elif isinstance(other, type(self)):
            return type(self)(self.n + other.n)
class Hour(Tick):
    """Tick of one hour."""
    _inc = timedelta(hours=1)
class Minute(Tick):
    """Tick of one minute."""
    _inc = timedelta(minutes=1)
class Second(Tick):
    """Tick of one second."""
    _inc = timedelta(seconds=1)
# Shared, ready-made offset instances for common increments.
day = DateOffset()
bday = BDay(normalize=True)
businessDay = bday  # alias
monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
businessMonthEnd = bmonthEnd  # alias
bquarterEnd = BQuarterEnd()
byearEnd = BYearEnd()
week = Week()

# Functions/offsets to roll dates forward (n=0 rolls to the nearest
# on-offset date at or after the input instead of stepping past it).
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)

# Functions to check where a date lies (bound onOffset predicates).
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
#-------------------------------------------------------------------------------
# Offset names ("time rules") and related functions
# Mapping from time-rule names to their canonical offset instances.
_offsetMap = {
    "WEEKDAY"  : BDay(1),
    "EOM"      : BMonthEnd(1),
    "W@MON"    : Week(weekday=0),
    "W@TUE"    : Week(weekday=1),
    "W@WED"    : Week(weekday=2),
    "W@THU"    : Week(weekday=3),
    "W@FRI"    : Week(weekday=4),
    "Q@JAN"    : BQuarterEnd(startingMonth=1),
    "Q@FEB"    : BQuarterEnd(startingMonth=2),
    "Q@MAR"    : BQuarterEnd(startingMonth=3),
    "A@JAN"    : BYearEnd(month=1),
    "A@FEB"    : BYearEnd(month=2),
    "A@MAR"    : BYearEnd(month=3),
    "A@APR"    : BYearEnd(month=4),
    "A@MAY"    : BYearEnd(month=5),
    "A@JUN"    : BYearEnd(month=6),
    "A@JUL"    : BYearEnd(month=7),
    "A@AUG"    : BYearEnd(month=8),
    "A@SEP"    : BYearEnd(month=9),
    "A@OCT"    : BYearEnd(month=10),
    "A@NOV"    : BYearEnd(month=11),
    "A@DEC"    : BYearEnd()
}

# Reverse lookup: offset instance -> rule name.  Relies on offsets being
# hashable/comparable via their parameter tuples (__hash__/__eq__).
_offsetNames = dict([(v, k) for k, v in _offsetMap.iteritems()])
def inferTimeRule(index):
    """Guess the named time rule whose offset steps through ``index``.

    Only the first three dates are inspected; a rule matches when applying
    its offset twice reproduces the second and third dates.
    """
    if len(index) < 3:
        raise Exception('Need at least three dates to infer time rule!')
    first, second, third = index[0], index[1], index[2]
    for rule, offset in _offsetMap.iteritems():
        steps_match = (first + offset == second) and (second + offset == third)
        if steps_match:
            return rule
    raise Exception('Could not infer time rule from data!')
def getOffset(name):
    """
    Return DateOffset object associated with rule name

    Example
    -------
    getOffset('EOM') --> BMonthEnd(1)
    """
    try:
        return _offsetMap[name]
    except KeyError:
        raise Exception('Bad rule name requested: %s!' % name)
def hasOffsetName(offset):
    """True if ``offset`` has a registered time-rule name."""
    return offset in _offsetNames
def getOffsetName(offset):
    """
    Return rule name associated with a DateOffset object

    Example
    -------
    getOffsetName(BMonthEnd(1)) --> 'EOM'
    """
    try:
        return _offsetNames[offset]
    except KeyError:
        raise Exception('Bad offset name requested: %s!' % offset)
| {
"repo_name": "willgrass/pandas",
"path": "pandas/core/datetools.py",
"copies": "1",
"size": "19053",
"license": "bsd-3-clause",
"hash": -2739760356557671400,
"line_mean": 28.9804878049,
"line_max": 80,
"alpha_frac": 0.5440088175,
"autogenerated": false,
"ratio": 3.9083076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9938616286467478,
"avg_score": 0.00274004466804289,
"num_lines": 615
} |
# a collection of sample visualization functions
# for binding to plotnode
import matplotlib.pyplot as plt
import numpy as np
def viz_square(data, normalize=True, cmap=plt.cm.gray, padsize=1, padval=0):
    """
    takes a np.ndarray of shape (n, height, width) or (n, height, width, channels)
    visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
    However, this only draws first input channel

    Fixes: normalization now operates on a float copy, so the caller's
    array is no longer mutated in place, integer arrays no longer fail on
    in-place true division, and a constant array no longer divides by zero.
    """
    if normalize:
        # Work on a float copy: protects the caller's array and supports
        # true division regardless of the input dtype.
        data = data.astype(float, copy=True)
        data -= data.min()
        peak = data.max()
        if peak != 0:  # constant input stays all-zeros instead of dividing by 0
            data /= peak
    n = int(np.ceil(np.sqrt(data.shape[0])))  # force square
    # Pad the batch up to n**2 tiles and add a border around each tile.
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.matshow(data, cmap=cmap)
def viz_conv_weights(ctx, weight):
    """Visualize all output filters for the first input channel.

    weight: conv kernel; the transpose below implies layout
    (h, w, in_channels, out_channels) -- TODO confirm against the model.
    """
    # Reorder to (out_channels, h, w, in_channels), keep input channel 0.
    viz_square(weight.transpose(3,0,1,2)[:,:,:,0])
def viz_activations(ctx, m):
    """Draw activation matrix ``m`` transposed: batch on x, unit on y."""
    plt.matshow(m.T,cmap=plt.cm.gray)
    plt.title("LeNet Predictions")
    plt.xlabel("Batch")
    plt.ylabel("Digit Activation")
def viz_weight_hist(ctx, w):
    """Histogram of all weight values in ``w`` (flattened)."""
    plt.hist(w.flatten())
def viz_conv_hist(ctx, w):
    """Grid of weight histograms, one subplot per output channel of ``w``."""
    n_out = w.shape[3]
    side = int(np.ceil(np.sqrt(n_out)))  # force square grid
    fig, axes = plt.subplots(side, side, sharex=True, sharey=True)
    for idx in range(n_out):  # one histogram per output channel
        row, col = divmod(idx, side)
        ax = axes[row, col]
        ax.hist(w[:, :, :, idx].flatten())
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
def viz_fc_weights(ctx, w):
    # visualize fully connected weights
    plt.matshow(w.T,cmap=plt.cm.gray)
def watch_loss(ctx, loss):
    """Append ``loss`` to a history stored on ``ctx`` and plot the curve."""
    if not hasattr(ctx, 'loss_history'):
        ctx.loss_history = []
    history = ctx.loss_history
    history.append(loss)
    plt.plot(history)
    plt.ylabel('loss')
| {
"repo_name": "ericjang/tdb",
"path": "tdb/examples/viz.py",
"copies": "1",
"size": "2019",
"license": "apache-2.0",
"hash": 3068488024545575400,
"line_mean": 34.4210526316,
"line_max": 105,
"alpha_frac": 0.6478454681,
"autogenerated": false,
"ratio": 2.888412017167382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.889320456084569,
"avg_score": 0.028610584884338416,
"num_lines": 57
} |
"""A collection of sensors helpers."""
import math
import numpy as np
import carla
class Sensor(object):
    """Base class for wrapping sensors."""

    def __init__(self, parent_actor: carla.Actor, transform=carla.Transform(), attachment_type=None,
                 attributes: dict = None):
        # NOTE(review): carla.Transform() as a default is a shared mutable
        # default argument; it is only passed through to spawn_actor here,
        # but confirm nothing mutates it.
        self.parent = parent_actor
        self.world = self.parent.get_world()
        self.attributes = attributes or dict()
        self.event_callbacks = []
        # Look for callback(s); 'callback' (single) wins over 'callbacks'
        # (list) when both are present.
        if 'callback' in self.attributes:
            self.event_callbacks.append(self.attributes.pop('callback'))
        elif 'callbacks' in self.attributes:
            for callback in self.attributes.pop('callbacks'):
                self.event_callbacks.append(callback)
        # detector-sensors retrieve data only when triggered (not at each tick!)
        self.sensor, self.is_detector = self._spawn(transform, attachment_type)

    @property
    def name(self) -> str:
        # Blueprint id, e.g. 'sensor.camera.rgb'; subclasses must override.
        raise NotImplementedError

    def set_parent_actor(self, actor: carla.Actor):
        self.parent = actor

    def add_callback(self, callback):
        assert callable(callback)
        self.event_callbacks.append(callback)

    def clear_callbacks(self):
        self.event_callbacks.clear()

    @staticmethod
    def create(sensor_type, **kwargs):
        """Factory: map a blueprint id string to the matching wrapper class."""
        if sensor_type == 'sensor.other.collision':
            return CollisionDetector(**kwargs)
        elif sensor_type == 'sensor.other.lane_invasion':
            return LaneInvasionSensor(**kwargs)
        elif sensor_type == 'sensor.other.gnss':
            return GnssSensor(**kwargs)
        elif sensor_type == 'sensor.other.imu':
            return IMUSensor(**kwargs)
        elif sensor_type == 'sensor.camera.rgb':
            return RGBCameraSensor(**kwargs)
        elif sensor_type == 'sensor.camera.semantic_segmentation':
            return SemanticCameraSensor(**kwargs)
        elif sensor_type == 'sensor.camera.depth':
            return DepthCameraSensor(**kwargs)
        elif sensor_type == 'sensor.other.obstacle':
            return ObstacleDetector(**kwargs)
        elif sensor_type == 'sensor.lidar.ray_cast':
            return LidarSensor(**kwargs)
        elif sensor_type == 'sensor.other.radar':
            return RadarSensor(**kwargs)
        else:
            raise ValueError(f'String `{sensor_type}` does not denote a valid sensor!')

    def start(self):
        """Start listening for events"""
        if not self.sensor.is_listening:
            self.sensor.listen(self.on_event)
        else:
            print(f'Sensor {self.name} is already been started!')

    def stop(self):
        """Stop listening for events"""
        self.sensor.stop()

    def _spawn(self, transform, attachment_type=None):
        """Spawns itself within a carla.World."""
        if attachment_type is None:
            attachment_type = carla.AttachmentType.Rigid
        sensor_bp: carla.ActorBlueprint = self.world.get_blueprint_library().find(self.name)
        # Apply any remaining attributes to the blueprint (unknown ones are
        # reported but not fatal).
        for attr, value in self.attributes.items():
            if sensor_bp.has_attribute(attr):
                sensor_bp.set_attribute(attr, str(value))
            else:
                print(f'Sensor {self.name} has no attribute `{attr}`')
        sensor_actor = self.world.spawn_actor(sensor_bp, transform, self.parent, attachment_type)
        # Blueprints without 'sensor_tick' fire only when triggered.
        is_detector = not sensor_bp.has_attribute('sensor_tick')
        return sensor_actor, is_detector

    def on_event(self, event):
        # Fan the raw sensor event out to every registered callback.
        for callback in self.event_callbacks:
            callback(event)

    def destroy(self):
        # Stop and destroy the underlying actor, then drop all references.
        if self.sensor is not None:
            self.sensor.stop()
            self.sensor.destroy()
        self.sensor = None
        self.parent = None
        self.world = None
# -------------------------------------------------------------------------------------------------
# -- Camera Sensors
# -------------------------------------------------------------------------------------------------
class CameraSensor(Sensor):
    """Common behaviour for camera sensors producing carla.Image frames."""

    def __init__(self, color_converter=carla.ColorConverter.Raw, **kwargs):
        super().__init__(**kwargs)
        self.color_converter = color_converter

    @property
    def name(self):
        raise NotImplementedError

    def convert_image(self, image: carla.Image, color_converter=None):
        """Convert a carla.Image into an (height, width, 3) RGB uint8 array."""
        converter = color_converter or self.color_converter or carla.ColorConverter.Raw
        image.convert(converter)
        raw = np.frombuffer(image.raw_data, dtype=np.uint8)
        bgra = np.reshape(raw, (image.height, image.width, 4))
        # Drop the alpha channel and flip BGR -> RGB in a single slice
        # (channels 2, 1, 0).
        return bgra[:, :, 2::-1]

    def save_to_disk(self, image: carla.Image, path: str):
        """Saves the carla.Image to disk using its color_converter."""
        assert isinstance(image, carla.Image)
        assert isinstance(path, str)
        image.save_to_disk(path, color_converter=self.color_converter)
class RGBCameraSensor(CameraSensor):
    """RGB camera."""

    @property
    def name(self):
        return 'sensor.camera.rgb'
class DepthCameraSensor(CameraSensor):
    """Depth camera."""

    @property
    def name(self):
        return 'sensor.camera.depth'
class SemanticCameraSensor(CameraSensor):
    """Semantic-segmentation camera."""

    @property
    def name(self):
        return 'sensor.camera.semantic_segmentation'
# -------------------------------------------------------------------------------------------------
# -- Detector Sensors
# -------------------------------------------------------------------------------------------------
class CollisionDetector(Sensor):
    """Detector sensor fired on collision events."""

    def __init__(self, parent_actor, **kwargs):
        super().__init__(parent_actor, **kwargs)

    @property
    def name(self):
        return 'sensor.other.collision'
class LaneInvasionSensor(Sensor):
    """Detector sensor fired when the parent actor crosses a lane marking."""

    def __init__(self, parent_actor, **kwargs):
        super().__init__(parent_actor, **kwargs)

    @property
    def name(self):
        return 'sensor.other.lane_invasion'
class ObstacleDetector(Sensor):
    """Detector sensor fired when an obstacle is ahead of the parent actor."""

    def __init__(self, parent_actor, **kwargs):
        super().__init__(parent_actor, **kwargs)

    @property
    def name(self):
        return 'sensor.other.obstacle'
# -------------------------------------------------------------------------------------------------
# -- Other Sensors
# -------------------------------------------------------------------------------------------------
class LidarSensor(Sensor):
    """Ray-cast lidar sensor."""

    def __init__(self, parent_actor, **kwargs):
        super().__init__(parent_actor, **kwargs)

    @property
    def name(self):
        return 'sensor.lidar.ray_cast'
class RadarSensor(Sensor):
    """Radar sensor wrapper."""

    def __init__(self, parent_actor, **kwargs):
        super().__init__(parent_actor, **kwargs)

    @property
    def name(self):
        return 'sensor.other.radar'

    @staticmethod
    def convert(radar_measurement: carla.RadarMeasurement):
        """Converts a carla.RadarMeasurement into a numpy array [[velocity, altitude, azimuth, depth]]"""
        raw = np.frombuffer(radar_measurement.raw_data, dtype=np.dtype('f4'))
        # One row per detection, four floats each.
        return np.reshape(raw, (len(radar_measurement), 4))
class GnssSensor(Sensor):
    """GNSS sensor; caches the latest latitude/longitude reading."""

    def __init__(self, parent_actor, transform=carla.Transform(carla.Location(x=1.0, z=2.8)), **kwargs):
        super().__init__(parent_actor, transform=transform, **kwargs)
        # Latest fix, updated on every event.
        self.lat = 0.0
        self.lon = 0.0

    @property
    def name(self):
        return 'sensor.other.gnss'

    def on_event(self, event):
        super().on_event(event)
        self.lat = event.latitude
        self.lon = event.longitude

    def destroy(self):
        super().destroy()
        self.lat = None
        self.lon = None
class IMUSensor(Sensor):
    """IMU wrapper caching the latest accelerometer/gyroscope/compass values."""

    def __init__(self, parent_actor, **kwargs):
        super().__init__(parent_actor, **kwargs)
        self.accelerometer = (0.0, 0.0, 0.0)
        self.gyroscope = (0.0, 0.0, 0.0)
        self.compass = 0.0

    @property
    def name(self):
        return 'sensor.other.imu'

    def on_event(self, event):
        super().on_event(event)
        lo, hi = -99.9, 99.9

        def clamp(value):
            # Keep readings within the +/-99.9 display range.
            return max(lo, min(hi, value))

        acc = event.accelerometer
        self.accelerometer = (clamp(acc.x), clamp(acc.y), clamp(acc.z))
        gyro = event.gyroscope
        self.gyroscope = (clamp(math.degrees(gyro.x)),
                          clamp(math.degrees(gyro.y)),
                          clamp(math.degrees(gyro.z)))
        self.compass = math.degrees(event.compass)

    def destroy(self):
        super().destroy()
        self.accelerometer = None
        self.gyroscope = None
        self.compass = None
# -------------------------------------------------------------------------------------------------
# -- Sensors specifications
# -------------------------------------------------------------------------------------------------
class SensorSpecs(object):
    """Static helpers that build sensor-specification dicts for Sensor.create.

    Bug fix: ``gnss()`` previously requested a ``'imu'`` spec (copy-paste
    error); it now correctly builds a ``'gnss'`` spec.
    """
    # String -> carla enum lookups; None maps to a sensible default.
    ATTACHMENT_TYPE = {'SpringArm': carla.AttachmentType.SpringArm,
                       'Rigid': carla.AttachmentType.Rigid,
                       None: carla.AttachmentType.Rigid}

    COLOR_CONVERTER = {'Raw': carla.ColorConverter.Raw,
                       'CityScapesPalette': carla.ColorConverter.CityScapesPalette,
                       'Depth': carla.ColorConverter.Depth,
                       'LogarithmicDepth': carla.ColorConverter.LogarithmicDepth,
                       None: carla.ColorConverter.Raw}

    @staticmethod
    def get_position(position: str = None) -> carla.Transform:
        """Map a symbolic position name to a concrete mounting transform."""
        if position == 'top':
            return carla.Transform(carla.Location(x=-5.5, z=2.5), carla.Rotation(pitch=8.0))
        elif position == 'top-view':
            return carla.Transform(carla.Location(x=-8.0, z=6.0), carla.Rotation(pitch=6.0))
        elif position == 'front':
            return carla.Transform(carla.Location(x=1.5, z=1.8))
        elif position == 'on-top':
            return carla.Transform(carla.Location(x=-0.9, y=0.0, z=2.2))
        elif position == 'on-top2':
            return carla.Transform(carla.Location(x=0.0, y=0.0, z=2.2))
        elif position == 'radar':
            return carla.Transform(carla.Location(x=2.8, z=1.0), carla.Rotation(pitch=5))
        else:
            # Unknown/None: identity transform.
            return carla.Transform()

    @staticmethod
    def set(sensor_spec: dict, **kwargs):
        """Update an existing spec in place, translating symbolic values."""
        for key, value in kwargs.items():
            if key == 'position':
                sensor_spec['transform'] = SensorSpecs.get_position(value)
            elif key == 'attachment_type':
                sensor_spec[key] = SensorSpecs.ATTACHMENT_TYPE[value]
            elif key == 'color_converter':
                sensor_spec[key] = SensorSpecs.COLOR_CONVERTER[value]

    @staticmethod
    def add_callback(sensor_spec: dict, callback):
        """Attach an extra callback to a spec, merging with existing ones."""
        assert callable(callback)
        assert isinstance(sensor_spec, dict)
        attributes = sensor_spec.get('attributes', dict())
        if 'callback' in attributes:
            attributes['callbacks'] = [callback, attributes.pop('callback')]
        elif 'callbacks' in attributes:
            attributes['callbacks'].append(callback)
        else:
            attributes['callback'] = callback
        sensor_spec['attributes'] = attributes

    @staticmethod
    def set_color_converter(camera_spec: dict, color_converter: str = None):
        camera_spec['color_converter'] = SensorSpecs.COLOR_CONVERTER[color_converter]
        # Returned for call chaining (existing behaviour, kept as-is).
        return SensorSpecs

    @staticmethod
    def camera(kind: str, transform: carla.Transform = None, position: str = None, attachment_type=None,
               color_converter=None, **kwargs) -> dict:
        """Build a camera spec; extra kwargs become blueprint attributes."""
        assert kind in ['rgb', 'depth', 'semantic_segmentation']
        return dict(type='sensor.camera.' + kind,
                    transform=transform or SensorSpecs.get_position(position),
                    attachment_type=SensorSpecs.ATTACHMENT_TYPE[attachment_type],
                    color_converter=SensorSpecs.COLOR_CONVERTER[color_converter],
                    attributes=kwargs)

    @staticmethod
    def rgb_camera(transform: carla.Transform = None, position: str = None, attachment_type='SpringArm',
                   color_converter='Raw', **kwargs):
        return SensorSpecs.camera('rgb', transform, position, attachment_type, color_converter, **kwargs)

    @staticmethod
    def depth_camera(transform: carla.Transform = None, position: str = None, attachment_type='SpringArm',
                     color_converter='LogarithmicDepth', **kwargs):
        return SensorSpecs.camera('depth', transform, position, attachment_type, color_converter, **kwargs)

    @staticmethod
    def segmentation_camera(transform: carla.Transform = None, position: str = None, attachment_type='SpringArm',
                            color_converter='CityScapesPalette', **kwargs):
        return SensorSpecs.camera('semantic_segmentation', transform, position, attachment_type, color_converter, **kwargs)

    @staticmethod
    def detector(kind: str, transform: carla.Transform = None, position: str = None, attachment_type=None,
                 **kwargs) -> dict:
        """Build a detector spec (event-triggered sensors)."""
        assert kind in ['collision', 'lane_invasion', 'obstacle']
        return dict(type='sensor.other.' + kind,
                    transform=transform or SensorSpecs.get_position(position),
                    attachment_type=SensorSpecs.ATTACHMENT_TYPE[attachment_type],
                    attributes=kwargs)

    @staticmethod
    def collision_detector(transform: carla.Transform = None, position: str = None, attachment_type='Rigid', **kwargs):
        return SensorSpecs.detector('collision', transform, position, attachment_type, **kwargs)

    @staticmethod
    def lane_detector(transform: carla.Transform = None, position: str = None, attachment_type='Rigid', **kwargs):
        return SensorSpecs.detector('lane_invasion', transform, position, attachment_type, **kwargs)

    @staticmethod
    def obstacle_detector(transform: carla.Transform = None, position: str = None, attachment_type='Rigid', **kwargs):
        return SensorSpecs.detector('obstacle', transform, position, attachment_type, **kwargs)

    @staticmethod
    def other(kind: str, transform: carla.Transform = None, position: str = None, attachment_type=None, **kwargs) -> dict:
        """Build a spec for the remaining 'sensor.other.*' sensors."""
        assert kind in ['imu', 'gnss', 'radar']
        return dict(type='sensor.other.' + kind,
                    transform=transform or SensorSpecs.get_position(position),
                    attachment_type=SensorSpecs.ATTACHMENT_TYPE[attachment_type],
                    attributes=kwargs)

    @staticmethod
    def lidar(transform: carla.Transform = None, position: str = None, attachment_type=None, **kwargs) -> dict:
        return dict(type='sensor.lidar.ray_cast',
                    transform=transform or SensorSpecs.get_position(position),
                    attachment_type=SensorSpecs.ATTACHMENT_TYPE[attachment_type],
                    attributes=kwargs)

    @staticmethod
    def radar(transform: carla.Transform = None, position: str = None, attachment_type='Rigid', **kwargs):
        return SensorSpecs.other('radar', transform, position, attachment_type, **kwargs)

    @staticmethod
    def imu(transform: carla.Transform = None, position: str = None, attachment_type='Rigid', **kwargs):
        return SensorSpecs.other('imu', transform, position, attachment_type, **kwargs)

    @staticmethod
    def gnss(transform: carla.Transform = None, position: str = None, attachment_type='Rigid', **kwargs):
        # BUG FIX: previously built an 'imu' spec by mistake.
        return SensorSpecs.other('gnss', transform, position, attachment_type, **kwargs)
| {
"repo_name": "reinforceio/tensorforce",
"path": "tensorforce/environments/carla/sensors.py",
"copies": "1",
"size": "15700",
"license": "apache-2.0",
"hash": -559809156252893300,
"line_mean": 36.6498800959,
"line_max": 123,
"alpha_frac": 0.5924840764,
"autogenerated": false,
"ratio": 4.068411505571391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5160895581971391,
"avg_score": null,
"num_lines": null
} |
"""A collection of shared utilities for all encoders, not intended for external use."""
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
__author__ = 'willmcginnis'
def convert_cols_to_list(cols):
    """Normalize a column specification into a plain Python list.

    Accepts a Series/ndarray/scalar/set/tuple/Categorical (or the original
    list), always returning a list of column labels.

    Fix: ``pd.api.types.is_categorical`` was removed in pandas 2.0, which
    made this function raise AttributeError for any input reaching that
    branch (e.g. a plain list); an explicit isinstance check on
    Categorical/CategoricalIndex covers the same inputs.
    """
    if isinstance(cols, pd.Series):
        return cols.tolist()
    elif isinstance(cols, np.ndarray):
        return cols.tolist()
    elif np.isscalar(cols):
        # A single column name becomes a one-element list.
        return [cols]
    elif isinstance(cols, set):
        return list(cols)
    elif isinstance(cols, tuple):
        return list(cols)
    elif isinstance(cols, (pd.Categorical, pd.CategoricalIndex)):
        return cols.astype(object).tolist()
    # Already a list (or directly usable as one).
    return cols
def get_obj_cols(df):
    """
    Returns names of 'object' columns in the DataFrame.
    """
    return [column for column, dtype in zip(df.columns.values, df.dtypes)
            if dtype == 'object' or is_category(dtype)]
def is_category(dtype):
    """True if ``dtype`` is a pandas categorical dtype."""
    return pd.api.types.is_categorical_dtype(dtype)
def convert_input(X, columns=None, deep=False):
    """
    Unite data into a DataFrame.
    Objects that do not contain column names take the names from the argument.
    Optionally perform deep copy of the data.
    """
    if isinstance(X, pd.DataFrame):
        # Already a DataFrame: only copy when asked to.
        return X.copy(deep=True) if deep else X
    if isinstance(X, pd.Series):
        frame = pd.DataFrame(X, copy=deep)
    else:
        if columns is not None and np.size(X, 1) != len(columns):
            raise ValueError('The count of the column names does not correspond to the count of the columns')
        if isinstance(X, list):
            # lists are always copied, but for consistency, we still pass the argument
            frame = pd.DataFrame(X, columns=columns, copy=deep)
        elif isinstance(X, (np.generic, np.ndarray)):
            frame = pd.DataFrame(X, columns=columns, copy=deep)
        elif isinstance(X, csr_matrix):
            frame = pd.DataFrame(X.todense(), columns=columns, copy=deep)
        else:
            raise ValueError('Unexpected input type: %s' % (str(type(X))))
    # Coerce whatever can be numeric; leave the rest untouched.
    return frame.apply(lambda col: pd.to_numeric(col, errors='ignore'))
def convert_input_vector(y, index):
    """
    Unite target data type into a Series.
    If the target is a Series or a DataFrame, we preserve its index.
    But if the target does not contain index attribute, we use the index from the argument.
    """
    if y is None:
        raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None')
    if isinstance(y, pd.Series):
        return y
    elif isinstance(y, np.ndarray):
        if len(np.shape(y))==1:  # vector
            return pd.Series(y, name='target', index=index)
        elif len(np.shape(y))==2 and np.shape(y)[0]==1:  # single row in a matrix
            return pd.Series(y[0, :], name='target', index=index)
        elif len(np.shape(y))==2 and np.shape(y)[1]==1:  # single column in a matrix
            return pd.Series(y[:, 0], name='target', index=index)
        else:
            raise ValueError('Unexpected input shape: %s' % (str(np.shape(y))))
    elif np.isscalar(y):
        # A lone scalar is broadcast over the provided index.
        return pd.Series([y], name='target', index=index)
    elif isinstance(y, list):
        if len(y)==0 or (len(y)>0 and not isinstance(y[0], list)):  # empty list or a vector
            return pd.Series(y, name='target', index=index)
        elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1:  # single row in a matrix
            # NOTE(review): this branch also captures single-*column* lists
            # like [[1], [2]] before the branch below; flatten handles both.
            flatten = lambda y: [item for sublist in y for item in sublist]
            return pd.Series(flatten(y), name='target', index=index)
        elif len(y)==1 and isinstance(y[0], list):  # single column in a matrix
            return pd.Series(y[0], name='target', index=index)
        else:
            raise ValueError('Unexpected input shape')
    elif isinstance(y, pd.DataFrame):
        if len(list(y))==0:  # empty DataFrame
            # NOTE(review): pd.Series(DataFrame) is questionable on modern
            # pandas -- confirm this edge case is still reachable/valid.
            return pd.Series(y, name='target')
        if len(list(y))==1:  # a single column
            return y.iloc[:, 0]
        else:
            raise ValueError('Unexpected input shape: %s' % (str(y.shape)))
    else:
        return pd.Series(y, name='target', index=index)  # this covers tuples and other directly convertible types
def get_generated_cols(X_original, X_transformed, to_transform):
    """
    Returns a list of the generated/transformed columns.

    Arguments:
        X_original: df
            the original (input) DataFrame.
        X_transformed: df
            the transformed (current) DataFrame.
        to_transform: [str]
            a list of columns that were transformed (as in the original DataFrame), commonly self.cols.

    Output:
        a list of columns that were transformed (as in the current DataFrame).

    Fix: the previous version abused list comprehensions for their
    ``list.remove`` side effects and discarded the resulting lists; plain
    loops express the intent directly with identical behaviour (including
    first-occurrence removal for duplicated column names).
    """
    # Columns of the original frame that were NOT transformed...
    untouched_cols = list(X_original.columns)
    for col in to_transform:
        untouched_cols.remove(col)
    # ...anything else in the transformed frame must have been generated.
    generated_cols = list(X_transformed.columns)
    for col in untouched_cols:
        generated_cols.remove(col)
    return generated_cols
| {
"repo_name": "wdm0006/categorical_encoding",
"path": "category_encoders/utils.py",
"copies": "1",
"size": "5083",
"license": "bsd-3-clause",
"hash": -929589737662242600,
"line_mean": 36.375,
"line_max": 139,
"alpha_frac": 0.615974818,
"autogenerated": false,
"ratio": 3.8017950635751685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49177698815751686,
"avg_score": null,
"num_lines": null
} |
"""A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords",
"digits", "hexdigits", "octdigits", "printable", "punctuation",
"whitespace", "Formatter", "Template"]
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
# Raw string: '\]' is not a recognized escape sequence, so the plain
# literal triggered an invalid-escape DeprecationWarning (W605); the raw
# form yields the byte-identical value (backslash followed by ']').
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """capwords(s [,sep]) -> string

    Split the argument into words using split, capitalize each word
    using capitalize, and join the capitalized words using join.  If
    the optional second argument sep is absent or None, runs of
    whitespace characters are replaced by a single space and leading
    and trailing whitespace are removed; otherwise sep is used to
    split and join the words.
    """
    joiner = sep or ' '
    capitalized = [word.capitalize() for word in s.split(sep)]
    return joiner.join(capitalized)
####################################################################
import re as _re
from collections import ChainMap as _ChainMap
class _TemplateMetaclass(type):
    """Metaclass that compiles each Template subclass's placeholder regex.

    On class creation the class-level ``delimiter`` and ``idpattern``
    are interpolated into the skeleton below and the compiled result is
    stored back on the class as ``pattern``.
    """

    # Skeleton regex; %(delim)s and %(id)s are filled in by __init__
    # unless the class body supplies a complete 'pattern' of its own.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) |   # Escape sequence of two delimiters
      (?P<named>%(id)s)      |   # delimiter and a Python identifier
      {(?P<braced>%(id)s)}   |   # delimiter and a braced identifier
      (?P<invalid>)              # Other ill-formed delimiter exprs
    )
    """

    def __init__(cls, name, bases, dct):
        super(_TemplateMetaclass, cls).__init__(name, bases, dct)
        # A class that defines 'pattern' in its own body supplies the
        # complete regex; otherwise one is built from its attributes.
        if 'pattern' in dct:
            pattern = cls.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(cls.delimiter),
                'id' : cls.idpattern,
                }
        cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
class Template(metaclass=_TemplateMetaclass):
    """A string class for supporting $-substitutions."""

    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'
    flags = _re.IGNORECASE

    def __init__(self, template):
        self.template = template

    # Search for $$, $identifier, ${identifier}, and any bare $'s

    def _invalid(self, mo):
        # Report the line/column of an ill-formed placeholder.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(keepends=True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))

    def substitute(*args, **kws):
        # Strict substitution: unknown or malformed placeholders raise.
        # Implemented descriptor-style (no explicit 'self' parameter) so
        # that 'self' may legitimately appear as a substitution keyword.
        if not args:
            raise TypeError("descriptor 'substitute' of 'Template' object "
                            "needs an argument")
        self, *args = args  # allow the "self" keyword be passed
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # Keyword arguments take priority over the positional mapping.
            mapping = _ChainMap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)

    def safe_substitute(*args, **kws):
        # Lenient substitution: unknown/invalid placeholders are left
        # in the output unchanged instead of raising.
        if not args:
            raise TypeError("descriptor 'safe_substitute' of 'Template' object "
                            "needs an argument")
        self, *args = args  # allow the "self" keyword be passed
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _ChainMap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    # Missing key: emit the placeholder text verbatim.
                    return mo.group()
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return mo.group()
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str.
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split
class Formatter:
    """Implements str.format()-style formatting (see PEP 3101).

    The hard parts are reused from the C implementation, exposed as the
    "_"-prefixed helpers of str via the _string module.
    """

    def format(*args, **kwargs):
        # Descriptor-style signature (no explicit 'self') so that 'self'
        # and 'format_string' may also appear as format keyword fields.
        if not args:
            raise TypeError("descriptor 'format' of 'Formatter' object "
                            "needs an argument")
        self, *args = args  # allow the "self" keyword be passed
        try:
            format_string, *args = args  # allow the "format_string" keyword be passed
        except ValueError:
            if 'format_string' in kwargs:
                format_string = kwargs.pop('format_string')
                import warnings
                warnings.warn("Passing 'format_string' as keyword argument is "
                              "deprecated", DeprecationWarning, stacklevel=2)
            else:
                raise TypeError("format() missing 1 required positional "
                                "argument: 'format_string'") from None
        return self.vformat(format_string, args, kwargs)

    def vformat(self, format_string, args, kwargs):
        # Track which arguments were consumed so subclasses may complain
        # about unused ones via check_unused_args().
        used_args = set()
        result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
        self.check_unused_args(used_args, args, kwargs)
        return result

    def _vformat(self, format_string, args, kwargs, used_args, recursion_depth,
                 auto_arg_index=0):
        # Recursive core.  recursion_depth bounds nesting of format specs
        # (e.g. '{0:{1}}'); auto_arg_index carries the automatic '{}'
        # field counter and becomes False once manual numbering is seen.
        if recursion_depth < 0:
            raise ValueError('Max string recursion exceeded')
        result = []
        for literal_text, field_name, format_spec, conversion in \
                self.parse(format_string):

            # output the literal text
            if literal_text:
                result.append(literal_text)

            # if there's a field, output it
            if field_name is not None:
                # this is some markup, find the object and do
                #  the formatting

                # handle arg indexing when empty field_names are given.
                if field_name == '':
                    if auto_arg_index is False:
                        raise ValueError('cannot switch from manual field '
                                         'specification to automatic field '
                                         'numbering')
                    field_name = str(auto_arg_index)
                    auto_arg_index += 1
                elif field_name.isdigit():
                    if auto_arg_index:
                        raise ValueError('cannot switch from manual field '
                                         'specification to automatic field '
                                         'numbering')
                    # disable auto arg incrementing, if it gets
                    # used later on, then an exception will be raised
                    auto_arg_index = False

                # given the field_name, find the object it references
                #  and the argument it came from
                obj, arg_used = self.get_field(field_name, args, kwargs)
                used_args.add(arg_used)

                # do any conversion on the resulting object
                obj = self.convert_field(obj, conversion)

                # expand the format spec, if needed
                format_spec, auto_arg_index = self._vformat(
                    format_spec, args, kwargs,
                    used_args, recursion_depth-1,
                    auto_arg_index=auto_arg_index)

                # format the object and append to the result
                result.append(self.format_field(obj, format_spec))

        return ''.join(result), auto_arg_index

    def get_value(self, key, args, kwargs):
        # Integer keys index the positional args; all others index kwargs.
        if isinstance(key, int):
            return args[key]
        else:
            return kwargs[key]

    def check_unused_args(self, used_args, args, kwargs):
        # Subclass hook; the default ignores unused arguments.
        pass

    def format_field(self, value, format_spec):
        return format(value, format_spec)

    def convert_field(self, value, conversion):
        # do any conversion on the resulting object
        if conversion is None:
            return value
        elif conversion == 's':
            return str(value)
        elif conversion == 'r':
            return repr(value)
        elif conversion == 'a':
            return ascii(value)
        raise ValueError("Unknown conversion specifier {0!s}".format(conversion))

    # returns an iterable that contains tuples of the form:
    # (literal_text, field_name, format_spec, conversion)
    # literal_text can be zero length
    # field_name can be None, in which case there's no
    #  object to format and output
    # if field_name is not None, it is looked up, formatted
    #  with format_spec and conversion and then used
    def parse(self, format_string):
        return _string.formatter_parser(format_string)

    # given a field_name, find the object it references.
    #  field_name:   the field being looked up, e.g. "0.name"
    #                 or "lookup[3]"
    #  used_args:    a set of which args have been used
    #  args, kwargs: as passed in to vformat
    def get_field(self, field_name, args, kwargs):
        first, rest = _string.formatter_field_name_split(field_name)

        obj = self.get_value(first, args, kwargs)

        # loop through the rest of the field_name, doing
        #  getattr or getitem as needed
        for is_attr, i in rest:
            if is_attr:
                obj = getattr(obj, i)
            else:
                obj = obj[i]

        return obj, first
| {
"repo_name": "Reflexe/doc_to_pdf",
"path": "Windows/program/python-core-3.5.0/lib/string.py",
"copies": "5",
"size": "11854",
"license": "mpl-2.0",
"hash": -5982963243501532000,
"line_mean": 37.2387096774,
"line_max": 85,
"alpha_frac": 0.5656318542,
"autogenerated": false,
"ratio": 4.503799392097265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001853309774979155,
"num_lines": 310
} |
"""A collection of string operations (most are no longer used in Python 1.6).
Warning: most of the code you see here isn't normally used nowadays. With
Python 1.6, many of these functions are implemented as methods on the
standard string object. They used to be implemented by a built-in module
called strop, but strop is now obsolete itself.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
punctuation -- a string containing all characters considered punctuation
printable -- a string containing all characters considered printable
"""
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace

# Case conversion helpers
# Use str to convert Unicode literal in case of -U
# NOTE(review): Python 2 only -- relies on xrange and on map() returning a
# list.  _idmap is the 256-character identity table used by maketrans().
l = map(chr, xrange(256))
_idmap = str('').join(l)
del l

# Backward compatible names for exceptions
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
    """lower(s) -> string

    Return a copy of *s* with all cased characters in lower case.
    Deprecated thin wrapper around the ``lower`` string method.
    """
    lowered = s.lower()
    return lowered
# Convert lower case letters to UPPER CASE
def upper(s):
    """upper(s) -> string

    Return a copy of *s* with all cased characters in upper case.
    Deprecated thin wrapper around the ``upper`` string method.
    """
    uppered = s.upper()
    return uppered
# Swap lower case letters and UPPER CASE
def swapcase(s):
    """swapcase(s) -> string

    Return *s* with uppercase characters converted to lowercase and
    vice versa.  Deprecated wrapper around the string method.
    """
    flipped = s.swapcase()
    return flipped
# Strip leading and trailing tabs and spaces
def strip(s, chars=None):
    """strip(s [,chars]) -> string

    Return *s* without leading or trailing whitespace.  When *chars*
    is given and not None, remove those characters instead.  If chars
    is unicode, s is converted to unicode before stripping.
    """
    stripped = s.strip(chars)
    return stripped
# Strip leading tabs and spaces
def lstrip(s, chars=None):
    """lstrip(s [,chars]) -> string

    Return *s* without leading whitespace.  When *chars* is given and
    not None, remove those characters instead.
    """
    trimmed = s.lstrip(chars)
    return trimmed
# Strip trailing tabs and spaces
def rstrip(s, chars=None):
    """rstrip(s [,chars]) -> string

    Return *s* without trailing whitespace.  When *chars* is given and
    not None, remove those characters instead.
    """
    trimmed = s.rstrip(chars)
    return trimmed
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
    """split(s [,sep [,maxsplit]]) -> list of strings

    Return the list of words in *s*, using *sep* as the delimiter.
    At most *maxsplit* splits are performed when it is non-negative
    (yielding at most maxsplit+1 words).  With sep absent, any run
    of whitespace separates words.
    (split and splitfields are synonymous)
    """
    parts = s.split(sep, maxsplit)
    return parts

splitfields = split
# Join fields with optional separator
def join(words, sep=' '):
    """join(list [,sep]) -> string

    Concatenate the strings in *words* with *sep* between them; the
    default separator is a single space.
    (joinfields and join are synonymous)
    """
    return sep.join(words)

joinfields = join
# Find substring, raise exception if not found
def index(s, *args):
    """index(s, sub [,start [,end]]) -> int

    Like find(), but raise ValueError when the substring is not found.
    """
    where = s.index(*args)
    return where
# Find last substring, raise exception if not found
def rindex(s, *args):
    """rindex(s, sub [,start [,end]]) -> int

    Like rfind(), but raise ValueError when the substring is not found.
    """
    where = s.rindex(*args)
    return where
# Count non-overlapping occurrences of substring
def count(s, *args):
    """count(s, sub[, start[,end]]) -> int

    Return the number of non-overlapping occurrences of substring sub
    in s[start:end]; start and end behave as in slice notation.
    """
    occurrences = s.count(*args)
    return occurrences
# Find substring, return -1 if not found
def find(s, *args):
    """find(s, sub [,start [,end]]) -> int

    Return the lowest index in s where substring sub is found within
    s[start:end]; start and end behave as in slice notation.
    Return -1 on failure.
    """
    location = s.find(*args)
    return location
# Find last substring, return -1 if not found
def rfind(s, *args):
    """rfind(s, sub [,start [,end]]) -> int

    Return the highest index in s where substring sub is found within
    s[start:end]; start and end behave as in slice notation.
    Return -1 on failure.
    """
    location = s.rfind(*args)
    return location
# for a bit of speed -- cache the builtins under private names so the
# conversion helpers below still work if callers shadow the globals.
_float = float
_int = int
_long = long  # NOTE(review): Python 2 only -- 'long' does not exist in Python 3
# Convert string to float
def atof(s):
    """atof(s) -> float

    Return the floating point number represented by the string s.
    """
    value = _float(s)
    return value
# Convert string to integer
def atoi(s, base=10):
    """atoi(s [,base]) -> int

    Return the integer represented by string *s* in *base* (default
    10).  The string must be one or more digits, optionally signed.
    A base of 0 infers the radix from the leading characters of s
    (0 for octal, 0x/0X for hex); base 16 also accepts an optional
    0x/0X prefix.
    """
    return _int(s, base)
# Convert string to long integer
def atol(s, base=10):
    """atol(s [,base]) -> long

    Return the long integer represented by string *s* in *base*
    (default 10).  The string must be one or more digits, optionally
    signed.  A base of 0 infers the radix from the leading characters
    (0 for octal, 0x/0X for hex); base 16 also accepts a 0x/0X prefix.
    A trailing L or l is only accepted when base is 0.
    """
    return _long(s, base)
# Left-justify a string
def ljust(s, width):
    """ljust(s, width) -> string

    Return *s* left-justified in a field of *width* columns, padded
    with spaces as needed; the string is never truncated.
    """
    padded = s.ljust(width)
    return padded
# Right-justify a string
def rjust(s, width):
    """rjust(s, width) -> string

    Return *s* right-justified in a field of *width* columns, padded
    with spaces as needed; the string is never truncated.
    """
    padded = s.rjust(width)
    return padded
# Center a string
def center(s, width):
    """center(s, width) -> string

    Return *s* centered in a field of *width* columns, padded with
    spaces as needed; the string is never truncated.
    """
    padded = s.center(width)
    return padded
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
    """zfill(x, width) -> string

    Pad a numeric string x with zeros on the left, to fill a field
    of the specified width.  The string x is never truncated.
    """
    # Non-string arguments are first converted with repr().
    # NOTE(review): Python 2 only -- basestring does not exist in Python 3.
    if not isinstance(x, basestring):
        x = repr(x)
    return x.zfill(width)
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
    """expandtabs(s [,tabsize]) -> string

    Return *s* with each tab replaced by enough spaces to reach the
    next multiple of *tabsize* columns (default 8).  Column tracking
    understands newlines but not other non-printing characters.
    """
    expanded = s.expandtabs(tabsize)
    return expanded
# Character translation through look-up table.
def translate(s, table, deletions=""):
    """translate(s,table [,deletions]) -> string

    Return a copy of the string s, where all characters occurring
    in the optional argument deletions are removed, and the
    remaining characters have been mapped through the given
    translation table, which must be a string of length 256.  The
    deletions argument is not allowed for Unicode strings.
    """
    if deletions:
        # Two-argument str.translate is Python 2 only.
        return s.translate(table, deletions)
    else:
        # Add s[:0] so that if s is Unicode and table is an 8-bit string,
        # table is converted to Unicode.  This means that table *cannot*
        # be a dictionary -- for that feature, use u.translate() directly.
        return s.translate(table + s[:0])
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
    """capitalize(s) -> string

    Return a copy of *s* with only its first character capitalized
    (the rest are lowercased).
    """
    result = s.capitalize()
    return result
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
# See also regsub.capwords().
def capwords(s, sep=None):
    """capwords(s, [sep]) -> string

    Split *s* using split(), capitalize each word with capitalize(),
    and rejoin the words with join().  Note that this replaces runs
    of whitespace characters by a single space.
    """
    capitalized = [capitalize(word) for word in s.split(sep)]
    return join(capitalized, sep or ' ')
# Construct a translation string
_idmapL = None  # lazily-built list form of _idmap, cached across calls
def maketrans(fromstr, tostr):
    """maketrans(frm, to) -> string

    Return a translation table (a string of 256 bytes long)
    suitable for use in string.translate.  The strings frm and to
    must be of the same length.
    """
    if len(fromstr) != len(tostr):
        # NOTE(review): Python 2 raise syntax.
        raise ValueError, "maketrans arguments must have same length"
    global _idmapL
    if not _idmapL:
        # map(None, s) is Python 2 only: turns the identity string into
        # a list of its characters.
        _idmapL = map(None, _idmap)
    L = _idmapL[:]
    fromstr = map(ord, fromstr)
    # Overwrite the identity entries for the characters being remapped.
    for i in range(len(fromstr)):
        L[fromstr[i]] = tostr[i]
    return join(L, "")
# Substring replacement (global)
def replace(s, old, new, maxsplit=-1):
    """replace (str, old, new[, maxsplit]) -> string

    Return a copy of *s* with occurrences of substring *old* replaced
    by *new*.  When *maxsplit* is non-negative, only the first
    *maxsplit* occurrences are replaced.
    """
    result = s.replace(old, new, maxsplit)
    return result
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
try:
    from strop import maketrans, lowercase, uppercase, whitespace
    # Rebuild 'letters' from the (possibly locale-aware) strop tables.
    letters = lowercase + uppercase
except ImportError:
    pass  # Use the original versions
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.3/Lib/string.py",
"copies": "3",
"size": "11367",
"license": "mit",
"hash": -901562446709140200,
"line_mean": 28.7565445026,
"line_max": 77,
"alpha_frac": 0.6832057711,
"autogenerated": false,
"ratio": 3.8901437371663246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003763902984357882,
"num_lines": 382
} |
"""A collection of string operations (most are no longer used).
Warning: most of the code you see here isn't normally used nowadays.
Beginning with Python 1.6, many of these functions are implemented as
methods on the standard string object. They used to be implemented by
a built-in module called strop, but strop is now obsolete itself.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
punctuation -- a string containing all characters considered punctuation
printable -- a string containing all characters considered printable
"""
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace

# Case conversion helpers
# Use str to convert Unicode literal in case of -U
# NOTE(review): Python 2 only -- relies on xrange and on map() returning
# a list.  _idmap is a 256-character identity table.
l = map(chr, xrange(256))
_idmap = str('').join(l)
del l
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """capwords(s [,sep]) -> string

    Split *s* into words using split(), capitalize each word, and join
    them back together.  When *sep* is absent or None, runs of
    whitespace collapse to a single space and leading/trailing
    whitespace is removed; otherwise *sep* is used both to split and
    to join.
    """
    return (sep or ' ').join([chunk.capitalize() for chunk in s.split(sep)])
# Construct a translation string
_idmapL = None  # kept for API compatibility; unused by this implementation
def maketrans(fromstr, tostr):
    """maketrans(frm, to) -> string

    Return a translation table (a string of 256 bytes long)
    suitable for use in string.translate.  The strings frm and to
    must be of the same length.
    """
    n = len(fromstr)
    if n != len(tostr):
        # NOTE(review): Python 2 raise syntax.
        raise ValueError, "maketrans arguments must have same length"
    # this function has been rewritten to suit PyPy better; it is
    # almost 10x faster than the original.
    buf = bytearray(256)
    # Start from the identity mapping, then patch the remapped bytes.
    for i in range(256):
        buf[i] = i
    for i in range(n):
        buf[ord(fromstr[i])] = tostr[i]
    return str(buf)
####################################################################
import re as _re
class _multimap:
    """Helper class for combining multiple mappings.

    Used by .{safe_,}substitute() to combine the mapping and keyword
    arguments.
    """

    def __init__(self, primary, secondary):
        # Lookups consult *primary* first and fall back to *secondary*.
        self._first = primary
        self._second = secondary

    def __getitem__(self, key):
        try:
            return self._first[key]
        except KeyError:
            return self._second[key]
class _TemplateMetaclass(type):
    """Metaclass that compiles each Template subclass's placeholder regex
    from its ``delimiter`` and ``idpattern`` class attributes."""

    # Skeleton regex; %(delim)s and %(id)s are filled in by __init__
    # unless the class body supplies a complete 'pattern' of its own.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) |   # Escape sequence of two delimiters
      (?P<named>%(id)s)      |   # delimiter and a Python identifier
      {(?P<braced>%(id)s)}   |   # delimiter and a braced identifier
      (?P<invalid>)              # Other ill-formed delimiter exprs
    )
    """

    def __init__(cls, name, bases, dct):
        super(_TemplateMetaclass, cls).__init__(name, bases, dct)
        if 'pattern' in dct:
            pattern = cls.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(cls.delimiter),
                'id' : cls.idpattern,
                }
        cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
    """A string class for supporting $-substitutions."""
    # NOTE(review): Python 2 metaclass hook; under Python 3 this line has
    # no effect and the metaclass would not be applied.
    __metaclass__ = _TemplateMetaclass

    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'

    def __init__(self, template):
        self.template = template

    # Search for $$, $identifier, ${identifier}, and any bare $'s

    def _invalid(self, mo):
        # Report the line/column of an ill-formed placeholder.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))

    def substitute(self, *args, **kws):
        # Strict substitution: unknown or malformed placeholders raise.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # Keyword arguments take priority over the positional mapping.
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)

    def safe_substitute(self, *args, **kws):
        # Lenient substitution: unknown/invalid placeholders are
        # reconstructed in the output instead of raising.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    # Rebuild the original '$name' text.
                    return self.delimiter + named
            braced = mo.group('braced')
            if braced is not None:
                try:
                    return '%s' % (mapping[braced],)
                except KeyError:
                    # Rebuild the original '${name}' text.
                    return self.delimiter + '{' + braced + '}'
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return self.delimiter
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
####################################################################
# NOTE: Everything below here is deprecated.  Use string methods instead.
# This stuff will go away in Python 3.0.

# Backward compatible names for exceptions
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# Deprecated thin wrappers around the corresponding str methods.

# convert UPPER CASE letters to lower case
def lower(s):
    """lower(s) -> string

    Return a copy of the string s converted to lowercase.
    """
    return s.lower()

# Convert lower case letters to UPPER CASE
def upper(s):
    """upper(s) -> string

    Return a copy of the string s converted to uppercase.
    """
    return s.upper()

# Swap lower case letters and UPPER CASE
def swapcase(s):
    """swapcase(s) -> string

    Return a copy of the string s with upper case characters
    converted to lowercase and vice versa.
    """
    return s.swapcase()

# Strip leading and trailing tabs and spaces
def strip(s, chars=None):
    """strip(s [,chars]) -> string

    Return a copy of the string s with leading and trailing
    whitespace removed.
    If chars is given and not None, remove characters in chars instead.
    If chars is unicode, S will be converted to unicode before stripping.
    """
    return s.strip(chars)

# Strip leading tabs and spaces
def lstrip(s, chars=None):
    """lstrip(s [,chars]) -> string

    Return a copy of the string s with leading whitespace removed.
    If chars is given and not None, remove characters in chars instead.
    """
    return s.lstrip(chars)

# Strip trailing tabs and spaces
def rstrip(s, chars=None):
    """rstrip(s [,chars]) -> string

    Return a copy of the string s with trailing whitespace removed.
    If chars is given and not None, remove characters in chars instead.
    """
    return s.rstrip(chars)
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
    """split(s [,sep [,maxsplit]]) -> list of strings

    Return a list of the words in the string s, using sep as the
    delimiter string.  If maxsplit is given, splits at no more than
    maxsplit places (resulting in at most maxsplit+1 words).  If sep
    is not specified or is None, any whitespace string is a separator.

    (split and splitfields are synonymous)
    """
    return s.split(sep, maxsplit)
splitfields = split

# Split a string into a list of space/tab-separated words
def rsplit(s, sep=None, maxsplit=-1):
    """rsplit(s [,sep [,maxsplit]]) -> list of strings

    Return a list of the words in the string s, using sep as the
    delimiter string, starting at the end of the string and working
    to the front.  If maxsplit is given, at most maxsplit splits are
    done.  If sep is not specified or is None, any whitespace string
    is a separator.
    """
    return s.rsplit(sep, maxsplit)

# Join fields with optional separator
def join(words, sep = ' '):
    """join(list [,sep]) -> string

    Return a string composed of the words in list, with
    intervening occurrences of sep.  The default separator is a
    single space.

    (joinfields and join are synonymous)
    """
    return sep.join(words)
joinfields = join
# Find substring, raise exception if not found
def index(s, *args):
    """index(s, sub [,start [,end]]) -> int

    Like find but raises ValueError when the substring is not found.
    """
    return s.index(*args)

# Find last substring, raise exception if not found
def rindex(s, *args):
    """rindex(s, sub [,start [,end]]) -> int

    Like rfind but raises ValueError when the substring is not found.
    """
    return s.rindex(*args)

# Count non-overlapping occurrences of substring
def count(s, *args):
    """count(s, sub[, start[,end]]) -> int

    Return the number of occurrences of substring sub in string
    s[start:end].  Optional arguments start and end are
    interpreted as in slice notation.
    """
    return s.count(*args)

# Find substring, return -1 if not found
def find(s, *args):
    """find(s, sub [,start [,end]]) -> int

    Return the lowest index in s where substring sub is found,
    such that sub is contained within s[start,end].  Optional
    arguments start and end are interpreted as in slice notation.

    Return -1 on failure.
    """
    return s.find(*args)

# Find last substring, return -1 if not found
def rfind(s, *args):
    """rfind(s, sub [,start [,end]]) -> int

    Return the highest index in s where substring sub is found,
    such that sub is contained within s[start,end].  Optional
    arguments start and end are interpreted as in slice notation.

    Return -1 on failure.
    """
    return s.rfind(*args)
# for a bit of speed -- cache the builtins under private names.
_float = float
_int = int
_long = long  # NOTE(review): Python 2 only -- 'long' is gone in Python 3

# Convert string to float
def atof(s):
    """atof(s) -> float

    Return the floating point number represented by the string s.
    """
    return _float(s)

# Convert string to integer
def atoi(s , base=10):
    """atoi(s [,base]) -> int

    Return the integer represented by the string s in the given
    base, which defaults to 10.  The string s must consist of one
    or more digits, possibly preceded by a sign.  If base is 0, it
    is chosen from the leading characters of s, 0 for octal, 0x or
    0X for hexadecimal.  If base is 16, a preceding 0x or 0X is
    accepted.
    """
    return _int(s, base)

# Convert string to long integer
def atol(s, base=10):
    """atol(s [,base]) -> long

    Return the long integer represented by the string s in the
    given base, which defaults to 10.  The string s must consist
    of one or more digits, possibly preceded by a sign.  If base
    is 0, it is chosen from the leading characters of s, 0 for
    octal, 0x or 0X for hexadecimal.  If base is 16, a preceding
    0x or 0X is accepted.  A trailing L or l is not accepted,
    unless base is 0.
    """
    return _long(s, base)
# Left-justify a string
def ljust(s, width, *args):
    """ljust(s, width[, fillchar]) -> string

    Return a left-justified version of s, in a field of the
    specified width, padded with spaces as needed.  The string is
    never truncated.  If specified the fillchar is used instead of spaces.
    """
    return s.ljust(width, *args)

# Right-justify a string
def rjust(s, width, *args):
    """rjust(s, width[, fillchar]) -> string

    Return a right-justified version of s, in a field of the
    specified width, padded with spaces as needed.  The string is
    never truncated.  If specified the fillchar is used instead of spaces.
    """
    return s.rjust(width, *args)

# Center a string
def center(s, width, *args):
    """center(s, width[, fillchar]) -> string

    Return a centered version of s, in a field of the specified
    width, padded with spaces as needed.  The string is never
    truncated.  If specified the fillchar is used instead of spaces.
    """
    return s.center(width, *args)
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
    """zfill(x, width) -> string

    Pad a numeric string x with zeros on the left, to fill a field
    of the specified width.  The string x is never truncated.
    """
    # Non-string arguments are first converted with repr().
    # NOTE(review): Python 2 only -- basestring does not exist in Python 3.
    if not isinstance(x, basestring):
        x = repr(x)
    return x.zfill(width)
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
    """expandtabs(s [,tabsize]) -> string

    Replace each tab in s with enough spaces to reach the next
    multiple of tabsize (default 8), tracking the current column.
    """
    expanded = s.expandtabs(tabsize)
    return expanded
# Character translation through look-up table.
def translate(s, table, deletions=""):
    """translate(s,table [,deletions]) -> string

    Return a copy of the string s, where all characters occurring
    in the optional argument deletions are removed, and the
    remaining characters have been mapped through the given
    translation table, which must be a string of length 256.  The
    deletions argument is not allowed for Unicode strings.
    """
    # Python 2 str.translate accepts (table, deletions); the two-argument
    # form (or an explicit None table) is only valid for byte strings.
    if deletions or table is None:
        return s.translate(table, deletions)
    else:
        # Add s[:0] so that if s is Unicode and table is an 8-bit string,
        # table is converted to Unicode. This means that table *cannot*
        # be a dictionary -- for that feature, use u.translate() directly.
        return s.translate(table + s[:0])
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
    """capitalize(s) -> string

    Return a copy of s whose first character is upper-cased and
    whose remaining characters are lower-cased.
    """
    capitalized = s.capitalize()
    return capitalized
# Substring replacement (global)
def replace(s, old, new, maxreplace=-1):
    """replace (str, old, new[, maxreplace]) -> string

    Return a copy of s with occurrences of substring old swapped
    for new.  When maxreplace is non-negative, only the first
    maxreplace occurrences are replaced; -1 (the default) means
    replace them all.
    """
    result = s.replace(old, new, maxreplace)
    return result
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
try:
from strop import maketrans, lowercase, uppercase, whitespace
letters = lowercase + uppercase
except ImportError:
pass # Use the original versions
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str and unicode.
# The overall parser is implemented in str._formatter_parser.
# The field name parser is implemented in str._formatter_field_name_split
class Formatter(object):
    """str.format()-style formatter (PEP 3101), Python 2 implementation.

    The hard parts are reused from the C implementation; they are
    exposed as the "_"-prefixed methods str._formatter_parser and
    str._formatter_field_name_split.  Subclasses typically override
    get_value()/check_unused_args() to customise argument lookup.
    """

    def format(self, format_string, *args, **kwargs):
        """Format ``format_string`` using positional/keyword arguments."""
        return self.vformat(format_string, args, kwargs)

    def vformat(self, format_string, args, kwargs):
        """As format(), but args/kwargs are passed as explicit containers."""
        used_args = set()
        # The literal 2 bounds nesting of replacement fields inside
        # format specs (e.g. '{0:{1}}').
        result = self._vformat(format_string, args, kwargs, used_args, 2)
        self.check_unused_args(used_args, args, kwargs)
        return result

    def _vformat(self, format_string, args, kwargs, used_args, recursion_depth):
        """Recursive worker: expands fields and nested format specs."""
        if recursion_depth < 0:
            raise ValueError('Max string recursion exceeded')
        result = []
        for literal_text, field_name, format_spec, conversion in \
                self.parse(format_string):
            # output the literal text
            if literal_text:
                result.append(literal_text)
            # if there's a field, output it
            if field_name is not None:
                # this is some markup, find the object and do
                # the formatting
                # given the field_name, find the object it references
                # and the argument it came from
                obj, arg_used = self.get_field(field_name, args, kwargs)
                used_args.add(arg_used)
                # do any conversion on the resulting object
                obj = self.convert_field(obj, conversion)
                # expand the format spec, if needed
                format_spec = self._vformat(format_spec, args, kwargs,
                                            used_args, recursion_depth-1)
                # format the object and append to the result
                result.append(self.format_field(obj, format_spec))
        return ''.join(result)

    def get_value(self, key, args, kwargs):
        """Return args[key] for integer keys, kwargs[key] otherwise."""
        # `long` is Python 2 only.
        if isinstance(key, (int, long)):
            return args[key]
        else:
            return kwargs[key]

    def check_unused_args(self, used_args, args, kwargs):
        """Hook for subclasses to validate that all arguments were used."""
        pass

    def format_field(self, value, format_spec):
        """Apply ``format_spec`` to a single value via the builtin format()."""
        return format(value, format_spec)

    def convert_field(self, value, conversion):
        """Apply the '!s' / '!r' conversion flag, if any."""
        # do any conversion on the resulting object
        if conversion is None:
            return value
        elif conversion == 's':
            return str(value)
        elif conversion == 'r':
            return repr(value)
        raise ValueError("Unknown conversion specifier {0!s}".format(conversion))

    # returns an iterable that contains tuples of the form:
    # (literal_text, field_name, format_spec, conversion)
    # literal_text can be zero length
    # field_name can be None, in which case there's no
    # object to format and output
    # if field_name is not None, it is looked up, formatted
    # with format_spec and conversion and then used
    def parse(self, format_string):
        return format_string._formatter_parser()

    # given a field_name, find the object it references.
    # field_name: the field being looked up, e.g. "0.name"
    # or "lookup[3]"
    # used_args: a set of which args have been used
    # args, kwargs: as passed in to vformat
    def get_field(self, field_name, args, kwargs):
        first, rest = field_name._formatter_field_name_split()
        obj = self.get_value(first, args, kwargs)
        # loop through the rest of the field_name, doing
        # getattr or getitem as needed
        for is_attr, i in rest:
            if is_attr:
                obj = getattr(obj, i)
            else:
                obj = obj[i]
        return obj, first
| {
"repo_name": "albertjan/pypyjs-presentation",
"path": "assets/js/pypy.js-0.3.1/lib/modules/string.py",
"copies": "6",
"size": "20817",
"license": "unlicense",
"hash": 8104984555131726000,
"line_mean": 31.3748055988,
"line_max": 81,
"alpha_frac": 0.6284767258,
"autogenerated": false,
"ratio": 4.101063829787234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003063482524363309,
"num_lines": 643
} |
"""A collection of TAR images."""
import hashlib
import logging
import os
import shutil
import tarfile
import tempfile
import urllib.parse
import requests
import requests_kerberos
from . import _image_base
from . import _repository_base
from . import native
from treadmill import fs
_LOGGER = logging.getLogger(__name__)
TAR_DIR = 'tar'
def _download(url, temp):
    """Stream the image at ``url`` into the open file object ``temp``."""
    _LOGGER.debug('Downloading tar file from %r to %r.', url, temp)
    auth = requests_kerberos.HTTPKerberosAuth(
        mutual_authentication=requests_kerberos.DISABLED
    )
    response = requests.get(url, stream=True, auth=auth)
    shutil.copyfileobj(response.raw, temp)
def _copy(path, temp):
    """Copy the image file at ``path`` into the open file object ``temp``."""
    _LOGGER.debug('Copying tar file from %r to %r.', path, temp)
    # Open read-only: the previous 'r+b' mode needlessly required write
    # permission on the source file and failed for read-only images.
    with open(path, 'rb') as f:
        shutil.copyfileobj(f, temp)
def _sha256sum(path):
    """Calculate the SHA256 hex digest of the file at ``path``.

    Reads in 64 KiB chunks; the previous code read ``sha256.block_size``
    (64) bytes per call, which is the hash's internal block size, not a
    sensible I/O buffer size, causing thousands of tiny reads.
    """
    sha256 = hashlib.sha256()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(65536), b''):
            sha256.update(block)
    return sha256.hexdigest()
class TarImage(_image_base.Image):
    """Represents a TAR image."""

    # NOTE: the original tuple was missing a comma, so it declared a single
    # bogus slot 'tm_envimage_path' instead of the two real attributes.
    __slots__ = (
        'tm_env',
        'image_path',
    )

    def __init__(self, tm_env, image_path):
        self.tm_env = tm_env
        self.image_path = image_path

    def unpack(self, container_dir, root_dir, app):
        """Extract the tar archive into ``root_dir`` and finish the
        container setup via the native image implementation.
        """
        _LOGGER.debug('Extracting tar file %r to %r.', self.image_path,
                      root_dir)
        with tarfile.open(self.image_path) as tar:
            tar.extractall(path=root_dir)

        native.NativeImage(self.tm_env).unpack(container_dir, root_dir, app)

        # TODO: cache instead of removing TAR files.
        fs.rm_safe(self.image_path)
class TarImageRepository(_repository_base.ImageRepository):
    """A collection of TAR images."""

    def __init__(self, tm_env):
        super(TarImageRepository, self).__init__(tm_env)

    def get(self, url):
        """Fetch (or copy) the image at ``url`` and return a ``TarImage``.

        The URL may carry a ``sha256`` query parameter; when present, the
        retrieved file's digest must match it.

        :raises Exception: if the file is not a tar archive or its SHA256
            digest does not match the expected value.
        """
        images_dir = os.path.join(self.tm_env.images_dir, TAR_DIR)
        fs.mkdir_safe(images_dir)

        image = urllib.parse.urlparse(url)
        sha256 = urllib.parse.parse_qs(image.query).get('sha256', None)

        with tempfile.NamedTemporaryFile(dir=images_dir, delete=False,
                                         prefix='.tmp') as temp:
            # Route both http and https through the downloader; anything
            # else is treated as a local path.
            if image.scheme in ('http', 'https'):
                _download(url, temp)
            else:
                _copy(image.path, temp)

        try:
            if not tarfile.is_tarfile(temp.name):
                _LOGGER.error('File %r is not a tar file.', url)
                # The original passed `url` as a spare exception arg and
                # never called .format(), leaving '{0}' in the message.
                raise Exception('File {0} is not a tar file.'.format(url))

            new_sha256 = _sha256sum(temp.name)
            if sha256 is not None and sha256[0] != new_sha256:
                _LOGGER.error('Hash does not match %r - %r',
                              sha256[0], new_sha256)
                raise Exception(
                    'Hash {0} of {1} does not match expected {2}.'.format(
                        new_sha256, url, sha256[0]))
        except Exception:
            # Do not leak the (delete=False) temp file on failure.
            fs.rm_safe(temp.name)
            raise

        # TODO: rename tar file to sha256 to allow for caching.
        return TarImage(self.tm_env, temp.name)
| {
"repo_name": "gaocegege/treadmill",
"path": "treadmill/runtime/linux/image/tar.py",
"copies": "3",
"size": "3085",
"license": "apache-2.0",
"hash": 4117737456166516700,
"line_mean": 26.5446428571,
"line_max": 79,
"alpha_frac": 0.6087520259,
"autogenerated": false,
"ratio": 3.533791523482245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5642543549382245,
"avg_score": null,
"num_lines": null
} |
"""A collection of TAR images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import io
import logging
import os
import shutil
import tarfile
import tempfile
import requests
import requests_kerberos
from six.moves import urllib_parse
from . import _image_base
from . import _repository_base
from . import native
from treadmill import fs
_LOGGER = logging.getLogger(__name__)
TAR_DIR = 'tar'
def _download(url, temp):
    """Stream the image at ``url`` into the open file object ``temp``."""
    _LOGGER.debug('Downloading tar file from %r to %r.', url, temp)
    auth = requests_kerberos.HTTPKerberosAuth(
        mutual_authentication=requests_kerberos.DISABLED
    )
    response = requests.get(url, stream=True, auth=auth)
    shutil.copyfileobj(response.raw, temp)
def _copy(path, temp):
    """Copy the image file at ``path`` into the open file object ``temp``."""
    _LOGGER.debug('Copying tar file from %r to %r.', path, temp)
    with io.open(path, 'rb') as source:
        shutil.copyfileobj(source, temp)
def _sha256sum(path):
    """Calculate the SHA256 hex digest of the file at ``path``.

    Reads in 64 KiB chunks; the previous code read ``sha256.block_size``
    (64) bytes per call, which is the hash's internal block size, not a
    sensible I/O buffer size, causing thousands of tiny reads.
    """
    sha256 = hashlib.sha256()
    with io.open(path, 'rb') as f:
        for block in iter(lambda: f.read(65536), b''):
            sha256.update(block)
    return sha256.hexdigest()
class TarImage(_image_base.Image):
    """Represents a TAR image."""

    # NOTE: the original tuple was missing a comma, so it declared a single
    # bogus slot 'tm_envimage_path' instead of the two real attributes.
    __slots__ = (
        'tm_env',
        'image_path',
    )

    def __init__(self, tm_env, image_path):
        self.tm_env = tm_env
        self.image_path = image_path

    def unpack(self, container_dir, root_dir, app):
        """Extract the tar archive into ``root_dir`` and finish the
        container setup via the native image implementation.
        """
        _LOGGER.debug('Extracting tar file %r to %r.', self.image_path,
                      root_dir)
        with tarfile.open(self.image_path) as tar:
            tar.extractall(path=root_dir)

        native.NativeImage(self.tm_env).unpack(container_dir, root_dir, app)

        # TODO: cache instead of removing TAR files.
        fs.rm_safe(self.image_path)
class TarImageRepository(_repository_base.ImageRepository):
    """A collection of TAR images."""

    def __init__(self, tm_env):
        super(TarImageRepository, self).__init__(tm_env)

    def get(self, url):
        """Fetch (or copy) the image at ``url`` and return a ``TarImage``.

        The URL may carry a ``sha256`` query parameter; when present, the
        retrieved file's digest must match it.

        :raises Exception: if the file is not a tar archive or its SHA256
            digest does not match the expected value.
        """
        images_dir = os.path.join(self.tm_env.images_dir, TAR_DIR)
        fs.mkdir_safe(images_dir)

        image = urllib_parse.urlparse(url)
        sha256 = urllib_parse.parse_qs(image.query).get('sha256', None)

        with tempfile.NamedTemporaryFile(dir=images_dir, delete=False,
                                         prefix='.tmp') as temp:
            # Route both http and https through the downloader; anything
            # else is treated as a local path.
            if image.scheme in ('http', 'https'):
                _download(url, temp)
            else:
                _copy(image.path, temp)

        try:
            if not tarfile.is_tarfile(temp.name):
                _LOGGER.error('File %r is not a tar file.', url)
                # The original passed `url` as a spare exception arg and
                # never called .format(), leaving '{0}' in the message.
                raise Exception('File {0} is not a tar file.'.format(url))

            new_sha256 = _sha256sum(temp.name)
            if sha256 is not None and sha256[0] != new_sha256:
                _LOGGER.error('Hash does not match %r - %r',
                              sha256[0], new_sha256)
                raise Exception(
                    'Hash {0} of {1} does not match expected {2}.'.format(
                        new_sha256, url, sha256[0]))
        except Exception:
            # Do not leak the (delete=False) temp file on failure.
            fs.rm_safe(temp.name)
            raise

        # TODO: rename tar file to sha256 to allow for caching.
        return TarImage(self.tm_env, temp.name)
| {
"repo_name": "captiosus/treadmill",
"path": "treadmill/runtime/linux/image/tar.py",
"copies": "1",
"size": "3266",
"license": "apache-2.0",
"hash": -3270221878946797600,
"line_mean": 26.4453781513,
"line_max": 79,
"alpha_frac": 0.6172688304,
"autogenerated": false,
"ratio": 3.5538628944504898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46711317248504897,
"avg_score": null,
"num_lines": null
} |
"""A collection of text rendering functions"""
def write(s, font, pos, color, text, border=1):
    """Write text to a surface with a faked black outline.

    The text is first rendered in black and blitted at eight offsets
    around ``pos`` (scaled by ``border``) to simulate a border, then
    rendered once in ``color`` on top.
    """
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, -1),
               (0, 1), (1, -1), (1, 0), (1, 1)]
    shadow = font.render(text, 1, (0, 0, 0))
    for ox, oy in offsets:
        s.blit(shadow, (pos[0] + ox * border, pos[1] + oy * border))
    glyphs = font.render(text, 1, color)
    s.blit(glyphs, pos)
def writec(s, font, color, text, border=1):
    """Write centered, black-bordered text to a surface.

    Centres the rendered text within the destination surface and
    delegates the bordered drawing to write().
    """
    w, h = font.size(text)
    # Floor division keeps pixel coordinates integral under Python 3 as
    # well; '/' would yield floats there ('//' is identical for Python 2
    # ints, so behaviour is unchanged on Python 2).
    x = (s.get_width() - w) // 2
    y = (s.get_height() - h) // 2
    write(s, font, (x, y), color, text, border)
def writepre(s, font, rect, color, text):
    """Write preformatted text on a pygame surface.

    Tabs are expanded to four spaces; each line is blitted at the
    rect's left edge, advancing one line-height per line.
    """
    body = text.replace("\t", "    ")
    # Line height comes from rendering a single space.
    _, line_h = font.render(" ", 1, color).get_size()
    y = rect.top
    for line in body.split("\n"):
        s.blit(font.render(line, 1, color), (rect.left, y))
        y += line_h
def writewrap(s, font, rect, color, text, maxlines=None, wrapchar=False):
    """Write wrapped text on a pygame surface.

    Arguments:
        s -- destination surface
        font -- font object used to render the text
        rect -- bounding rect; wrapping occurs when a word would pass
            rect.right
        color -- text colour
        maxlines -- maximum number of lines to write before stopping
        wrapchar -- wrap at the character level instead of the word level
    """
    txt = text.replace("\t", " " * 8)
    # sh (height of a rendered space) is the per-row advance.
    sw, sh = font.render(" ", 1, color).get_size()
    y = rect.top
    row = 1
    done = False
    for sentence in txt.split("\n"):
        x = rect.left
        if wrapchar:
            words = sentence
        else:
            words = sentence.split(" ")
        for word in words:
            if not wrapchar:
                # Re-attach the space consumed by split().
                word += " "
            tmp = font.render(word, 1, color)
            iw, ih = tmp.get_size()
            if x + iw > rect.right:
                # Wrap to the next line before drawing this word.
                x = rect.left
                y += sh
                row += 1
                # 'is not None' replaces the '!= None' anti-idiom;
                # behaviour is identical.
                if maxlines is not None and row > maxlines:
                    done = True
                    break
            s.blit(tmp, (x, y))
            x += iw
        if done:
            break
        y += sh
        row += 1
        if maxlines is not None and row > maxlines:
            break
| {
"repo_name": "Southpaw-TACTIC/Team",
"path": "src/python/Lib/site-packages/pgu/text.py",
"copies": "1",
"size": "2390",
"license": "epl-1.0",
"hash": 6731692508059365000,
"line_mean": 29.641025641,
"line_max": 73,
"alpha_frac": 0.5171548117,
"autogenerated": false,
"ratio": 3.3661971830985915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43833519947985916,
"avg_score": null,
"num_lines": null
} |
"""A collection of tools for analysing a pdb file.
Helper module for the biostructmap package.
"""
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from Bio.SeqIO import PdbIO
from Bio.SeqUtils import seq1
from Bio.Data.SCOPData import protein_letters_3to1
from Bio.PDB.Polypeptide import PPBuilder
import numpy as np
from .seqtools import align_protein_sequences
try:
from scipy.spatial import distance, cKDTree
SCIPY_PRESENT = True
except ImportError:
SCIPY_PRESENT = False
# Bidirectional lookup between one-letter secondary-structure codes and
# small integer indices: both code -> int and int -> code live in the one
# dict, so SS_LOOKUP_DICT['H'] == 0 and SS_LOOKUP_DICT[0] == 'H'.
SS_LOOKUP_DICT = {
    'H': 0,
    'B': 1,
    'E': 2,
    'G': 3,
    'I': 4,
    'T': 5,
    'S': 6,
    '-': 7,
    0: 'H',
    1: 'B',
    2: 'E',
    3: 'G',
    4: 'I',
    5: 'T',
    6: 'S',
    7: '-'
}
def _euclidean_distance_matrix(model, selector='all'):
    """Compute the Euclidean distance matrix for all atoms in a pdb model.

    Args:
        model (Model): Bio.PDB Model object.
        selector (str): The atom in each residue with which to compute
            distances. The default setting is 'all', which gets all
            non-heterologous atoms. Other potential options include 'CA', 'CB'
            etc. If an atom is not found within a residue object, then method
            reverts to using 'CA'.

    Returns:
        np.array: A euclidean distance matrix.
        np.array: A reference list of all atoms in the model (positionally
            matched to the euclidean matrix).
    """
    reference = []
    coords = []
    # Get all non-HET residues from all chains (hetero flag ' ' only).
    residues = [res for chain in model for res in chain if
                res.get_id()[0] == ' ']
    # Filter on non-HET atoms
    for residue in residues:
        # If selecting based on all atoms within residue
        if selector == 'all':
            for atom in residue:
                coords.append(atom.get_coord())
                # full_id()[2:4] is the (chain id, residue id) pair.
                reference.append(atom.get_full_id()[2:4])
        # If measuring distance on particular atoms
        else:
            if selector in residue:
                select_atom = selector
            # Revert to carbon alpha if atom is not found
            elif 'CA' in residue:
                select_atom = 'CA'
            # if CA is not found, do not include residue in distance matrix
            else:
                continue
            coords.append(residue[select_atom].get_coord())
            reference.append(residue[select_atom].get_full_id()[2:4])
    # Convert to a np array, and compute Euclidean distance.
    coord_array = np.array(coords)
    euclid_mat = _pairwise_euclidean_distance(coord_array)
    ref_array = reference
    return euclid_mat, ref_array
def _pairwise_euclidean_distance(coord_array):
    '''Return the square pairwise Euclidean distance matrix for the
    given (n, 3) coordinate array, using scipy when available.'''
    if SCIPY_PRESENT:
        condensed = distance.pdist(coord_array, 'euclidean')
        # Expand the condensed form into a full square matrix.
        return distance.squareform(condensed)
    # Pure-numpy fallback: broadcast coordinate differences and reduce.
    deltas = coord_array[:, :, None] - coord_array[:, :, None].T
    return np.sqrt((deltas ** 2).sum(1))
def _get_nearby_matrix(model, selector, radius):
    """Get a matrix of all nearby atoms in a pdb model.

    Args:
        model (Model): Bio.PDB Model object.
        selector (str): The atom in each residue with which to compute
            distances. The default setting is 'all', which gets all
            non-heterologous atoms. Other potential options include 'CA', 'CB'
            etc. If an atom is not found within a residue object, then method
            reverts to using 'CA'.
        radius (float): The radius within which to extract nearby residues/atoms.

    Returns:
        list: A nearby matrix (list of lists).
        np.array: A reference list of all atoms in the model (positionally
            matched to the nearby matrix).
    """
    reference = []
    coords = []
    # Get all non-HET residues from all chains (hetero flag ' ' only).
    residues = [res for chain in model for res in chain if
                res.get_id()[0] == ' ']
    # Filter on non-HET atoms
    for residue in residues:
        # If selecting based on all atoms within residue
        if selector == 'all':
            for atom in residue:
                coords.append(atom.get_coord())
                # full_id()[2:4] is the (chain id, residue id) pair.
                reference.append(atom.get_full_id()[2:4])
        # If measuring distance on particular atoms
        else:
            if selector in residue:
                select_atom = selector
            # Revert to carbon alpha if atom is not found
            elif 'CA' in residue:
                select_atom = 'CA'
            # if CA is not found, do not include residue in distance matrix
            else:
                continue
            coords.append(residue[select_atom].get_coord())
            reference.append(residue[select_atom].get_full_id()[2:4])
    # Convert to a np array, and use a KDTree to identify points within a
    # certain distance.
    # NOTE(review): cKDTree only exists when scipy imported successfully;
    # callers are expected to check SCIPY_PRESENT first.
    coord_array = np.array(coords)
    point_tree = cKDTree(coord_array)
    ball_tree = point_tree.query_ball_tree(point_tree, radius)
    ref_array = reference
    return ball_tree, ref_array
def nearby(model, radius=15, selector='all'):
    """
    Takes a Bio.PDB model object, and find all residues within a radius of a
    given residue.

    Args:
        model (Model): Bio.PDB Model object.
        radius (float/int): The radius (Angstrom) over which to select nearby
            residues
        selector (str): The atom in each residue with which to compute
            distances. The default setting is 'all', which gets all
            non-heterologous atoms. Other potential options include 'CA', 'CB'
            etc. If an atom is not found within a residue object, then method
            reverts to using 'CA'.

    Returns:
        dict: A dictionary containing nearby residues for each
            residue in the chain.
    """
    #TODO: Cleanup case when scipy not present. Could just enforce scipy usage.
    ref_dict = {}
    #if SCIPY_PRESENT:
    if SCIPY_PRESENT:
        # Fast path: KD-tree ball query of all atom pairs within `radius`,
        # then collapse atom-level hits to residue ids via `ref`.
        near_map, ref = _get_nearby_matrix(model, selector, radius)
        _ref_dict = defaultdict(set)
        for i, x in enumerate(near_map):
            _ref_dict[ref[i]].update({ref[y] for y in x})
    else:
        # Fallback: full pairwise distance matrix (memory heavy).
        euclidean_distance, ref = _euclidean_distance_matrix(model, selector)
        within_radius = euclidean_distance <= radius
        del euclidean_distance
        # 1-indexed as 0 means not within range.
        near_map = within_radius * np.arange(1, len(ref)+1)
        #Iterate over all atoms in Euclidean distance matrix.
        # NOTE(review): atom[i] is the 1-based index of atom i itself (the
        # diagonal of `within_radius` is always True), so entries are keyed
        # by each atom's own index.
        for i, atom in enumerate(near_map):
            if atom[i] not in ref_dict:
                ref_dict[atom[i]] = atom[np.nonzero(atom)]
            else:
                ref_dict[atom[i]] = np.append(ref_dict[atom[i]],
                                              atom[np.nonzero(atom)])
        _ref_dict = {}
        del near_map
        # Go from numerical index to residue id
        for key, value in ref_dict.items():
            _ref_dict[ref[key-1]] = {ref[x-1] for x in value} | _ref_dict.get(ref[key-1], set())
    return _ref_dict
def mmcif_sequence_to_res_id(mmcif_dict):
    """Create a lookup from mmcif sequence id to a pdb residue ID and vice versa.

    This allows mapping between reference PDB sequences and BioPython
    residue ids.

    Args:
        mmcif_dict (dict): An mmcif dictionary from a Bio.PDB.Structure object.

    Returns:
        dict: Dictionary in the form {full Bio.PDB residue id: (chain,
            mmcif sequence id)}.
        dict: Dictionary in the form {(chain, mmcif sequence id):
            full Bio.PDB residue id}
    """
    # label_seq_id entries may be '.'/'?' for HET records; map those to None.
    seq_ids = []
    for raw_id in mmcif_dict['_atom_site.label_seq_id']:
        try:
            seq_ids.append(int(raw_id))
        except ValueError:
            seq_ids.append(None)

    chains = mmcif_dict['_atom_site.auth_asym_id']
    icodes = mmcif_dict['_atom_site.pdbx_PDB_ins_code']
    groups = mmcif_dict['_atom_site.group_PDB']
    comp_ids = mmcif_dict['_atom_site.label_comp_id']

    # Prefer author-provided residue numbering when available.
    if "_atom_site.auth_seq_id" in mmcif_dict:
        auth_seq_ids = [int(x) for x in mmcif_dict["_atom_site.auth_seq_id"]]
    else:
        auth_seq_ids = [int(x) for x in mmcif_dict["_atom_site.label_seq_id"]]

    full_id_to_poly_seq_index = {}
    for group, comp, chain, auth_id, icode, seq_id in zip(
            groups, comp_ids, chains, auth_seq_ids, icodes, seq_ids):
        # Reconstruct the Bio.PDB hetero flag: ' ' for ATOM records, 'W'
        # for waters, 'H_<resname>' for other HET groups.
        if group == 'ATOM':
            het = ' '
        elif comp in ['HOH', 'WAT']:
            het = 'W'
        else:
            het = 'H_' + comp
        ins = ' ' if icode == '?' else icode
        full_id_to_poly_seq_index[(chain, (het, auth_id, ins))] = \
            (chain, seq_id)

    poly_seq_index_to_full_id = {value: key for key, value in
                                 full_id_to_poly_seq_index.items() if value}
    return full_id_to_poly_seq_index, poly_seq_index_to_full_id
def get_pdb_seq(filename):
    """
    Get a protein sequence from a PDB file.

    Will return multiple sequences if PDB file contains several chains.

    Args:
        filename (str/filehandle): A PDB filename or file-like object.
    Returns:
        dict: Protein sequences (str) accessed by chain id.
    """
    # Open PDB file and get sequence data from the SEQRES records.
    try:
        with open(filename, 'r') as handle:
            records = list(PdbIO.PdbSeqresIterator(handle))
    except TypeError:
        # A file-like object (e.g. io.StringIO) was passed instead.
        records = list(PdbIO.PdbSeqresIterator(filename))
    # Record ids are usually of the form 'XXXX:A'; if any record lacks the
    # chain component, fall back to raw ids for all of them (matching the
    # original all-or-nothing behaviour).
    try:
        sequences = {rec.id.split(":")[1]: ''.join(rec) for rec in records}
    except IndexError:
        sequences = {rec.id: ''.join(rec) for rec in records}
    return sequences
def get_mmcif_canonical_seq(mmcif_dict):
    """
    Get structure sequences from an mmCIF dictionary.

    Args:
        mmcif_dict (dict): An mmCIF dictionary from a Bio.PDB.Structure
            object.
    Returns:
        dict: Protein sequences (str) accessed by chain id.
    """
    # TODO Should we use _pdbx_poly_seq_scheme here?
    # Prefer the canonical one-letter sequence when present.
    try:
        entity_seqs = mmcif_dict['_entity_poly.pdbx_seq_one_letter_code_can']
    except KeyError:
        entity_seqs = mmcif_dict['_entity_poly.pdbx_seq_one_letter_code']
    strand_ids = mmcif_dict['_entity_poly.pdbx_strand_id']
    if isinstance(entity_seqs, list):
        # One sequence per entity; each entity may span several chains.
        sequences = {}
        for ids, seq in zip(strand_ids, entity_seqs):
            clean = seq.replace('\n', '')
            for chain_id in ids.split(','):
                sequences[chain_id] = clean
    else:
        clean = entity_seqs.replace('\n', '')
        sequences = {chain_id: clean for chain_id in strand_ids.split(',')}
    return sequences
def get_mmcif_seqs(mmcif_dict):
    """
    Get structure sequences from an mmCIF dictionary.

    Args:
        mmcif_dict (dict): An mmCIF dictionary from a Bio.PDB.Structure
            object.
    Returns:
        dict: Protein sequences (str) accessed by chain id.
    """
    # TODO Should we use _pdbx_poly_seq_scheme here?
    # Prefer the canonical one-letter sequence when present.
    try:
        entity_seqs = mmcif_dict['_entity_poly.pdbx_seq_one_letter_code_can']
    except KeyError:
        entity_seqs = mmcif_dict['_entity_poly.pdbx_seq_one_letter_code']
    strand_ids = mmcif_dict['_entity_poly.pdbx_strand_id']
    if isinstance(entity_seqs, list):
        chain_ids = [ids.split(',') for ids in strand_ids]
        # Create dictionary of chain id (key) and sequences (value)
        sequences = dict((x, sublist[1].replace('\n', '')) for sublist in
                         zip(chain_ids, entity_seqs) for x in sublist[0])
    else:
        # Fix: the previous code used the unsplit id list as a single dict
        # key, which raised TypeError (unhashable list) or produced a bogus
        # key; split the comma-separated strand ids as in the list branch.
        sequences = {chain_id: entity_seqs.replace('\n', '')
                     for chain_id in strand_ids.split(',')}
    return sequences
def get_pdb_seq_from_atom(chain):
    """
    Get a protein sequence from chain atoms in a PDB file.

    This is used as a 'last resort' when sequence is not available in PDB
    headers.

    Args:
        chain: A Bio.PDB chain object.
    Returns:
        str: Protein sequence.
    """
    # TODO Deprecate and revert to using polypeptide builder.
    # Map residue number -> one-letter code, then emit in numeric order.
    # TODO Fix this, as not all residues are sequentially numbered.
    by_number = {
        int(residue.id[1]): seq1(residue.resname,
                                 custom_map=protein_letters_3to1)
        for residue in chain.get_residues()
    }
    return ''.join(by_number[num] for num in sorted(by_number))
def match_pdb_residue_num_to_seq(model, ref=None):
    """Match PDB residue numbering (as given in PDB file) to
    a reference sequence (can be pdb sequence) numbered by index.

    Reference sequence is 1-indexed (and is indexed as such in output).

    Args:
        model: A biostructmap Model object.
        ref (dict): A dictionary containing reference protein sequences for each
            chain in the protein structure. Defaults to the protein sequences
            given in PDB file.

    Returns:
        dict: A dictionary mapping reference sequence index (key) to
            residue numbering as given in the PDB file (value). For example,
            we might have a key of ('A', 17) for the 17th residue in the
            reference sequence for chain 'A', with a value of
            ('A', (' ', 273, ' ')) that represents the Bio.PDB identifier for
            the corresponding residue.
    """
    ppb = PPBuilder()
    polypeptides = ppb.build_peptides(model.parent().structure)
    if ref is None:
        ref = model.parent().sequences
    output = {}
    for peptide in polypeptides:
        peptide_sequence = peptide.get_sequence()
        # Presume that peptide belongs to a single chain
        chain_id = peptide[0].get_full_id()[2]
        # align_protein_sequences appears to return a pair of index maps;
        # only the second (ref index -> peptide position) is used here.
        # NOTE(review): pdb_pos looks 1-indexed into the peptide -- confirm
        # against seqtools.align_protein_sequences.
        _, ref_to_pdb = align_protein_sequences(peptide_sequence, ref[chain_id])
        for ref_pos, pdb_pos in ref_to_pdb.items():
            # full_id()[2:4] is the (chain id, residue id) pair.
            output[(chain_id, ref_pos)] = peptide[pdb_pos - 1].get_full_id()[2:4]
    return output
| {
"repo_name": "andrewguy/biostructmap",
"path": "biostructmap/pdbtools.py",
"copies": "1",
"size": "14755",
"license": "mit",
"hash": 6308888892342810000,
"line_mean": 38.8783783784,
"line_max": 96,
"alpha_frac": 0.6147746527,
"autogenerated": false,
"ratio": 3.6128795298726737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47276541825726737,
"avg_score": null,
"num_lines": null
} |
""" A collection of tools for manipulating Numpy arrays
This module contains a number of convenience routines for
common manipulations of data stored in 1-D and 2-D arrays.
"""
from math import floor
import numpy as np
def find_indices(lats, lons, lat0, lon0, dlat, dlon, nrows, ncols):
    """Find row and column indices into a 2D array.

    The location of each element in the 2D array is specified by the
    latitude and longitude of the *centre* of the cell at the lower left
    corner of the array (lat0, lon0) and the incremental change in latitude
    and longitude between each element 'dlat' and 'dlon'. The number of rows
    and columns in the array is given by nrows and ncols respectively.

    Returns: Lists of row and column indices (or a pair of scalars for
    scalar input); indices have a value of -999 if the input location is
    outside of the defined data region.
    """
    lat_arr = np.asarray(lats)
    lon_arr = np.asarray(lons)
    min_lat = lat0 - 0.5 * dlat
    max_lat = min_lat + nrows * dlat
    min_lon = lon0 - 0.5 * dlon
    max_lon = min_lon + ncols * dlon

    def _row(lat):
        # Branch order mirrors the original: outside region, first (top)
        # row, last (bottom) row, then the general case.
        if lat < min_lat or max_lat < lat:
            return -999
        if (max_lat - dlat) <= lat <= max_lat:
            return 0
        if min_lat <= lat <= (min_lat + dlat):
            return nrows - 1
        return int(floor(nrows - (lat - min_lat) / dlat))

    def _col(lon):
        if lon < min_lon or max_lon < lon:
            return -999
        if min_lon <= lon <= (min_lon + dlon):
            return 0
        if (max_lon - dlon) <= lon <= max_lon:
            return ncols - 1
        return int(floor((lon - min_lon) / dlon))

    if lat_arr.shape != () and lon_arr.shape != ():  # multiple points
        return [_row(lat) for lat in lats], [_col(lon) for lon in lons]
    # scalar input
    return _row(lats), _col(lons)
def embed(arr, shape, pos='centre'):
    """ Embed a 2D array, centred, in a larger zero-filled one.

    This function returns a 2D array embedded in a larger one with size
    determined by the shape parameter, for zero-padding the borders of
    the input array.

    Args:
        arr: 2D input array.
        shape: shape of the (larger) output array.
        pos: placement; only centring is implemented (parameter kept for
            interface compatibility).

    Returns:
        np.ndarray: zero array of the requested shape with ``arr`` centred.
    """
    newsize = np.asarray(shape)
    currsize = np.array(arr.shape)
    # Floor (integer) division: plain '/' yields float indices, which
    # raise a TypeError when used to slice under Python 3 / modern numpy.
    startind = (newsize - currsize) // 2
    endind = startind + currsize
    result = np.zeros(shape)
    result[startind[0]:endind[0], startind[1]:endind[1]] = arr
    return result
def crop(arr, shape, pos='centre'):
    """ Crop a centred 2D sub-array from a larger one.

    Args:
        arr: 2D input array.
        shape: shape of the (smaller) output array.
        pos: placement; only centring is implemented (parameter kept for
            interface compatibility).

    Returns:
        np.ndarray: view of the centre of ``arr`` with the given shape.
    """
    newsize = np.asarray(shape)
    currsize = np.array(arr.shape)
    # Floor division keeps the slice indices integral; '/' would produce
    # floats and fail under Python 3 / modern numpy (see embed()).
    startind = (currsize - newsize) // 2
    endind = startind + newsize
    return arr[startind[0]:endind[0], startind[1]:endind[1]]
class Raster(np.ndarray):
#class Raster(np.ma.MaskedArray): # TO DO: Consider this at some point..
"""
Raster(data, x0, y0, dx, dy, origin='Lower')
This class attempts to unify the handling of remote sensing data and the
typical operations performed on it. Many data formats are not natively
ingested by a GIS and are therefore difficult to process using these tools.
Raster is a subclass of a Numpy ndarray containing additional
attributes, which describe the data grid. The origin of the grid is
defined by the point (`x0`, `y0`) in the units of the map projection
and coordinate system in which the data are defined.
Parameters
----------
data : array_like
A 2D array containing the raster data.
x0 : float
The x origin of the data grid in the units of the map projection and
coordinate system.
y0 : float
The y origin of the data grid in the units of the map projection and
coordinate system.
dx : float
The regular grid spacing along the x axis. Irregular grids are not
supported.
dy : float
The regular grid spacing along the y axis. Irregular grids are not
supported.
origin : {'Lower', 'Upper'}
A string describing where (`x0`, `y0`) is located. The default value
of `Lower` means that the grid origin is at the centre of the lower
left grid cell. The only accepted alternative value is `Upper`, which
defines the origin as the top left.
Attributes
----------
rows : int
The number of rows in the data grid.
cols : int
The number of columns in the data grid.
x0 : float
The x origin of the data grid in the units of the map projection and
coordinate system.
y0 : float
The y origin of the data grid in the units of the map projection and
coordinate system.
dx : float
The regular grid spacing along the x axis. Irregular grids are not
supported.
dy : float
The regular grid spacing along the y axis. Irregular grids are not
supported.
origin : {'Lower', 'Upper'}
A string describing where (`x0`, `y0`) is located. The default value
of `Lower` means that the grid origin is at the centre of the lower
left grid cell. The only accepted alternative value is `Upper`, which
defines the origin as the top left.
Methods
-------
subset(min_x, min_y, max_x, max_y)
Extract a sub-region from the Raster.
"""
def __new__(cls, data, x0, y0, dx, dy, origin='Lower'):
    """Create a new Raster from `data` plus grid-geometry attributes.

    Only ``origin='Lower'`` is currently implemented; any other value
    raises NotImplementedError.
    """
    if origin != 'Lower':
        raise NotImplementedError("'%s' is not a suitable origin" % origin)
    # View the array_like input through this ndarray subclass, then
    # attach the grid metadata to the freshly created instance.
    instance = np.asarray(data).view(cls)
    instance.rows, instance.cols = instance.shape
    instance.x0, instance.y0 = x0, y0
    instance.dx, instance.dy = dx, dy
    instance.origin = origin
    return instance
# # TO DO:
# # Slicing of raster objects needs to be handled carefully..
# def __array_finalize__(self, obj):
# err = 'Slicing of Raster objects not supported, ' \
# 'please use the subset() method instead.'
# raise NotImplementedError(err)
# # reset the attribute from passed original object
# self.rows, self.cols = self.shape
# self.dx = getattr(obj, 'dx', None)
# self.dy = getattr(obj, 'dy', None)
# self.x0 = getattr(obj, 'x0', None)
# self.y0 = getattr(obj, 'y0', None)
# self.origin = getattr(obj, 'origin', None)
# # We do not need to return anything
def subset(self, min_x, min_y, max_x, max_y):
    """Extract a sub-region from the Raster.

    A new Raster holding a copy of the requested region is returned,
    with its origin attributes updated for the cropped grid.  No
    resampling is done, so the result matches the requested bounds
    only to within one grid cell (`dx`/`dy`).

    Parameters
    ----------
    min_x, min_y, max_x, max_y : float
        Bounding box of the required sub-region, in map units.

    Returns
    -------
    result : Raster
        A sub-region of the original Raster.
    """
    # Map-space bounds -> grid indices.  The y values are passed
    # top-first (max_y, min_y), presumably because row indices run
    # top-down in the underlying array -- confirm against find_indices.
    row_idx, col_idx = find_indices([max_y, min_y], [min_x, max_x],
                                    self.y0, self.x0,
                                    self.dy, self.dx,
                                    self.rows, self.cols)
    first_row, last_row = np.asarray(row_idx)
    first_col, last_col = np.asarray(col_idx)
    # TO DO: assign correct x0/y0 once origin='Upper' is implemented.
    if self.origin == 'Lower':
        new_x0 = self.x0 + first_col * self.dx
        new_y0 = self.y0 + (self.rows - 1 - last_row) * self.dy
    return Raster(self[first_row:last_row, first_col:last_col],
                  x0=new_x0,
                  y0=new_y0,
                  dx=self.dx,
                  dy=self.dy,
                  origin=self.origin)
def sample(self, x, y):
    """Sample the Raster at multiple scattered locations.

    Nearest-neighbour sampling only: the value of the grid cell whose
    centre is closest to each (x, y) pair is returned; no
    interpolation is performed.

    Parameters
    ----------
    x, y : array_like
        Coordinate arrays (map units) of the locations to sample.

    Returns
    -------
    result : ndarray
        Sampled values; entries remain -999 wherever find_indices
        returned its -999 out-of-grid sentinel for that location.
    """
    if self.origin == 'Lower':
        row_idx, col_idx = find_indices(y, x,
                                        self.y0, self.x0,
                                        self.dy, self.dx,
                                        self.rows, self.cols)
    row_idx = np.asarray(row_idx)
    col_idx = np.asarray(col_idx)
    # -999 marks out-of-grid points; only in-bounds locations are read.
    inside = (row_idx != -999) & (col_idx != -999)
    result = np.ones(len(x), dtype=self.dtype)
    result *= -999
    result[inside] = self[row_idx[inside], col_idx[inside]]
    return result
| {
"repo_name": "sahg/SAHGutils",
"path": "sahgutils/io/arraytools.py",
"copies": "1",
"size": "11695",
"license": "bsd-3-clause",
"hash": 6955921947869257000,
"line_mean": 34.6614420063,
"line_max": 79,
"alpha_frac": 0.5415989739,
"autogenerated": false,
"ratio": 4.1325088339222615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016137865301611445,
"num_lines": 319
} |
"""A collection of tools for rendering information about xml (typically errors)"""
from __future__ import unicode_literals
from __future__ import print_function
def extract_lines(code, line, padding=2):
    """Extracts a number of lines from code surrounding a given line number,
    returns a list of tuples that contain the line number (1 indexed) and the
    line text.
    """
    lines = code.splitlines()
    start = max(0, line - padding - 1)
    end = min(len(lines), line + padding - 1)
    showlines = lines[start : end + 1]
    linenos = [n + 1 for n in range(start, end + 1)]
    # list(...) so the result is subscriptable and re-iterable on
    # Python 3, where zip() returns a one-shot iterator (the docstring
    # promises a list, and extract()/render_error() rely on that).
    return list(zip(linenos, showlines))
def extract(code, line, padding=3):
    """Return ``(first_lineno, text)`` for the lines surrounding ``line``.

    ``text`` is the extracted lines joined with newlines and
    ``first_lineno`` is the 1-indexed number of the first of them.
    """
    # Materialize first: extract_lines may hand back an iterator (zip)
    # on Python 3, which supports neither subscripting nor re-iteration.
    lines = list(extract_lines(code, line, padding))
    start = lines[0][0]
    text = "\n".join(l[1] for l in lines)
    return start, text
def number(code, linestart=1, highlight_line=-1, number_wrap=None):
    """Prefix each line of ``code`` with a right-aligned line-number gutter.

    Numbering starts at ``linestart``; ``highlight_line`` (if present)
    is marked with a ``*``.  ``number_wrap`` may be a callable used to
    decorate the rendered gutter text (e.g. for markup); by default it
    is the identity.
    """
    if number_wrap is None:
        number_wrap = lambda n: n
    lines = code.splitlines()
    # Gutter width: wide enough for the largest line number plus the
    # '*' marker and trailing space.  (Previously this was derived
    # from the *character length* of each line's text, which was
    # unrelated to the numbers printed and crashed on empty input.)
    last_lineno = linestart + len(lines) - 1
    max_line = max(6, len(str(last_lineno)) + 2)
    out_lines = []
    for lineno, line in zip(range(linestart, linestart + len(lines)), lines):
        if lineno == highlight_line:
            gutter = ("*%i " % lineno).rjust(max_line)
        else:
            gutter = ("%i " % lineno).rjust(max_line)
        # Apply number_wrap (it was previously accepted but never used).
        out_lines.append(number_wrap(gutter) + line)
    return "\n".join(out_lines)
def column_to_spaces(line, col):
    """Return the number of display spaces required to reach column
    ``col`` (1-indexed) in ``line``, counting each tab as 4 spaces.
    """
    spaces = 0
    for colno, char in enumerate(line):
        # Bug fix: the tab test previously compared the *column number*
        # (``col``) against "\t" instead of the character itself, so
        # tabs were never counted as 4 spaces.
        spaces += 4 if char == "\t" else 1
        if colno + 1 == col:
            return spaces
    return spaces
def render_error(code, show_lineno, padding=3, col=None, colors=False, col_text="here"):
    """Render the lines of ``code`` around ``show_lineno`` with a
    line-number gutter, marking the offending line with ``*`` and --
    when ``col`` is given -- pointing a caret at that column.

    ``colors`` is accepted for interface compatibility but unused here.
    """
    # Materialize once: on Python 3 extract_lines may return a one-shot
    # zip iterator, and the data is traversed twice below (previously
    # the second traversal saw an exhausted iterator and produced
    # empty output).
    lines = list(extract_lines(code, show_lineno, padding=padding))
    linenos = [str(lineno) for lineno, _ in lines]
    maxlineno = max(len(l) for l in linenos)
    render_lines = []
    for lineno, line in lines:
        if lineno == show_lineno:
            fmt = "*%s %s"
        else:
            fmt = " %s %s"
        render_lines.append(fmt % (str(lineno).ljust(maxlineno), line))
        if col is not None and lineno == show_lineno:
            point_at = column_to_spaces(line, col)
            pad = " " * (maxlineno + 1)
            # Place the label before the caret when there is room,
            # otherwise after it.
            if point_at > len(col_text) + 1:
                render_lines.append(pad + (col_text + " ^").rjust(point_at + 1))
            else:
                render_lines.append(pad + "^".rjust(point_at + 1) + " " + col_text)
    return "\n".join(line.replace("\t", " " * 4) for line in render_lines)
if __name__ == "__main__":
    # Demo / manual smoke test: render an error report for a sample XML
    # document (which contains a deliberately malformed "<debugIn" tag),
    # pointing at line 14, column 5.
    xml = """<moya xmlns="http://moyaproject.com">
<mountpoint name="testmount" libname="root">
<url name="article" url="/{year}/{month}/{day}/{slug}/" methods="GET" target="viewpost">
<debug>url main: ${url.year}, ${url.month}, ${url.day}, ${url.slug}</debug>
</url>
<url name="front" url="/" methods="GET">
<debug>Front...</debug>
<return><str>Front</str></return>
</url>
</mountpoint>
<macro docname="viewpost">
<debugIn viewpost</debug>
<return><str>Hello, World</str></return>
<return>
<response template="birthday.html">
<str dst="title">My Birthday</str>
<str dst="body">It was my birthday today!</str>
</response>
</return>
</macro>
<!--
<macro libname="showapp">
<debug>App is ${app}</debug>
</macro>
<macro libname="blogmacro">
<debug>Called blogmacro in blog lib</debug>
</macro>
<macro libname="blogmacro2">
<debug>Called blogmacro2 with app: ${debug:app}</debug>
</macro>
-->
</moya>"""
    print(render_error(xml, 14, col=5))
| {
"repo_name": "moyaproject/moya",
"path": "moya/xmlreport.py",
"copies": "1",
"size": "3639",
"license": "mit",
"hash": -238926830751329000,
"line_mean": 30.6434782609,
"line_max": 92,
"alpha_frac": 0.5902720528,
"autogenerated": false,
"ratio": 3.3725671918443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9454179279560706,
"avg_score": 0.0017319930167189314,
"num_lines": 115
} |
"""A collection of tools for various purely Python operations"""
from random import Random
import itertools
import string
# In theory should be few to no imports outside perhaps stdlib here
def rands(n):
    """Generates a random alphanumeric string of length *n*.

    Characters are drawn without replacement, so *n* may not exceed
    the 62 available letters + digits.
    """
    # string.letters was removed in Python 3; string.ascii_letters
    # exists on both Python 2 and 3 and is locale-independent.
    return ''.join(Random().sample(string.ascii_letters + string.digits, n))
def adjoin(space, *lists):
    """
    Glues together several columns of strings, padding every column
    except the last with ``space`` extra characters so they line up.
    The idea is to prettify.
    """
    # Column widths: widest entry per column plus the requested gap;
    # the final column carries no trailing gap.
    widths = [max(map(len, column)) + space for column in lists[:-1]]
    widths.append(max(map(len, lists[-1])))
    depth = max(map(len, lists))
    padded_columns = []
    for width, column in zip(widths, lists):
        cells = [entry.ljust(width) for entry in column]
        # Pad short columns with blank cells so all rows are complete.
        cells.extend([' ' * width] * (depth - len(column)))
        padded_columns.append(cells)
    return '\n'.join(''.join(row) for row in zip(*padded_columns))
def iterpairs(seq):
    """
    Parameters
    ----------
    seq: sequence
        (must be re-iterable: iter() is called on it twice, so a
        one-shot iterator is not supported)

    Returns
    -------
    iterator returning overlapping pairs of elements

    Example
    -------
    >>> list(iterpairs([1, 2, 3, 4]))
    [(1, 2), (2, 3), (3, 4)]
    """
    # input may not be sliceable, so advance a second iterator by one
    # element instead of slicing.  The builtins next()/zip() replace
    # the Python-2-only .next() and itertools.izip; the None default
    # also makes an empty input yield no pairs instead of raising
    # StopIteration.
    seq_it = iter(seq)
    seq_it_next = iter(seq)
    next(seq_it_next, None)
    return zip(seq_it, seq_it_next)
def indent(string, spaces=4):
    """Indent every line of *string* by *spaces* spaces."""
    prefix = ' ' * spaces
    return '\n'.join(prefix + line for line in string.split('\n'))
def banner(message):
    """
    Return 80-char width message declaration with = bars on top and bottom.
    """
    bar = '=' * 80
    return '\n'.join([bar, message, bar])
class groupby(dict):
    """
    A simple groupby different from the one in itertools.

    Does not require the sequence elements to be sorted by keys,
    however it is slower.  Iterating over an instance yields
    (key, group_list) pairs.
    """
    def __init__(self, seq, key=lambda x: x):
        for value in seq:
            k = key(value)
            self.setdefault(k, []).append(value)

    def __iter__(self):
        # dict.iteritems no longer exists on Python 3 (referencing it
        # at class-definition time broke the entire module import);
        # items() gives the same (key, group) iteration on 2 and 3.
        return iter(self.items())
def map_indices_py(arr):
    """
    Returns a dictionary with (element, index) pairs for each element in the
    given array/list
    """
    return dict((element, i) for i, element in enumerate(arr))
#===============================================================================
# Set operations
#===============================================================================
def union(*seqs):
    """Union of all input sequences, returned as the type of the first."""
    merged = set()
    for seq in seqs:
        merged |= seq if isinstance(seq, set) else set(seq)
    return type(seqs[0])(list(merged))
def difference(a, b):
    """Elements of *a* that are not in *b*, returned as the type of *a*."""
    remaining = set(a) - set(b)
    return type(a)(list(remaining))
def intersection(*seqs):
    """Intersection of all input sequences, returned as the type of the first."""
    common = set(seqs[0])
    for seq in seqs:
        common &= seq if isinstance(seq, set) else set(seq)
    return type(seqs[0])(list(common))
| {
"repo_name": "willgrass/pandas",
"path": "pandas/core/pytools.py",
"copies": "1",
"size": "2985",
"license": "bsd-3-clause",
"hash": 1512231176301478000,
"line_mean": 25.1363636364,
"line_max": 80,
"alpha_frac": 0.5353433836,
"autogenerated": false,
"ratio": 3.83183568677792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.486717907037792,
"avg_score": null,
"num_lines": null
} |
"""A collection of tools, tips, and tricks.
2009-07-20 22:36 IJC: Created
2010-10-28 11:53 IJMC: Updated documentation for Sphinx.
2011-06-15 09:34 IJMC: More functions have been added; cleaned documentation.
"""
import pdb
import numpy as np
def getfigs():
    """Return a list of all open matplotlib figures.

    No inputs or options."""
    from matplotlib._pylab_helpers import Gcf
    # Collect the figure *numbers* of every live figure manager.
    return [manager.canvas.figure.number
            for manager in Gcf.get_all_fig_managers()]
def nextfig():
    """Return one greater than the largest-numbered figure currently
    open.  If no figures are open, return unity.

    No inputs or options."""
    # 2010-03-01 14:28 IJC: Created
    # (An unreachable trailing "return figlist" after the if/else was
    # removed -- both branches already return.)
    figlist = getfigs()
    if len(figlist) == 0:
        return 1
    return max(figlist) + 1
def printfigs(filename, figs=None, format=None, pdfmode='texexec', verbose=False, closefigs=False):
    """Print desired figures using designated 'format'. Concatenate PDFs.

    :Inputs:
      filename -- string.  prepended to all open figures

      figs -- int or list.
              figures to access, then apply savefig to.  If None, print
              all open figures; if -1, print current figure.

      format -- string or list of strings.
              if 'pdf', all images are concatenated into one file (use
              "pdfs" for individual pdf figure files)

      pdfmode -- string;
              method of concatenating PDFs.  Either 'texexec' or 'gs'
              (for GhostScript) or 'tar' to wrap individual
              figures in a Tarball.

      closefigs -- bool
              If True, close each figure after printing it to disk.

    :NOTES:
       If no explicit path is passed and a subdirectory 'figures'
       exists in the current directory, the figures will be printed in
       'figures' instead.

    :EXAMPLE:
      ::

        from pylab import *
        figure(1); plot(arange(10), randn(10), 'ob')
        figure(2); plot(arange(15), randn(15), '-xr')
        printfigs('testing')
        !open testing.pdf
    """
    # NOTE(review): the 'figures' subdirectory behaviour described in the
    # docstring is not implemented anywhere in this function body -- confirm.
    # NOTE(review): Python-2-only print statements are used throughout.
    # 2009-07-20 23:10 IJC: Created; inspired by FGD.
    # 2009-09-08 13:54 IJC: Made it work with single-figure, non-list input.
    # 2010-02-02 11:50 IJC: Now it kills the 'logfile' detritus.
    # 2010-10-27 17:05 IJC: New texexec syntax is "result=...", not "result ..."
    # 2011-03-01 18:14 IJC: Added capability for multiple formats (in
    #                       a list).  Also, figure numbers are not
    #                       catted to the filename when saving a
    #                       single figure.
    # 2011-08-29 10:23 IJMC: Now don't try to concatenate single PDF figures.
    # 2012-11-01 11:41 IJMC: Slightly changed if-block for 'figs'.
    # 2014-05-03 15:04 IJMC: Added 'closefigs' flag.
    # 2014-09-02 08:50 IJMC: Added 'tar' PDFMODE
    # 2015-12-08 09:03 IJMC: Now 'None' is also valid PDF mode
    from pylab import savefig, figure, gcf, close
    from matplotlib._pylab_helpers import Gcf
    import os
    import pdb

    figlist = getfigs()
    if verbose: print "Available figure numbers>>" ,figlist
    # Normalize 'figs' into a list of figure numbers.
    # NOTE(review): "figs is -1" relies on CPython small-int caching;
    # "figs == -1" would be the safe comparison.
    if figs is None:
        figs = figlist
    elif figs is -1:
        figs = [gcf().number]
    else:
        if hasattr(figs, '__iter__'):
            figs = list(figs)
        else:
            figs = [figs]
    # Keep only the requested figures that are actually open:
    figlist = [val for val in figs if val in figlist]
    nfig = len(figlist)
    print "Figures to print>>",figlist

    # Infer the format from the filename extension when not given:
    if format==None:
        format = filename[-3::]
        filename = filename[0:len(filename)-4]
    # Normalize 'format' into a list of format strings:
    if hasattr(format, 'capitalize'):
        format = [format]
        nformat = 1
    elif hasattr(format, '__iter__'):
        nformat = len(format)
    else:
        format = [str(format)]
        nformat = 1

    if len(figlist)==0:
        print "No open figures found; exiting."
        return

    for thisformat in format:
        # Save each requested figure to its own file:
        fnamelist = []
        for ii in range(nfig):
            # Append the figure number only when printing several figures:
            if nfig>1:
                fname = filename + str(figlist[ii])
            else:
                fname = filename
            if thisformat=='pdf' and nfig>1:
                fname = fname + '_temp'
            if thisformat=='pdfs':
                fname = fname + '.pdf'
            else:
                fname = fname + '.' + thisformat
            figure(figlist[ii])
            savefig(fname )
            fnamelist.append(fname)
            if closefigs and thisformat==format[-1]: # last time at this figure
                close(figlist[ii])
        if thisformat=='pdf':
            if nfig==1:
                savefig(fnamelist[0])
            else: # we have to concatenate multiple PDF figures:
                bigfilename = filename + '.' + thisformat
                if os.path.isfile(bigfilename):
                    os.remove(bigfilename)
                # Build the shell command for the chosen concatenation tool:
                if pdfmode is None:
                    execstr, rmstr = '', ''
                elif pdfmode=='gs':
                    execstr = 'gs -q -sPAPERSIZE=letter -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=' + bigfilename
                    rmstr = ''
                elif pdfmode=='texexec':
                    execstr = 'texexec --pdfcopy --result=' + bigfilename
                    rmstr = 'rm %s' % bigfilename.replace('pdf','log')
                elif pdfmode[0:3]=='tar':
                    # Tarball mode: move figures to the working directory
                    # and archive them rather than concatenating.
                    execstr = 'tar -cvf %s ' % bigfilename.replace('pdf','tar')
                    fnamelist_local = [os.path.split(fn)[1] for fn in fnamelist]
                    [os.rename(fn, fn2) for fn,fn2 in zip(fnamelist, fnamelist_local)]
                    rmstr = 'rm ' + ' '.join(fnamelist_local)
                    fnamelist = fnamelist_local
                else:
                    execstr = ''
                    rmstr = ''
                # Append every per-figure filename to the shell command:
                for fn in fnamelist:
                    execstr += ' ' + fn
                #pdb.set_trace()
                if verbose: print "PDFMODE exec call>>", execstr
                os.system(execstr)
                #subprocess.call(execstr)
                #pdb.set_trace()
                if len(rmstr)>0:
                    os.system(rmstr)
                # Clean up the per-figure temporary files:
                if pdfmode is not None:
                    for fn in fnamelist:
                        try:
                            os.remove(fn)
                        except:
                            pass
    return
def plotstyle(i, c=['b', 'g', 'r', 'c', 'm', 'y', 'k'], \
              s=['.', 'x', 's', '^', '*', 'o', '+', 'v', 'p', 'D'], \
              l=['-', '--', '-.', ':']):
    """Return plot properties to help distinguish many types of plot symbols.

    :INPUT:
       i -- int.

    :OPTIONAL INPUT:
       c -- color, or list of colors accepted by pylab.plot

       s -- symbol, or list of symbols accepted by pylab.plot

       l -- linestyle, or list of linestyles accepted by pylab.plot

    :OUTPUT:
       tuple of (color, symbol, linestyle)

    :REQUIREMENTS: :doc:`numpy`
    """
    # 2009-09-10 16:42 IJC: Created
    from numpy import tile, array

    # Coerce each option set to a list (a bare string becomes a
    # one-element-per-character list, as before):
    if c.__class__ != list:
        c = list(c)
    if s.__class__ != list:
        s = list(s)
    if l.__class__ != list:
        l = list(l)

    if not hasattr(i, '__iter__'):
        i = array([i])
    i = abs(array(i))

    # Tile each option list until it is long enough to index by max(i):
    cycled = []
    for options in (c, s, l):
        repeats = (max(i) / len(options) + 1.).astype(int)
        cycled.append(tile(options, repeats))
    c, s, l = cycled

    if len(i) == 1:
        return c[i][0], s[i][0], l[i][0]
    return list(c[i]), list(s[i]), list(l[i])
def flatten(L, maxdepth=100):
    """Flatten a (possibly nested) list into a flat list.

    ``maxdepth`` limits how many levels of nesting are expanded;
    deeper sub-lists are left as-is.

    Stolen from http://mail.python.org/pipermail/tutor/2001-January/002914.html"""
    # 2009-09-10 16:54 IJC: Input.
    if type(L) != type([]):
        return [L]
    if L == [] or maxdepth <= 0:
        return L
    # Recurse into the head one level deeper; the tail of the list
    # stays at the same nesting level.  (Previously maxdepth was
    # decremented but never checked and never passed to the head
    # recursion, so the limit had no effect at all.)
    return flatten(L[0], maxdepth=maxdepth - 1) + flatten(L[1:], maxdepth=maxdepth)
def replaceall(seq, obj, rep):
    """Replace all instances of 'obj' with 'rep' in list 'seq'

    :INPUT:
       seq -- (list) list within which to find-and-replace elements

       obj -- target object to replace

       rep -- replacement object

    :EXAMPLE:
      ::

        import tools
        b = [2, ['spam', ['eggs', 5, dict(spam=3)]]]
        tools.replaceall(b, 'spam', 'bacon')
        print b

    :NOTES:
      -- Will fail if 'obj' is itself a list.

      -- Edits list in-place, so make a copy first if you want to
         retain the old version of your list.

      -- Has not been tested for extremely deep lists

    :SEE ALSO:
      :func:`popall`
    """
    # 2009-09-11 10:22 IJC: Created
    for index in range(len(seq)):
        element = seq[index]
        if element.__class__ == list:
            # Descend into nested lists and edit them in place too.
            replaceall(element, obj, rep)
        elif element == obj:
            seq[index] = rep
    return
def popall(seq, obj):
    """Remove all instances of 'obj' from list 'seq'

    :INPUT:
       seq -- (list) list from which to pop elements

       obj -- target object to remove

    :EXAMPLE:
      ::

        import tools
        b = [3, 'spam', range(5)]
        tools.popall(b, 4)
        print b

    :NOTES:
      -- Will fail if 'obj' is itself a list.

      -- Edits list in-place, so make a copy first if you want to
         retain the old version of your list.

      -- Has not been tested for extremely deep lists

    :SEE ALSO:
      :func:`replaceall`
    """
    # 2009-09-11 10:22 IJC: Created
    # First recurse into any nested lists.  (A leftover Python-2 debug
    # statement that printed every element was removed here.)
    n = len(seq)
    for ii in range(n):
        if seq[ii].__class__ == list:
            popall(seq[ii], obj)
    # Then strip 'obj' from this level until none remain; catch only
    # ValueError (raised by list.remove on a missing value) instead of
    # a bare 'except' that would hide unrelated errors.
    doneYet = False
    while not doneYet:
        try:
            seq.remove(obj)
        except ValueError:
            doneYet = True
    return
def drawRectangle(x, y, width, height, **kw):
    """Draw a rectangle patch on the current, or specified, axes.

    :INPUT:
      x, y -- lower-left corner of rectangle

      width, height -- dimensions of rectangle

    :OPTIONAL INPUT:
      ax -- Axis to draw upon.  if None, defaults to current axes.

      dodraw -- if True, call 'draw()' function to immediately re-draw axes.

      **kw -- options passable to :func:`matplotlib.patches.Rectangle`

    :NOTE:  Axes will NOT auto-rescale after this is called.
    """
    # 2009-09-17 01:33 IJC: Created
    # 2014-03-01 13:51 IJMC: Added 'dodraw' option.
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches

    # dict.has_key was removed in Python 3; use 'in'/pop instead.
    if 'ax' in kw:
        ax = kw.pop('ax')
    else:
        ax = plt.gca()
    # Pop 'dodraw' so it is not forwarded to Rectangle(), which would
    # raise TypeError on the unexpected keyword.
    dodraw = kw.pop('dodraw', False)
    p = mpatches.Rectangle((x, y), width, height, **kw)
    ax.add_patch(p)
    if dodraw:
        plt.draw()
    return ax, p
def drawPolygon(xy, **kw):
    """Draw a polygon patch on the current, or specified, axes.

    :INPUT:
      xy -- numpy array of coordinates, with shape Nx2.

    :OPTIONAL INPUT:
      ax -- Axis to draw upon.  if None, defaults to current axes.

      dodraw -- if True, call 'draw()' function to immediately re-draw axes.

      **kw -- options passable to :func:`matplotlib.patches.Polygon`

    :SEE ALSO:
      :func:`drawRectangle`

    :NOTE:  Axes will NOT auto-rescale after this is called.
    """
    # 2010-12-02 19:58 IJC: Created from drawRectangle
    # 2014-03-01 13:51 IJMC: Added 'dodraw' option.
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches

    # dict.has_key was removed in Python 3; use 'in'/pop instead.
    if 'ax' in kw:
        ax = kw.pop('ax')
    else:
        ax = plt.gca()
    # Pop 'dodraw' so it is not forwarded to Polygon(), which would
    # raise TypeError on the unexpected keyword.
    dodraw = kw.pop('dodraw', False)
    p = mpatches.Polygon(xy, **kw)
    ax.add_patch(p)
    if dodraw:
        plt.draw()
    return ax, p
def drawCircle(x, y, radius, **kw):
    """Draw a circular patch on the current, or specified, axes.

    :INPUT:
      x, y -- center of circle

      radius -- radius of circle

    :OPTIONAL INPUT:
      ax -- Axis to draw upon.  if None, defaults to current axes.

      dodraw -- if True, call 'draw()' function to immediately re-draw axes.

      **kw -- options passable to :func:`matplotlib.patches.Circle`

    :NOTE:  Axes will NOT auto-rescale after this is called.
    """
    # 2011-01-28 16:03 IJC: Created
    # 2014-03-01 13:51 IJMC: Added 'dodraw' option.
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches

    # dict.has_key was removed in Python 3; use 'in'/pop instead.
    if 'ax' in kw:
        ax = kw.pop('ax')
    else:
        ax = plt.gca()
    # Pop 'dodraw' so it is not forwarded to Circle(), which would
    # raise TypeError on the unexpected keyword.
    dodraw = kw.pop('dodraw', False)
    p = mpatches.Circle((x, y), radius, **kw)
    ax.add_patch(p)
    if dodraw:
        plt.draw()
    return ax, p
def drawEllipse(x, y, width, height, **kw):
    """Draw an elliptical patch on the current, or specified, axes.

    :INPUT:
      x, y -- center of ellipse

      width -- width of ellipse

      height -- width of ellipse

    :OPTIONAL INPUT:
      ax -- Axis to draw upon.  if None, defaults to current axes.

      dodraw -- if True, call 'draw()' function to immediately re-draw axes.

      **kw -- options passable to :func:`matplotlib.patches.Ellipse`
              (angle, linewidth, fill, ...)

    :NOTE:  Axes will NOT auto-rescale after this is called.

    :SEE_ALSO:
      :func:`drawCircle`, :func:`drawRectangle`
    """
    # 2011-10-20 11:32 IJMC: Created
    # 2014-03-01 13:51 IJMC: Added 'dodraw' option.
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches

    # dict.has_key was removed in Python 3; use 'in'/pop instead.
    if 'ax' in kw:
        ax = kw.pop('ax')
    else:
        ax = plt.gca()
    # Pop 'dodraw' so it is not forwarded to Ellipse(), which would
    # raise TypeError on the unexpected keyword.
    dodraw = kw.pop('dodraw', False)
    p = mpatches.Ellipse((x, y), width, height, **kw)
    ax.add_patch(p)
    if dodraw:
        plt.draw()
    return ax, p
def errxy(x,y,xbins, xmode='mean', ymode='mean', xerr='minmax', yerr='sdom', clean=None, binfactor=None, verbose=False,returnstats=False, timing=False, doindex=False):
    """Bin down datasets in X and Y for errorbar plotting

    :INPUTS:
       x -- (array) independent variable data

       y -- (array) dependent variable data

       xbins -- (array) edges of bins, in x-space.  Only x-data
                between two bin edges will be used.  Thus if M bin
                edges are entered, (M-1) datapoints will be returned.
                If xbins==None, then no binning is done.

    :OPTIONAL INPUT:
       xmode/ymode -- (str) method to aggregate x/y data into datapoints:
             'mean' -- use numpy.mean
             'median' -- use numpy.median
             'sum' -- use numpy.sum
             None -- don't compute; return the empty list []

       xerr/yerr -- (str) method to aggregate x/y data into errorbars
             'std' -- sample standard deviation (numpy.std)
             'sdom' -- standard deviation on the mean; i.e., std/sqrt(N)
             'minmax' -- use full range of data in the bin
             None -- don't compute; return the empty list []

       binfactor -- (int) If not None, average over this many
             consecutive values instead of binning explicitly by
             time-based bins.  Can also be a sequence, telling the
             number of values over which to average.  E.g.,
             binfactor=[10,10,20] will bin over the first 10 points,
             the second 10 points, and the next 20 points.

       clean -- (dict) keyword options to clean y-data ONLY, via
             analysis.removeoutliers, with an additional "nsigma"
             keyword.  See removeoutliers for more information.
             E.g.:  clean=dict(nsigma=5,remove='both',niter=1)

    :OUTPUTS:     a tuple of four arrays to be passed to matplotlib.pyplot.errorbar:
       xx -- locations of the aggregated x-datapoint in each bin

       yy -- locations of the aggregated y-datapoint in each bin

       xerr -- x-errorbars

       yerr -- y-errorbars

    :EXAMPLE:
      ::

       x = hstack((arange(10), arange(20)+40))
       y = randn(len(x))
       xbins = [-1,15,70]
       xx,yy,xerr,yerr = errxy(x,y,xbins)
       plot(x,y, '.b')
       errorbar(xx,yy,xerr=xerr,yerr=yerr, fmt='or')

    :NOTES:
       To just bin down uncleaned data (i.e., no 'error' terms
       returned), set clean, xerr, yerr to None.  However, when
       computing all values (xerr and yerr not None) it is faster
       to set clean to some rediculous value, i.e.,
       clean=dict(niter=0, nsigma=9e99).  This probably means more
       optimization could be done.

       Be sure you call the errorbar function using the keywords xerr
       and yerr, since otherwise the default order of inputs to the
       function is (x,y,yerr,xerr).

       Data 'x' are determined to be in a bin with sides (L, R) when
       satisfying the condition (x>L) and (x<=R)

    :SEE ALSO:  matplotlib.pyplot.errorbar, :func:`analysis.removeoutliers`

    :REQUIREMENTS:  :doc:`numpy`, :doc:`analysis`
    """
    # 2009-09-29 20:07 IJC: Created w/mean-median and std-sdom-minmax.
    # 2009-12-14 16:01 IJC: xbins can be 'None' for no binning.
    # 2009-12-15 10:09 IJC: Added "binfactor" option.
    # 2009-12-22 09:56 IJC: "binfactor" can now be a sequence.
    # 2009-12-29 01:16 IJC: Fixed a bug with binfactor sequences.
    # 2010-04-29 09:59 IJC: Added 'returnstats' feature
    # 2010-10-19 16:25 IJC: Added 'sum' option for x-data
    # 2011-03-22 12:57 IJC: Added 'none' option for data and errors
    # 2012-03-20 16:33 IJMC: Fixed bug; xmode=='none' now works.
    # 2012-03-27 14:00 IJMC: Now using np.digitize -- speed boost.
    #                        Rewrote code to optimize (somewhat),
    #                        cleaned up 'import' statements.
    # 2012-04-08 15:57 IJMC: New speed boost from adopting
    #                        numpy.histogram-like implementation:
    #                        numpy.searchsorted, etc.
    # 2017-02-15 09:58 IJMC: Now use "doindex" for index-based binning.
    # NOTE(review): this function uses Python-2-only syntax (print
    # statements, 'exec ... in locals()', dict.has_key) and will not
    # even parse on Python 3.
    import numpy as np
    from analysis import removeoutliers
    if timing:
        import time
        tic = time.time()

    def sdom(data):
        """Return standard deviation of the mean."""
        return np.std(data)/np.sqrt(data.size)

    def getcenter(data, cmode):
        """Get data center based on mode.  Helper function."""
        # NOTE(review): an unrecognized cmode leaves 'ret' unbound
        # and raises UnboundLocalError rather than a clear message.
        if cmode is None:
            ret = 0
        elif cmode=='mean':
            ret = np.mean(data)
        elif cmode=='median':
            ret = np.median(data)
        elif cmode=='sum':
            ret = np.sum(data)
        return ret

    def geterr(data, emode, cmode):
        """Get errorbar.  Helper function."""
        if emode is None:
            ret = []
        elif emode=='std':
            ret = np.std(data)
        elif emode=='sdom':
            ret = sdom(data)
        elif emode=='minmax':
            if len(data)==0:
                ret = [np.nan, np.nan]
            else:
                center = getcenter(data,cmode)
                ret = [center-min(data), max(data)-center]
        return ret

    def cleandata(data, clean, returnstats=False):
        """Clean data using removeoutliers.  Helper function."""
        init_count = np.array(data).size
        if clean==None: # Don't clean at all!
            #clean = dict(nsigma=1000, niter=0)
            if returnstats:
                ret = data, (init_count, init_count)
            else:
                ret = data
        else: # Clean the data somehow ('clean' must be a dict)
            # NOTE(review): has_key is Python-2-only; also this mutates
            # the caller's 'clean' dict in place via update().
            if not clean.has_key('nsigma'):
                clean.update(dict(nsigma=99999))
            data = removeoutliers(data, **clean)
            if returnstats:
                ret = data, (init_count, np.array(data).size)
            else:
                ret = data
        return ret

    if timing:
        print "%1.3f sec since starting function; helpers defined" % (time.time() - tic)

    ####### Begin main function ##########
    # Sort both datasets by x so each bin is a contiguous slice:
    sorted_index = np.argsort(x)
    x = np.array(x, copy=False)[sorted_index]
    y = np.array(y, copy=False)[sorted_index]
    #x = np.array(x,copy=True).ravel()
    #y = np.array(y,copy=True).ravel()
    xbins = np.array(xbins,copy=True).ravel()
    # No binning requested at all -- return data with NaN errorbars:
    if xbins[0]==None and binfactor==None:
        if returnstats ==False:
            ret = x, y, np.ones(x.shape)*np.nan, np.ones(y.shape)*np.nan
        else:
            ret = x, y, np.ones(x.shape)*np.nan, np.ones(y.shape)*np.nan, (x.size, x.size)
        return ret

    # Construct the bin edges:
    if binfactor==None: # used passed-in 'xbins'
        xbins = np.sort(xbins)
    elif hasattr(binfactor,'__iter__'): # use variable-sized bins
        binfactor = np.array(binfactor).copy()
        sortedx = np.sort(x)
        # Edges halfway between consecutive sorted x values:
        betweens = np.hstack((x.min()-1, 0.5*(sortedx[1::]+sortedx[0:len(x)-1]), x.max()+1))
        xbins = []
        counter = 0
        for ii in range(len(binfactor)):
            thisbin = betweens[counter]
            xbins.append(thisbin)
            counter += binfactor[ii]
        xbins.append(x.max() + 1)
    else: # bin down by the same factor throughout
        binfactor = int(binfactor)
        sortedx = np.sort(x)
        betweens = np.hstack((x.min()-1, 0.5*(sortedx[1::]+sortedx[0:len(x)-1]), x.max()+1))
        xbins = betweens[::binfactor]

    if timing:
        print "%1.3f sec since starting function; bins defined" % (time.time() - tic)

    nbins = len(xbins)-1
    if doindex: nbins += 1
    arraynan = np.array([np.nan])
    exx = []
    eyy = []
    xx = np.zeros(nbins)
    yy = np.zeros(nbins)
    # NOTE(review): yy2 is allocated but never used below.
    yy2 = np.zeros(nbins)
    init_count, final_count = y.size, 0
    if timing:
        setuptime = 0
        xdatatime = 0
        ydatatime = 0
        statstime = 0
    #import pylab as py
    #xxx = np.sort(x)
    if timing: tic1 = time.time()
    #inds = np.digitize(x, xbins)
    # Per-bin indices: boolean masks (doindex) or [start, stop) pairs
    # into the x-sorted arrays (searchsorted on the bin edges):
    if doindex:
        inds2 = [(x==xbins[ii]) for ii in range(nbins)]
    else:
        inds2 = [[x.searchsorted(xbins[ii], side='left'), \
                  x.searchsorted(xbins[ii+1], side='left')] for ii in range(nbins)]
    if timing: setuptime += (time.time() - tic1)
    #pdb.set_trace()
    #bin_means = [data[digitized == i].mean() for i in range(1, len(bins))]

    def dojob(function, vector, inds):
        # Apply 'function' to the bin's slice (pair) or mask (array).
        if len(inds)==2:
            ret = function(vector[inds[0]:inds[1]])
        else:
            ret = function(vector[inds])
        return ret

    def doerrjob(vector, err, mode, inds):
        # As dojob, but computing the errorbar via geterr.
        if len(inds)==2:
            ret = geterr(vector[inds[0]:inds[1]], err, mode)
        else:
            ret = geterr(vector[inds], err, mode)
        return ret

    dox = xmode is not None
    doy = ymode is not None
    doex = xerr is not None
    doey = yerr is not None

    if clean is None:
        # Fast path: no outlier cleaning.
        if timing: tic3 = time.time()
        # NOTE(review): Python-2-only exec statement syntax; exec-ing
        # assignments into locals() inside a function is also
        # unreliable -- confirm xfunc/yfunc actually resolve here.
        if dox: exec ('xfunc = np.%s' % xmode) in locals()
        if doy: exec ('yfunc = np.%s' % ymode) in locals()
        for ii in range(nbins):
            #index = inds==(ii+1)
            if dox:
                xx[ii] = dojob(xfunc, x, inds2[ii])
                ##xx[ii] = xfunc(x[index])
                #xx[ii] = xfunc(x[inds2[ii][0]:inds2[ii][1]])
            if doy:
                yy[ii] = dojob(yfunc, y, inds2[ii])
                ##yy[ii] = yfunc(y[index])
                #yy[ii] = yfunc(y[inds2[ii][0]:inds2[ii][1]])
            if doex:
                exx.append(doerrjob(x, xerr, xmode, inds2[ii]))
                ##exx.append(geterr(x[index], xerr, xmode))
                #exx.append(geterr(x[inds2[ii][0]:inds2[ii][1]], xerr, xmode))
            if doey:
                eyy.append(doerrjob(y, yerr, ymode, inds2[ii]))
                ##eyy.append(geterr(y[index], yerr, ymode))
                #eyy.append(geterr(y[inds2[ii][0]:inds2[ii][1]], yerr, ymode))
        if timing: statstime += (time.time() - tic3)
        #pdb.set_trace()
    else:
        # Slow path: clean each bin's y-data before aggregating.
        # NOTE(review): 'index' is undefined here (NameError when this
        # branch runs); presumably 'doindex' was intended.
        if index: print "INDEX and CLEAN modes don't work together yet... Sorry!"
        for ii in range(nbins):
            if timing: tic1 = time.time()
            #index = inds==(ii+1)
            if timing: setuptime += (time.time() - tic1)
            if timing: tic2 = time.time()
            xdata = x[inds2[ii][0]:inds2[ii][1]]
            if timing: xdatatime += (time.time() - tic2)
            if timing: tic25 = time.time()
            if ymode is None and yerr is None: # We're free to ignore the y-data:
                ydata = arraynan
            else: # We have to compute something with the y-data:
                if clean is not None:
                    ydata, retstats = cleandata(y[inds2[ii][0]:inds2[ii][1]], clean, returnstats=True)
                    if returnstats:
                        final_count += retstats[1]
                else: # We don't have to clean the data
                    ydata = y[inds2[ii][0]:inds2[ii][1]]
                    if returnstats:
                        final_count += ydata.size
            if timing: ydatatime += (time.time() - tic25)
            if timing: tic3 = time.time()
            xx[ii] = getcenter(xdata,xmode)
            if timing: tic4 = time.time()
            yy[ii] = getcenter(ydata,ymode)
            if timing: tic5 = time.time()
            exx.append(geterr( xdata,xerr,xmode))
            if timing: tic6 = time.time()
            eyy.append(geterr( ydata,yerr,ymode))
            if timing: tic7 = time.time()
            if timing: statstime += (time.time() - tic3)
            #exx[ii] = geterr( xdata,xerr,xmode)
            #eyy[ii] = geterr( ydata,yerr,ymode)

    if timing:
        print "%1.3f sec for setting up bins & indices..." % setuptime
        print "%1.3f sec for getting x data clean and ready." % xdatatime
        print "%1.3f sec for getting y data clean and ready." % ydatatime
        #print "%1.3f sec for computing x-data statistics." % (tic4-tic3)
        #print "%1.3f sec for computing y-data statistics." % (tic5-tic4)
        #print "%1.3f sec for computing x-error statistics." % (tic6-tic5)
        #print "%1.3f sec for computing y-error statistics." % (tic7-tic6)
        print "%1.3f sec for computing statistics........." % statstime

    if timing:
        print "%1.3f sec since starting function; uncertainties defined" % (time.time() - tic)
    #xx = array(xx)
    #yy = array(yy)
    exx = np.array(exx).transpose()  # b/c 2D if minmax option used
    eyy = np.array(eyy).transpose()  # b/c 2D if minmax option used
    #pdb.set_trace()
    if returnstats:
        ret= xx,yy,exx,eyy,(init_count, final_count)
    else:
        ret = xx,yy,exx,eyy
    #print 'tools: returnstats, len(ret)>>', returnstats, len(ret)
    if timing:
        print "%1.3f sec since starting function; returning" % (time.time() - tic)
    return ret
def ploth(*args, **kw):
    """Plot 1D data in a histogram-like format.  If x-coordinates are
    specified, they refer to the centers of the histogram bars.

    Uses same format as matplotlib.pyplot.plot.  For example:
      ::

        ploth(x, y)        # plot x and y using solid linestyle (default)
        ploth(x, y, 'bo')  # plot x and y using blue circle markers w/no line
        ploth(y)           # plot y using x as index array 0..N-1
        ploth(y, 'r*--')   # ditto, but with red star corners and dashed line

    :OPTIONS:
      rot90 : bool
        If True, data will be plotted histogram-style vertically,
        rather than the standard horizontal plotting.

    :REQUIREMENTS: :doc:`numpy`, :doc:`analysis`
    """
    # 2009-09-17 09:26 IJC: Created
    # 2012-09-27 19:19 IJMC: Added 'rot90' keyword
    from numpy import arange, concatenate, vstack
    from pylab import plot

    # Unpack the pylab.plot-style positional arguments:
    if len(args) == 1:
        y = args[0]
        ny = len(y)
        x = arange(ny)
        plotstr = '-'
    elif len(args) == 2 and args[1].__class__ == str:
        y = args[0]
        ny = len(y)
        x = arange(ny)
        plotstr = args[1]
    elif len(args) == 2 and args[1].__class__ != str:  # was '<>': py3 syntax error
        x = args[0]
        y = args[1]
        ny = len(y)
        plotstr = '-'
    elif len(args) >= 3:
        x = args[0]
        y = args[1]
        ny = len(y)
        plotstr = args[2]  # bug fix: was args[1] (the y-data, not the format)

    # Always pop 'rot90' so it is never forwarded to plot() -- even
    # rot90=False used to leak through **kw and crash plot().
    rot90 = kw.pop('rot90', False)
    if rot90:
        x, y = y, x
        ny = len(y)

    # Duplicate each point so the line traces flat-topped bars whose
    # edges sit halfway between consecutive x values:
    x1 = 0.5 * (x[1::] + x[0:ny - 1])
    xx = concatenate(([x[0]], vstack((x1, x1)).transpose().ravel(), [x[-1]]))
    yy = vstack((y, y)).transpose().ravel()
    return plot(xx, yy, plotstr, **kw)
def flatten(x, maxdepth=100):
    """flatten(sequence) -> list

    Returns a single, flat list which contains all elements retrieved
    from the sequence and all recursively contained sub-sequences
    (iterables).  Strings are treated as atomic elements, not iterables.

    :OPTIONAL INPUTS:
       maxdepth -- scalar
         number of layers deep to dig.  Setting to zero causes no
         flattening to occur.

    :Examples:
     ::

        >>> flatten([[1, 2, [3, 4]], (5, 6)])
        [1, 2, 3, 4, 5, 6]

        >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
        [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
    # 2009-09-26 14:05 IJC: Taken from
    #       http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
    # 2011-06-24 15:40 IJMC: Added maxdepth keyword

    # Python 2/3 compatibility: 'basestring' does not exist on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str

    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, string_types) \
                and maxdepth > 0:
            # BUGFIX: pass (maxdepth - 1) into the recursion instead of
            # decrementing the local counter.  The old 'maxdepth -= 1'
            # consumed depth across *siblings*, so later elements at the
            # same nesting level were left unflattened.
            result.extend(flatten(el, maxdepth=maxdepth - 1))
        else:
            result.append(el)
    return result
def fconvolve(a, v, oversamp=2):
    """Return the discrete, linear convolution of 1-D sequences a and v,
    computed via Fast Fourier Transforms.

    Restrictions: a and v must both be real-valued, and len(a) > len(v).

    :REQUIREMENTS: :doc:`analysis`, :doc:`numpy`
    """
    # 2009-10-29 11:00 IJC: Created
    from analysis import pad
    from numpy.fft import fft, ifft, fftshift
    from numpy import real, array

    signal = array(a, copy=True).ravel()
    kernel = array(v, copy=True).ravel()
    n_signal = len(signal)

    # Zero-pad both inputs to an oversampled length to avoid wrap-around.
    nfft = oversamp * n_signal
    signal_padded = pad(signal, 1, nfft)[0, :]
    kernel_padded = pad(kernel, 1, nfft)[0, :]

    # Convolution theorem: multiply the spectra, transform back, and keep
    # only the real part (inputs are real-valued).
    product_spectrum = fft(signal_padded) * fft(kernel_padded)
    convolved = real(fftshift(ifft(product_spectrum)))

    # Trim back to the original signal length.
    return pad(convolved, 1, n_signal).ravel()
def cplim(a1, a2):
    """Copy axis limits from one axes to another.

    :INPUTS:
       a1, a2 -- either (1) handles to axes objects, or (2) figure
                 numbers.  If figures have subplots, you can refer to a
                 particular subplot using decimal notation.  So, 1.3
                 would refer to subplot 3 of figure 1.

    :REQUIREMENTS: :doc:`matplotlib` (when this is written...)
    """
    # 2009-12-08 16:30 IJC: Had the idea...
    # Placeholder -- no functionality implemented yet.
    print("To be written -- and what a great day it will be.")
    return
def legc(leg, col='color'):
    """Color legend text to match linecolor.

    :Inputs:
      'leg' is a legend object.

      'col' sets the field of the leg.get_lines() objects to use to find
      the color.

    Returns the same legend object.  You may need to refresh the figure
    to see the changes."""
    # 2009-12-14 09:50 IJC: Created
    # Walk the text/line pairs in lockstep and copy each line's color
    # onto its label.
    for text_obj, line_obj in zip(leg.get_texts(), leg.get_lines()):
        text_obj.set_color(line_obj.get_color())
    return leg
def keylist(filelist, keys):
    """Create an object based on FITS header keys extracted from a filelist.

    :Inputs:
      filelist -- sequence of strings representing filenames (for PyFITS)
      keys -- sequence of strings representing header keys

    :RETURNS:
      an object with one list attribute per key, each holding that key's
      value from every file's primary header, in filelist order.
      A key missing from a file's header will raise KeyError.

    :REQUIREMENTS: :doc:`pyfits`, :doc:`spitzer`
    """
    try:
        from astropy.io import fits as pyfits
    except ImportError:
        import pyfits
    from spitzer import baseObject
    # 2010-01-24 15:23 IJC: Created
    # 2010-01-26 10:27 IJC: Solved a pernicious little problem: always
    #       call object creation w/parentheses!
    obj = baseObject()
    # Use setattr/getattr rather than exec(): the old exec-based version
    # was a syntax error for FITS keys that are not valid Python
    # identifiers (e.g. 'DATE-OBS'); those remain reachable via getattr.
    for k in keys:
        setattr(obj, k, [])
    for f in filelist:
        h = pyfits.getheader(f)
        for k in keys:
            getattr(obj, k).append(h[k])
    return obj
def plotc(x, y, z, **kw):
    """Plot x,y data with an evolving z component by changing its
    color using a matplotlib colormap 'cm' object.

    Will bomb if z elements are non-finite.

    :OPTIONS:
      z2 : array of same shape as x,y,z.  This will be size; z will be color.

      map : str
        colormap to use for z

      zmin, zmax : floats
        maximum/minimum values of z for colorscale

      sizenotcolor : bool
        If True, 'z' specifies marker size not color.

      z2min, z2max : floats

      colorbar : bool

      reversecolorbar : bool
        Make small numbers on top (e.g., for magnitudes)

      others : various
        Any options passable to matplotlib's :func:`plot`

    :SEE ALSO:
      :func:`contourg`, :func:`matplotlib.colormaps`

    :RETURNS:
      fig, axes, plotlines

    :REQUIREMENTS: :doc:`pylab`
    """
    # 2010-02-08 16:47 IJC: Created
    # 2011-09-07 10:51 IJMC: Added zmin, zmax options. And use
    #                      variable-number of keywords now.
    # 2013-10-09 17:16 IJMC: Added sizenotcolor option.
    # 2014-10-06 10:50 IJMC: Updated 'see also' note.
    # 2016-12-02 14:25 IJMC: Now do a colorbar -- finally!
    import pylab as py  # from pylab import plot, cm, array

    # Fill unset options with defaults (dict.setdefault replaces the
    # Python-2-only dict.has_key loop).
    defaults = dict(map='Blues', zmin=None, zmax=None, z2min=None, z2max=None, linestyle='None', marker='o', sizenotcolor=False, z2=None, colorbar=False, reversecolorbar=False)
    for key, val in defaults.items():
        kw.setdefault(key, val)

    map = kw.pop('map')
    zmin = kw.pop('zmin')
    zmax = kw.pop('zmax')
    z2min = kw.pop('z2min')
    z2max = kw.pop('z2max')
    sizenotcolor = kw.pop('sizenotcolor')
    z2 = kw.pop('z2')
    colorbar = kw.pop('colorbar')
    reversecolorbar = kw.pop('reversecolorbar')

    # Look the colormap up by name instead of eval('py.cm.' + map).
    try:
        cmap = py.cm.get_cmap(map)
    except (ValueError, AttributeError):
        print("Colormap %s not found -- exiting!" % map)
        return -1

    if sizenotcolor:
        # BUGFIX: 'zscale' was previously left undefined on this branch,
        # so sizenotcolor=True raised a NameError at the zip() below.
        # Here z is used directly as the per-point marker size.
        zscale = z
    else:
        if zmin is None:
            zmin = z.min()
        if zmax is None:
            zmax = z.max()
        zscale = cmap(((z - zmin) / (zmax - zmin) * cmap.N).astype(int))

    if z2 is not None:
        if z2min is None:
            z2min = z2.min()
        # BUGFIX: this tested 'zmin is None' before, so z2max was never
        # set from the data and the clipping below raised on None.
        if z2max is None:
            z2max = z2.max()
        z2scale = z2.copy()
        z2scale[z2scale >= z2max] = z2max
        z2scale[z2scale <= z2min] = z2min

    plotlist = []
    if z2 is None:
        zipparam = zip(x, y, zscale)
    else:
        zipparam = zip(x, y, zscale, z2scale)

    if colorbar:
        fig = py.figure()
        axs = [py.subplot(111, position=[.12, .12, .7, .8])]
    else:
        fig = py.gcf()
        axs = [py.gca()]

    # Plot each point separately so that each can carry its own
    # color and/or marker size.
    for thisparam in zipparam:
        if z2 is None:
            xx, yy, param = thisparam
        else:
            xx, yy, param, size = thisparam
        if sizenotcolor:
            kw['ms'] = param
        else:
            kw['color'] = param
        if z2 is not None:
            kw['ms'] = size
        plotlist.append(py.plot([xx], [yy], **kw))

    if colorbar:
        # NOTE(review): with sizenotcolor=True, zmin/zmax may still be
        # None here and np.linspace would fail -- confirm intended usage.
        axs.append(py.subplot(111, position=[.84, .12, .07, .8]))
        ztemp = np.linspace(zmin, zmax, 101)
        py.imshow(np.tile(ztemp, (2, 1)).T, cmap=cmap, aspect='auto')
        axs[-1].set_ylim([0, int(ztemp.size) - 1])
        zticks = np.interp(axs[-1].get_yticks(), np.arange(ztemp.size), ztemp)
        if reversecolorbar:
            axs[-1].set_ylim(axs[-1].get_ylim()[::-1])
        axs[-1].set_yticklabels(['%i' % el for el in zticks])
        axs[-1].set_xticklabels([])
        axs[-1].get_yaxis().set_ticks_position('right')
        #axs[-1].set_xlabel('$T_{eff}$ [K]', fontsize=fs*1.1)

    return fig, axs, plotlist
def hist2d(x, y, bins=None):
    """Compute 2-d histogram data for specified bins.

    :INPUT:
      x

      y

    :OPTIONAL INPUT:
      bins:  a two-tuple containing one of the following:
         (nx,ny) -- tuple, number of bins in each direction

         (xlims, ylims) -- tuple of sequences (x- and y-bin edges)

    :OUTPUT:
      A 3-tuple consisting of:
        xbins -- the centers of the x bins

        ybins -- the centers of the y bins

        hist -- The 2D histogram array

    :SEE ALSO: :func:`numpy.histogram2d`

    :REQUIREMENTS: :doc:`numpy`
    """
    # 2010-02-25 17:26 IJC: Created from my old Hess Diagram fcn.
    # 2010-03-04 13:59 IJC: Fixed a typo -- now actually returns bin centers
    from numpy import arange, array, zeros, isfinite, linspace

    # Keep only finite values (x and y are filtered independently;
    # they must end up the same length for the masks below to combine).
    x = array(x).ravel()
    y = array(y).ravel()
    x = x[isfinite(x)]
    y = y[isfinite(y)]

    if bins is None:
        bins = [20, 20]
    if hasattr(bins, '__iter__'):
        if len(bins) < 2:
            print("bins must have len>2")
            return -1
    else:
        print("bins must have len>2")
        return -1

    # Test X bins: either explicit edges, or a count spanning the data.
    if hasattr(bins[0], '__iter__'):  # sequence of limits
        nx = len(bins[0]) - 1
        xlims = bins[0]
    else:
        nx = bins[0]
        xlims = linspace(x.min(), x.max(), nx + 1)

    # Test Y bins
    if hasattr(bins[1], '__iter__'):  # sequence of limits
        ny = len(bins[1]) - 1
        ylims = bins[1]
    else:
        ny = bins[1]
        ylims = linspace(y.min(), y.max(), ny + 1)

    xcen = zeros(nx, float)
    ycen = zeros(ny, float)
    hist = zeros((ny, nx), int)
    for ii in range(nx):
        # BUGFIX: include each bin's left edge (and the right edge of the
        # last bin), following numpy.histogram.  The old strict
        # inequalities silently dropped points lying exactly on an edge,
        # so x.min() and x.max() were never counted with auto-generated bins.
        if ii == nx - 1:
            xindex = (x >= xlims[ii]) * (x <= xlims[ii + 1])
        else:
            xindex = (x >= xlims[ii]) * (x < xlims[ii + 1])
        xcen[ii] = 0.5 * (xlims[ii] + xlims[ii + 1])
        for jj in range(ny):
            ycen[jj] = 0.5 * (ylims[jj] + ylims[jj + 1])
            if jj == ny - 1:
                yindex = (y >= ylims[jj]) * (y <= ylims[jj + 1])
            else:
                yindex = (y >= ylims[jj]) * (y < ylims[jj + 1])
            index = xindex * yindex
            hist[jj, ii] = index.sum()

    return (xcen, ycen, hist)
def plotcorrs(params, labs=None, tit=None, xrot=0, yrot=0, cmap=None,figsize=None,plotregion=[.1,.1,.8,.8], n=6, nbins=None, clim=None, docontour=False, contourcolor='k', newfig=True, getdist=False, gd_smooth=0.2, plotmid=None, fontsize=12):
    """ Plot correlation coefficient matrix in one big, huge, honkin'
    figure.  Color-code the figure based on the correlation
    coefficients between parameters.

    :INPUTS:
      params -- (M x N array) M instantiations of a set of N parameters.

    :OPTIONS:
      labs -- (list) labels for each of the N parameters

      tit -- (str) title for figure

      xrot/yrot -- (float) axis label rotation, in degrees

      cmap -- (matplotlib cm) -- colormap for color-coding.

      figsize -- (2-list) -- width and height of figure

      plotregion -- (4-list) -- (left, bottom, width, height) of plotted region in each figure

      n -- (int) -- number of subplots across each figure

      nbins : int
        Bin the data into this many bins, and show 2D histograms instead of points.

      clim : None
        Colorscale limits for normalized 2D histograms (where hist.sum() = 1.0)

      docontour : bool
        Whether to plot contours, or do an 'imshow'

      newfig : bool
        Whether to generate a new figure, or plot in the current axes.

      contourcolor
        Color of contour line, if "docontour" is set to a list of confidence intervals.

    :REQUIREMENTS: :doc:`pylab`, :doc:`nsdata`

    :NOTES:
      Based on the concept by Nymyer and Harrington at U. Central Florida

      Beware of plotting too many points, and clogging your system!
    """
    # 2010-05-27 09:27 IJC: Created
    # 2010-08-26 14:49 IJC: Added test for higher-rank dimensional
    # 2010-08-27 09:21 IJC: Moved 'tit' title text lower
    # 2011-11-03 12:03 IJMC: Added 'nbins' option
    # 2012-03-30 09:04 IJMC: Moved axes labels to upper-right, rather than lower-left.
    # 2013-08-19 13:17 IJMC: Added 'docontour' option.
    # 2013-10-09 14:59 IJMC: Added 'newfig' option.
    # 2015-11-15 19:59 IJMC: Added 'getdist' option -- much prettier!
    import pylab as py
    import nsdata as ns
    import kdestats as kde
    n = int(n)
    # NOTE(review): 'n' (subplots per page) is immediately clobbered by the
    # sample count on the next line, so the n=6 argument appears to be
    # ignored below -- confirm before relying on it.
    n, m = params.shape
    # Orient params so that rows are samples and columns are parameters
    # (assumes more samples than parameters).
    if n>=m:
        npts0 = n
        nparam = m
    else:
        npts0 = m
        nparam = n
        params = params.copy().transpose()
    if nbins is not None:
        # Pre-compute per-parameter histogram bin edges spanning the data.
        nbins = int(nbins)
        hist_bins = [py.linspace(min(params[:,ii]), max(params[:,ii]), nbins+1) for ii in range(nparam)]
        hist_cmap = cmap
    nind = params.shape[1]
    if labs is None:
        labs = ['']*nind
    if figsize is None:
        figsize = [9,9]
    # Lay out the sub-panel grid within 'plotregion'.
    nperpage = min(n,nind-1)
    nfigs = py.ceil(float(nind-1.)/nperpage).astype(int)
    #print "nind, nperpage, nfigs>>",nind, nperpage, nfigs
    subx0,suby0, xwid,ywid = plotregion
    subdx = xwid/(nperpage) # was nind-1.
    subdy = ywid/(nperpage) # was nind-1.
    #print "subx0,suby0,subdx,subdy>>",[subx0,suby0,subdx,subdy]
    # Thin axis frames become invisible clutter with many panels.
    oldaxlinewidth = py.rcParams['axes.linewidth']
    if nind>40:
        py.rcParams['axes.linewidth'] = 0
    figs = []
    allsubplots = []
    if getdist:
        # Delegate the corner plot to the 'getdist' package, then annotate
        # it with median +/- 1-sigma values for each parameter.
        from getdist import plots, MCSamples
        from analysis import dumbconf
        g = plots.getSubplotPlotter()
        gd_labels = labs
        samps = MCSamples(samples=params, names=gd_labels, labels=gd_labels)
        samps.updateSettings({'contours': [0.683, 0.954, 0.997]})
        g.settings.num_plot_contours = 3
        samps.smooth_scale_2D = gd_smooth
        g.triangle_plot([samps], filled=True, contour_colors=contourcolor, linewidth=2, contour_lws=[2]*len(labs), figsize=figsize)
        figs.append(py.gcf())
        figs[-1].set_size_inches(figsize)
        allsubplots.append(figs[-1].get_axes())
        [ax.get_xaxis().get_label().set_fontsize(fontsize) for ax in allsubplots[-1]]
        [ax.get_yaxis().get_label().set_fontsize(fontsize) for ax in allsubplots[-1]]
        if plotmid is None:
            # Default central values: per-parameter medians.
            plotmid = np.zeros(len(gd_labels), float)
            for ii, key in enumerate(gd_labels):
                plotmid[ii] = np.median(params[:,ii])
        else:
            plotmid = np.array(plotmid, copy=False)
        # 84.134% / 15.866% percentiles give +/- 1-sigma offsets from plotmid.
        hivals = np.array([dumbconf(vec, .84134, type='upper')[0] for vec in params.T]) - plotmid
        lovals = plotmid - np.array([dumbconf(vec, .15866, type='upper')[0] for vec in params.T])
        #for k in range(len(x_mean)):
        #    g.add_x_bands(x_mean[k], x_var[k]**0.5, ax=allsubplots[0][k], alpha2=0.25, color=contourcolor, linewidth=2)
        # Build right-aligned "label : value^{+hi}_{-lo}" strings.
        maxlab = '%' + str(1+max(map(len, gd_labels))) + 's : '
        textparams = [maxlab % el for el in gd_labels]
        for ii in range(len(gd_labels)):
            textvals = roundvals([plotmid[ii], hivals[ii], lovals[ii]])
            textparams[ii] += ' $%s^{+%s}_{-%s}$ ' % tuple(textvals)
        if n>3:
            newpos = [.62, .62, .35, .35]
        else:
            newpos = [.62, .7, .35, .27]
        newax = py.subplot(111, position=newpos)
        out = textfig(['\n']+textparams, ax=newax, fig=figs[-1],fontsize=fontsize*1.2)
    else:
        # Hand-rolled corner plot: tile (nfigs x nfigs) pages of panels.
        # Iterate over figure columns
        for kk2 in range(nfigs):
            # Iterate over figure rows
            for kk1 in range(nfigs):
                if newfig:
                    f=py.figure(nextfig(),figsize)
                else:
                    f = py.gcf()
                subplots = []
                jj0 = 0
                #Iterate over subplot columns:
                for jj in range(nperpage*kk2,min(nperpage*(kk2+1),nind)):
                    # Set the vertical panel offset:
                    # NOTE(review): ii0 is not assigned when kk1 < kk2 (a page
                    # above the diagonal); presumably the inner loop is then
                    # empty -- confirm.
                    if kk1==kk2: # a figure on the diagonal
                        ii0 = jj0+1
                    elif kk1>kk2: # a figure below the diagonal
                        ii0 = 1
                    #Iterate over subplots rows:
                    for ii in range(max(jj+1,nperpage*kk1+1), min(nperpage*(kk1+1)+1,nind)):
                        #print '(kk2,kk1,jj,jj0,ii,ii0): (%i,%i,%i,%i,%i,%i)'%(kk2,kk1,jj,jj0,ii,ii0), [subx0+subdx*jj0,suby0+subdy*(nperpage-ii0),subdx,subdy]
                        s = py.axes([subx0+subdx*jj0,suby0+subdy*(nperpage-ii0),subdx,subdy])
                        # Guard against constant (or numerically constant)
                        # parameters, for which 2D histograms are degenerate.
                        param_doesnt_vary = params[:,jj].std()==0 or params[:,ii].std()==0 or \
                            (py.np.abs(params[:,jj].std()/py.median(params[:,jj])) < 1e-9) or \
                            (py.np.abs(params[:,ii].std()/py.median(params[:,ii])) < 1e-9)
                        if nbins is None or param_doesnt_vary:
                            # Scatter of raw samples (pixel markers).
                            py.plot(params[:,jj],params[:,ii],',k')
                            #pdb.set_trace()
                        else:
                            #pdb.set_trace()
                            thishist = py.histogram2d(params[:,jj], params[:,ii], \
                                bins=[hist_bins[jj], hist_bins[ii]])
                            if docontour:
                                # Bin centers for the contour grid.
                                xplot = 0.5*(thishist[1][1:] + thishist[1][0:-1])
                                yplot = 0.5*(thishist[2][1:] + thishist[2][0:-1])
                                if hasattr(docontour, '__iter__'):
                                    # docontour is a list of confidence levels.
                                    clev = [kde.confmap(1.0*thishist[0]/npts0, thisDC) for thisDC in docontour]
                                    py.contour(xplot, yplot, 1.0*thishist[0].transpose()/npts0, clev, colors=contourcolor, linewidths=2)
                                else:
                                    py.contourf(xplot, yplot, 1.0*thishist[0].transpose()/npts0, cmap=hist_cmap)
                                h_axis = py.xlim() + py.ylim()
                            else:
                                ns.imshow(1.0*thishist[0].transpose()/npts0, x=thishist[1], y=thishist[2], cmap=hist_cmap)
                                h_axis = py.xlim() + py.ylim()[::-1]
                                #pdb.set_trace()
                            py.axis(h_axis)
                            if clim is None:
                                py.clim([0., thishist[0].ravel().max()*1.0/npts0])
                            else:
                                py.clim(clim)
                        # Tick/label housekeeping: only edge panels keep labels.
                        if jj0>0: #
                            s.set_yticklabels('');
                        else:
                            #py.ylabel(labs[ii], rotation=yrot)
                            pass
                        if newfig: s.set_yticks(s.get_yticks()[1:-1]);
                        if ii0 == (jj0+1):
                            # Top-of-column panel: labels go above and to the right.
                            s.get_xaxis().set_label_position('top')
                            py.xlabel(labs[jj])
                            s.get_yaxis().set_label_position('right')
                            py.ylabel(labs[ii])#, rotation='horizontal')
                            s.get_yaxis().get_label().set_rotation(90)
                        if ii0<(nperpage-1) and ii<(nind-1):
                            s.set_xticklabels('');
                        else:
                            #py.xlabel(labs[jj],rotation=xrot)
                            s.get_xaxis().set_major_formatter(py.FormatStrFormatter('%01.2f'));
                            if newfig: s.set_xticks(s.get_xticks()[1:-1]);
                        if nperpage>10:
                            # Too many panels: drop tick labels (and ticks) entirely.
                            s.set_xticklabels('');
                            s.set_yticklabels('');
                            if nperpage>50:
                                s.set_xticks([])
                                s.set_yticks([])
                        else:
                            [obj.set_rotation(90.) for obj in s.get_xticklabels()] ;
                        if cmap is not None:
                            # Background color encodes |correlation coefficient|.
                            s.set_axis_bgcolor(cmap(.3+.7*abs(py.corrcoef(params[:,jj],params[:,ii])[0,1])))
                        #py.title('(kk2,kk1,jj,jj0,ii,ii0): (%i,%i,%i,%i,%i,%i)'%(kk2,kk1,jj,jj0,ii,ii0))
                        if nbins is not None and (not param_doesnt_vary):
                            py.axis(h_axis)
                        subplots.append(s)
                        ii0 += 1
                    jj0+=1
                figs.append(f)
                allsubplots.append(subplots)
    # NOTE(review): 'f' is only defined on the non-getdist path; a title with
    # getdist=True would raise NameError -- confirm.
    if tit is not None:
        f.text(.5,.9,tit,fontsize=24,horizontalalignment='center')
    py.draw()
    py.rcParams['axes.linewidth'] = oldaxlinewidth
    # Discard any pages that ended up with no panels.
    for ff,ss in zip(figs,allsubplots):
        if len(ss)==0:
            py.close(ff)
    return figs, allsubplots
def pparams(params, **args):
    """Take a set of parameters and plot them.  Assume that the larger
    of the array's two dimensions is N (the number of instantiations)
    and the smaller is M (the number of parameters).

    If 'params' is a 3D array, then we assume these are in the form of
    an emcee.sampler.chain instance: axes are (chains, steps,
    parameters).

    :OPTIONS:
      npts: is not None, then pick only every (N/npts)th point.

      newfig: bool; whether to create a new figure or not

      figsize: 2-tuple; figure size (in inches)

      labs: optional list of labels for each parameter

    :REQUIREMENTS: :doc:`numpy`, :doc:`pylab`
    """
    # 2012-02-17 09:45 IJMC: Added labs option
    # 2016-06-20 10:49 IJMC: Added '3D' array functionality.
    from numpy import sqrt
    from pylab import figure, plot, subplot, xticks, gcf, title

    # dict.get replaces the Python-2-only dict.has_key() checks.
    npts = args.get('npts', None)
    figsize = args.get('figsize', [15, 10])
    newfig = args.get('newfig', True)
    labs = args.get('labs', None)

    if params.ndim == 2:
        n, m = params.shape
    elif params.ndim == 3:
        # emcee-style (chains, steps, parameters): set up the figure with
        # the first chain, then overplot every chain into that figure.
        o, n, m = params.shape
        sub_args = args.copy()
        sub_args['newfig'] = False
        fig = pparams(params[0], **args)
        # NOTE(review): chain 0 is plotted twice (above and in this loop);
        # preserved from the original implementation.
        [pparams(params[ii], **sub_args) for ii in range(o)]
        return fig

    # Orient so rows are samples and columns are parameters.
    if n >= m:
        npts0 = n
        nparam = m
    else:
        npts0 = m
        nparam = n
        params = params.copy().transpose()

    # Near-square grid of subplots, one per parameter.
    ndown = sqrt(nparam).astype(int)
    nacross = int(1.0 * nparam / ndown)
    if ndown * nacross < nparam:
        nacross += 1

    if npts is None:
        npts = npts0
    # Down-sample the chain so roughly 'npts' points are drawn.
    sampling = max(int(npts0 / npts), 1)

    if newfig == True:
        fig = figure(nextfig(), figsize)
    else:
        fig = gcf()

    for ii in range(nparam):
        subplot(ndown, nacross, ii + 1)
        plot(params[::sampling, ii]); xticks([])
        if labs is not None:
            title(labs[ii])

    return fig
def hparams(params, nbins=10, figsize=[15,10], normed=False,newfig=True, labs=None, cumulative=False, minorticks=True, plotconf=.683, plotmid=True, color=None):
    """Take a set of parameters and histogram them. Assume that the larger
    of the array's two dimensions is N (the number of instantiations)
    and the smaller is M (the number of parameters).

    :Options:
      nbins sets the number of bins

      normed sets the histogram normalization

      if newfig is False, plot into the current figure.

      labs is a list of string labels

      cumulative plots normalized cumulative distribution function

      minorticks displays minor tick marks

      plotconf displays confidence levels at the desired (fractional)
      threshold.  E.g., plotconf=0.683 displays the one-sigma
      confidence limits.  Set to 'None' for no confidence levels.

      plotmid plots nothing (if False), the median (if True), or the
      specified values (if a sequence).  Defaults to median if
      plotconf is True.

      color sets the plotting color.

    :Requirements: :doc:`numpy`, :doc:`pylab`
    """
    # 2010-11-30 12:11 IJC: Now xticks are rotated 90 degrees.
    # 2012-01-22 16:35 IJMC: Added 'labs' option
    # 2012-03-26 13:09 IJMC: Added 'cumulative' and 'minorticks' options
    # 2012-05-10 16:30 IJMC: Added plotconf and plotmid options.
    # 2013-03-11 08:45 IJMC: Added color option.
    # 2014-07-12 17:29 IJMC: Changed defaults to show conf. & median.
    from numpy import sqrt
    from pylab import figure, gcf, subplot, draw
    from analysis import dumbconf
    # Orient so rows are samples and columns are parameters
    # (assumes more samples than parameters).
    n, m = params.shape
    if n>=m:
        npts0 = n
        nparam = m
    else:
        npts0 = m
        nparam = n
        params = params.copy().transpose()
    if cumulative:
        normed=True
    # Default central values: per-parameter medians (used whenever plotmid
    # is True, or confidence levels are requested without explicit values).
    if (plotmid is True) or ((plotconf is not None) and (not hasattr(plotmid, '__iter__'))):
        plotmid = np.median(params, axis=0)
    # Near-square grid of subplots, one per parameter.
    ndown = sqrt(nparam).astype(int)
    nacross = int(1.0*nparam/ndown)
    if ndown*nacross<nparam:
        nacross +=1
    if newfig==True:
        fig = figure(nextfig(), figsize)
    else:
        fig = gcf()
    # NOTE(review): 'normed' is the pre-matplotlib-3.x histogram keyword;
    # newer matplotlib renamed it to 'density' -- confirm library version.
    histoptions = dict(histtype='step', cumulative=cumulative, normed=normed)
    if color is not None:
        histoptions['color'] = color
    for ii in range(nparam):
        sax = subplot(ndown,nacross, ii+1)
        sax.hist(params[:,ii],nbins, **histoptions)
        if cumulative:
            sax.set_ylim([0,1])
        else:
            sax.set_ylim([0, sax.get_ylim()[1]])
        sax.set_yticklabels([])
        if minorticks:
            sax.minorticks_on()
        [tick.set_rotation(90.) for tick in sax.get_xaxis().get_ticklabels()]
        if labs is not None:
            sax.text(.05, .9, labs[ii], verticalalignment='top', horizontalalignment='left', transform=sax.transAxes)
        if plotmid is not False:
            # Solid vertical line at the central value.
            sax.plot([plotmid[ii]]*2, sax.get_ylim(), '-k')
            if plotconf is not None:
                # Upper/lower offsets from the central value at the
                # requested confidence level (dashed lines), annotated
                # on the y-axis in scientific notation with a precision
                # matched to the smaller of the two offsets.
                x0 = plotmid[ii]
                xup = -x0 + dumbconf(params[:,ii], (1.-plotconf)/2., mid=x0, type='lower')[0]
                xdn = x0 - dumbconf(params[:,ii], (1.-plotconf)/2., mid=x0, type='upper')[0]
                if xup==0 or xdn==0:
                    ndigit = 1
                else:
                    ndigit = np.abs(int(np.floor(min(np.log10(np.abs(xup)), np.log10(np.abs(xdn)))) - 1))
                precstr = '%' + ('1.%i' % ndigit) + 'e'
                sax.plot([x0-xdn]*2, sax.get_ylim(), '--k')
                sax.plot([x0+xup]*2, sax.get_ylim(), '--k')
                sax.set_ylabel((precstr + '\n(+' + precstr + ' / -' + precstr + ')\n') % (x0, xup, xdn), horizontalalignment='center')
    draw()
    return fig
def getparams(params, chisq=None, conf=[.683]):
    """Find confidence levels and optimal parameters.

    If chisq is None:
        Take the median of each parameter, and compute the upper and
        lower confidence limits using the parameter distributions and
        this model.

    Else:
        Take a set of fit parameters and associated chi-squared values.
        Take as the optimum model that model with the MEDIAN chi-squared
        value.  Compute upper and lower confidence limits using the
        parameter distributions and this optimum model.

    :INPUTS:
       params : N-D numpy array
          model parameters

       chisq : 1-D numpy array
          chi-squared values.

       conf : sequence or float
          confidence levels to report

    :OUTPUTS:
       a list with one entry per parameter:
          [median, upper_offset_1, lower_offset_1, upper_offset_2, ...]
       with one (upper, lower) pair per requested confidence level.

    :SEE ALSO:
      :func:`analysis.dumbconf`

    :REQUIREMENTS: :doc:`numpy`
    """
    # 2010-07-20 11:37 IJC: Created
    # 2010-10-28 12:01 IJC: Updated documentation
    from numpy import sqrt, median, array, ceil, sort, nonzero, floor

    # Promote 1-D input to a single-parameter column.
    if len(params.shape) == 1:
        params = params.reshape(len(params), 1)
    n, m = params.shape

    # Orient so rows are models and columns are parameters.
    if chisq is None:
        if m > n:
            params = array(params, copy=True).transpose()
        nmod = params.shape[0]
    else:
        nmod = len(chisq)
        if n == nmod:
            pass
        else:
            params = array(params, copy=True).transpose()

    # Don't allow an even number of parameter sets (so a true median
    # element exists).  BUGFIX: the old parity test 'nmod/2==nmod/2.'
    # is always true under true division (Python 3), which trimmed the
    # first sample from every input.
    if nmod % 2 == 0:
        params = array(params, copy=True)[1::]  # [0:nmod-1,:]
        if chisq is None:
            pass
        else:
            chisq = array(chisq, copy=True)[1::]  # [0:nmod-1]
        nmod -= 1

    nparam = params.shape[1]
    ret = []
    if chisq is None:
        medmodel = median(params, 0)
    else:
        # NOTE(review): this selects rows whose chisq exactly equals the
        # median; with an odd count that is the middle element.
        medmodel = params[chisq == median(chisq), :].ravel()

    for ii in range(nparam):
        thisret = [medmodel[ii]]
        for thisconf in conf:
            thisconf = abs(thisconf)
            sorted_param = sort(params[:, ii])
            # Index of the median value within the sorted samples.
            mid_index = nonzero(sorted_param == medmodel[ii])[0].mean()
            # Step out by half the confidence mass on each side.
            n_offset = nmod * thisconf / 2.
            upper_index = ceil(min(mid_index + n_offset, nmod - 1)).astype(int)
            lower_index = floor(max(mid_index - n_offset, 0)).astype(int)
            upper = sorted_param[upper_index] - medmodel[ii]
            lower = sorted_param[lower_index] - medmodel[ii]
            thisret += [upper, lower]
        ret.append(thisret)
    return ret
def combinations(input_list):
    """Return all possible combinations of the elements in an input
    sequence.  The last returned element will be the empty list.

    E.g., combinations([0,1]) returns [[0, 1], [0], [1], []]

    Taken from the internet:
    http://desk.stinkpot.org:8080/tricks/index.php/2008/04/get-all-possible-combinations-of-a-lists-elements-in-python/
    """
    # 2010-10-09 17:06 IJC: Created from internet
    # Each element doubles the collection: every existing combination is
    # emitted once with the element appended and once without it.
    result = [[]]
    for element in input_list:
        expanded = []
        for combo in result:
            expanded.append(combo + [element])
            expanded.append(combo)
        result = expanded
    return result
def pcmodelxcorr(pcaout, data, model, npcs=6, nblock=1000, xl=50, modstr='model', titstr=''):
    """Plot cross-correlations between projection of principal components
    onto data and a model.

    :INPUTS:
      pcaout -- output from pcsa.pca, but its only important component
                is that pcaout[2] is an array of PC eigenvectors; the
                strongest vector would be pcaout[2][:,-1], etc.

      data -- numpy array.  Should be shape N x M -- N observations of M
              variables, arranged in L blocks, and should have been
              mean-subtracted prior to PCA (i.e., data -= data.mean(0))

      model -- numpy array.  Should be shape M.

      npcs -- int.  Number of principal components to cross-correlate
              with.

      nblock -- int.  Number of channels to use at a time in
                correlations.  Must be an integral divisor of
                data.shape[1]

      xl -- int.  +/- X-limits to display in plots.

    :Requirements: :doc:`pylab`, :doc:`numpy`, :doc:`pcsa`

    Usage is pretty specific to echelle-data
    """
    # 2010-10-15 11:10 IJC: Created
    import pylab as py
    from numpy import abs
    from pcsa import pca_project

    # Project each of the npcs strongest PCs onto the mean-subtracted data.
    ps = [pca_project(pcaout[2][:, -ii], data - data.mean(0))[0] \
          for ii in range(1, npcs + 1)]

    nobs, ndat = data.shape
    nord = ndat // nblock   # floor division: used as a reshape dimension
    figs = []
    maxlag = nblock // 2    # floor division: integer lag axis below
    lags = py.arange(-maxlag + 1, maxlag)

    for ii in range(npcs):
        figs.append(py.figure())
        # Per-block cross-correlation of the PC projection against the model.
        x_mod = [py.xcorr(pcp - pcp.mean(), mod - mod.mean(), maxlags=maxlag - 1)[1] \
                 for pcp, mod in zip(ps[ii].reshape(nord, nblock), \
                                     model.reshape(nord, nblock))]
        py.close()
        # BUGFIX: removed a leftover 'pdb.set_trace()' debugger breakpoint
        # (and the now-unneeded 'import pdb') that halted every call here.
        xc_gmean = abs(py.array(x_mod).prod(0)) ** (1. / nord)
        xc_amean = py.array(x_mod).mean(0)
        py.figure()
        py.subplot(211)
        py.semilogy(lags, xc_gmean)
        py.ylabel('geometric mean' )
        py.xlabel('lags')
        # BUGFIX: was hard-coded to [-50, 50], ignoring the 'xl' option
        # (the arithmetic-mean panel below already honored it).
        py.xlim([-xl, xl])
        py.ylim([xc_gmean[abs(lags) < xl].min(), xc_gmean.max()])
        py.title(titstr + '\n' + modstr + ' and PC #%i' % ii)
        py.subplot(212)
        py.plot(lags, xc_amean)
        py.ylabel('arithmetic mean' )
        py.xlabel('lags')
        py.xlim([-xl, xl])

    return figs
def dcf(t, x, y, zerolag=True, nbin=11, binwidth=None, bins=None, prebin=None, reterr=False):
    """Compute the Discrete Correlation Function for unevenly sampled data.

    If your data are evenly sampled, just use :func:`numpy.correlate`!

    :INPUTS:
       t -- (1D sequence) - time sampling of input data.

       x, y -- (1D sequences) - input data.

       Note that t, x, y should all have the same lengths!

    :OPTIONAL INPUT:
       zerolag -- (bool) - whether to compute DCF for zero-lag datapoints.

       nbin -- (int) - number of computed correlation/lag values to average over

       binwidth -- (float) - width of bins to average over, in units
                             of t.  If set, this overrides nbin.

       bins -- (sequence) - edges of bins to use in averaging DCF
                            values.  If set, this overrides both nbin
                            and binwidth.

       prebin -- (scalar) - factor by which to bin down initial data
                            and lag pairs.  This translates into a
                            speed boost of about this same factor.

       reterr -- bool - False or True

    :RETURNS:
       meanlags -- average lag in each averaged bin

       rdcf -- DCF value in each averaged bin

       rdcf_err -- uncertainty in the DCF value for each averaged bin

    :SEE ALSO:  :func:`numpy.correlate`

    :REQUIREMENTS: :doc:`analysis`, :doc:`numpy`
    """
    # 2010-11-12 IJC: Created
    # 2010-11-17 09:25 IJC: Added 'bins' option
    # 2011-03-22 13:54 IJC: Added 'prebin' option
    # 2012-03-21 13:25 IJMC: Switched "import *" to "import array, " ... etc.
    # 2014-12-19 08:21 IJMC: Added 'reterr' option
    import analysis as an

    t = np.array(t)
    x = np.array(x, copy=True)
    y = np.array(y, copy=True)
    nx, ny = x.size, y.size
    npts = max(nx, ny)

    # Mean-subtract so the normalized products below are correlations.
    x -= x.mean()
    y -= y.mean()
    xx, yy = np.meshgrid(x, y)
    sx = x.std()
    sy = y.std()

    # Generate data-pair indices:
    if zerolag:
        pairs1, pairs2 = np.nonzero(np.ones((npts, npts), bool))
    else:
        # Exclude the diagonal (zero-lag pairs).
        xind, yind = np.meshgrid(np.arange(npts), np.arange(npts))
        # BUGFIX: replaced the Python-2-only '<>' operator with '!='.
        pairs1, pairs2 = np.nonzero(xind != yind)
        del xind
        del yind

    # Compute lags and normalized data-pair products, sorted by lag:
    tt, tt2 = np.meshgrid(t, t)
    lags = (tt - tt2)[pairs1, pairs2]
    del tt
    del tt2
    uij = (xx * yy)[pairs1, pairs2] / (sx * sy)
    del xx
    del yy
    tind = np.argsort(lags)
    lags = lags[tind]
    uij = uij[tind]
    del tind

    # BUGFIX: guard against prebin=None ('None > 1' is a TypeError on Py3).
    if prebin is not None and prebin > 1:
        # NOTE(review): the binning factor is hard-coded to 2 regardless of
        # the requested 'prebin' value -- preserved from the original;
        # confirm intended behavior.
        lags = an.binarray(lags, 2)
        uij = an.binarray(uij, 2)

    # The regular "DCF" uncertainty is the standard deviation of the mean:
    if reterr:
        yerr = 'sdom'
    else:
        yerr = None

    if bins is not None:
        meanlags, rdcf, meanlags_width, rdcf_err = \
            errxy(lags, uij, bins, xerr=None, yerr=yerr)
    elif binwidth is None:
        meanlags, rdcf, meanlags_width, rdcf_err = \
            errxy(lags, uij, None, xerr=None, yerr=yerr, binfactor=nbin)
    else:
        bins = np.arange(lags.min(), lags.max() + binwidth, binwidth)
        meanlags, rdcf, meanlags_width, rdcf_err = \
            errxy(lags, uij, bins, xerr=None, yerr=yerr)

    # Drop bins with non-finite lag or DCF values.
    finite_ind = np.isfinite(meanlags) * np.isfinite(rdcf)
    if reterr:
        finite_ind *= np.isfinite(rdcf_err)
    meanlags = meanlags[finite_ind]
    rdcf = rdcf[finite_ind]
    if reterr:
        rdcf_err = rdcf_err[finite_ind]

    return meanlags, rdcf, rdcf_err
def getfilelist(path='.', includes=(), excludes=()):
    """Return a list of filenames meeting certain criteria.

    :INPUTS:
      path -- (str) path to directory to be scanned

      includes -- (list of strs) -- all strings in this list must be
                  present in a filename to be returned

      excludes -- (list of strs) -- any string in this list will
                  prevent a filename from being returned

    :RETURNS:
      sorted list of matching (non-hidden) filenames; an unreadable or
      missing directory yields an empty list.
    """
    # 2011-01-30 22:20 IJC: Created
    import os

    # Use os.listdir instead of shelling out via os.popen('ls ...'):
    # portable, and immune to shell metacharacters in 'path'.
    # Sort and skip dotfiles to match the old 'ls' output.
    try:
        filenames = sorted(fn for fn in os.listdir(path)
                           if not fn.startswith('.'))
    except OSError:
        filenames = []

    # BUGFIX: with an empty 'includes' the old code left the filtered
    # list empty and therefore always returned [] -- now an empty
    # 'includes' passes every file through.
    if len(includes) > 0:
        filtered_filenames = [fn for fn in filenames
                              if all(inc in fn for inc in includes)]
    else:
        filtered_filenames = filenames

    if len(excludes) > 0:
        returned_filenames = [fn for fn in filtered_filenames
                              if not any(exc in fn for exc in excludes)]
    else:
        returned_filenames = filtered_filenames

    return returned_filenames
def loadpickle(filename, mode='both'):
    """Load a pickle from a given filename.  If it can't be loaded by
    pickle, return -1 -- otherwise return the pickled object.

       mode : str
         'dill', 'pickle', or 'both' to try both

    E.g.,
       data = tools.loadpickle(filename)"""
    # 2011-02-10 15:45 IJC: Created
    # 2014-08-29 19:25 IJMC: First, try dill:
    # 2014-12-31 17:03 IJMC: Added 'dill' vs 'pickle' option
    dill = None
    if mode == 'both':
        import pickle
        try:
            import dill
        except ImportError:
            # dill is optional: degrade gracefully to plain pickle
            # (the old code crashed with ImportError here).
            dill = None
    elif mode == 'dill':
        try:
            import dill as pickle
        except ImportError:
            import pickle
    elif mode == 'pickle':
        import pickle

    good = True
    try:
        # BUGFIX: open in binary mode -- pickle streams are binary data
        # (savepickle already writes with 'wb').
        f = open(filename, 'rb')
    except IOError:
        print("Could not open file: %s" % filename)
        good = False

    if good:
        try:
            ret = pickle.load(f)
        except Exception:
            good = False
        if (not good) and mode == 'both' and dill is not None:
            try:
                # BUGFIX: rewind before retrying -- the failed pickle.load
                # left the file pointer mid-stream, so the old dill retry
                # could never succeed.
                f.seek(0)
                ret = dill.load(f)
                good = True
            except Exception:
                print("Mode 'both' failed to load file:")
                good = False
        if not good:
            print("Could not load pickle from %s" % filename)
        try:
            f.close()
        except Exception:
            print("Could not close file %s" % filename)
            good = False

    if good:
        pass
    else:
        ret = -1
    return ret
def savepickle(obj, filename):
    """Save a pickle to a given filename.  If it can't be saved by
    pickle, return -1 -- otherwise return the (closed) file object.

    To save multiple objects in one file, use (e.g.) a dict:

       tools.savepickle(dict(a=[1,2], b='eggs'), filename)
    """
    # 2011-05-21 11:22 IJMC: Created from loadpickle.
    # 2011-05-28 09:36 IJMC: Added dict example
    # 2014-08-27 12:47 IJMC: By default, try to use Dill:
    # Prefer dill (handles more object types), falling back to pickle.
    try:
        import dill as pickle
    except:
        import pickle

    success = True
    try:
        f = open(filename, 'wb')
    except:
        print("Could not open file: %s : for writing." % filename)
        success = False

    if success:
        try:
            ret = pickle.dump(obj, f)
        except:
            print("Could not write object to pickle file: %s" % filename)
            success = False
        try:
            f.close()
        except:
            print("Could not close pickle file %s" % filename)
            success = False

    if not success:
        f = -1
    return f
def dict2obj(dic):
    """Take an input Dict, and turn it into an object with fields
    corresponding to the dict's keys.

    :SEE_ALSO:
      :func:`obj2dict`
    """
    # 2011-02-17 09:41 IJC: Created
    from spitzer import baseObject
    ret = baseObject()
    if not isinstance(dic, dict):
        print("Input was not a Python dict! Exiting.")
    else:
        # setattr replaces the old exec()-based assignment, which was a
        # syntax error for keys that are not valid Python identifiers.
        for key in dic.keys():
            setattr(ret, key, dic[key])
    return ret
def obj2dict(object, ignore=('_',), verbose=False):
    """Convert an object into a dict.  Ignore functions & methods, and
    any attributes starting with any of the 'ignore' prefixes.

    :INPUTS:
      object : any object
      ignore : tuple of str
        attribute-name prefixes to skip.
      verbose : bool
        if True, report each skipped attribute.

    :RETURNS:
      dict mapping attribute name -> value (routines excluded).

    :SEE_ALSO:
      :func:`dict2obj`
    """
    # 2014-08-29 19:34 IJMC: Created.
    import inspect
    ret = dict()
    for attribute in dir(object):
        # Skip if it matches ANY ignore prefix.  (The original only
        # honored the *last* prefix checked, so earlier matches were
        # silently un-done -- a bug with multi-element `ignore`.)
        if any(attribute.startswith(ign) for ign in ignore):
            if verbose:
                print("Ignoring attribute '%s' in input object." % attribute)
        else:
            val = getattr(object, attribute)
            if not inspect.isroutine(val):
                ret[attribute] = val
            elif verbose:
                print("Attribute '%s' in input object is a routine; ignoring it.." % attribute)
    return ret
#def loadspectrum(filename, lambdascale=1., limits=None, skiprows=None, lamdacol=0, datacol=1 ):
# """Load a spectrum from a FITS or ASCII file, and return the
# wavelength and data in two vectors.#
#
# :INPUTS:
#
# filename (str) -- filename to load. If extension is "FITS"
# load using pyfits, otherwise try to load as
# an ASCII file.
#
# lambdacol (int) -- column number
def find_peaks(vec, sep=0, thresh=None):
    """
    Find all large values in input vector that are separated by at least
    'sep' pixels.

    :INPUTS:
      vec (sequence) -- 1D vector
      sep (scalar) -- minimum separation of returned peaks
      thresh (scalar) -- ignore all peaks lower than this value.

    :EXAMPLE:
      import pylab as py
      import tools
      x = py.linspace(0, 10, 100)  # Generate fake time index
      y = py.sin(6.28*x/10.) + py.sin(6.28*x/2.)  # Generate fake data
      peakvals, peaklocs = tools.find_peaks(y, sep=10)  # Find the peaks
      py.plot(x, y, '-', x[peaklocs], peakvals, 'or')  # Plot them

    :RETURNS:
      peakvals, peakindices
    """
    # 2011-03-22 14:54 IJC: Created
    # 2012-08-09 22:51 IJMC: Added thresh option.
    # (Removed dead `from pylab import find` and `import pdb`: `find` no
    #  longer exists in matplotlib and neither import was used.)
    import numpy as np

    if thresh is None:
        thresh = -np.inf

    vec = np.array(vec)
    npix = len(vec)
    sep = int(np.floor(sep))
    available_index = np.ones(npix, bool)
    peakvals = np.zeros(npix, float)
    peakindices = np.zeros(npix, float)
    npks = 0
    inds = np.arange(npix, dtype=int)

    # Greedily take the highest remaining value, then mask out its
    # +/- sep neighborhood so nearby samples can't also be peaks.
    while available_index.any():
        this_peak = vec[available_index].max()
        this_location = inds[available_index][vec[inds[available_index]] == vec[inds[available_index]].max()][0]
        available_index[max(0, this_location - sep):min(npix, this_location + sep)+1] = False
        if this_peak >= thresh:
            peakvals[npks] = this_peak
            peakindices[npks] = this_location
            npks += 1

    peakvals = peakvals[0:npks]
    peakindices = peakindices[0:npks].astype(int)
    return peakvals, peakindices
def addobj(obj1, obj2, exclude='TBD'):
    """Combine fields in two objects with the same attributes.  A
    handy function!

    :INPUTS:
      obj1, obj2 : objects of the same kind

    :RETURNS:
      obj12 : new object, with fields of 1 and 2 combined.
      OR
      -1, if the objects have absolutely no attributes in common.

    :NOTES:
      Methods/attributes beginning with an underscore (_) are not combined.
    """
    # 2011-05-25 10:11 IJMC: Created
    # 2011-06-01 15:15 IJMC: Fixed a bug in concatenate(common_dim)
    import copy
    import numpy

    # Helper function:
    def combine_atts(a1, a2):
        """Combine two objects, as best you can!
        If they are of different classes, then return None.
        If they are non-iterable, combine them in a 2-tuple."""
        combined = False
        if a1.__class__ != a2.__class__:  # ('<>' was a Py3 syntax error)
            ret = None
            combined = True
        if ((not hasattr(a1, '__iter__')) or isinstance(a1, list) or isinstance(a1, str)) \
                and (not combined):
            # Scalars, lists, strings: try '+', fall back to a 2-tuple.
            try:
                ret = a1 + a2
            except Exception:
                ret = (a1, a2)
            combined = True
        if isinstance(a1, numpy.ndarray) and (not combined):
            sh1, sh2 = a1.shape, a2.shape
            if sh1 == sh2:
                # Exactly same dimensions; I don't know which dimension
                # to combine along, so just create a new one.
                ret = numpy.array([a1, a2])
                combined = True
            elif len(sh1) != len(sh2):
                # Wholly different shapes; can't combine.
                ret = (a1, a2)
                combined = True
            elif len(set(sh1).difference(sh2)) > 1:
                # Too many disparate dimensions.
                ret = (a1, a2)
                combined = True
            else:  # There must be exactly 1 non-common dimension.
                if len(sh1) == 0 or len(sh1) == 1:  # Scalar (0D) or Vector (1D):
                    ret = numpy.concatenate((a1.ravel(), a2.ravel()))
                    combined = True
                else:  # Array (>1 D): join along the single differing axis.
                    # NOTE: the original used "True - (bool array)", which
                    # modern numpy rejects as boolean subtraction; "!=" is
                    # the equivalent mask.
                    common_dim = (numpy.arange(len(sh1)) * \
                                      (numpy.array(sh1) != set(sh1).intersection(sh2).pop())).sum()
                    ret = numpy.concatenate((a1, a2), common_dim)
                    combined = True
        if not combined:
            ret = (a1, a2)
        return ret

    try:
        newobj = copy.deepcopy(obj1)
    except Exception:
        try:
            newobj = copy.deepcopy(obj2)
        except Exception:
            print("copy.deepcopy() failed... could not copy either input object")
            return -1

    # Get all attributes, excluding everything beginning with an underscore:
    att1 = [att for att in dir(obj1) if not att.startswith('_')]
    att2 = [att for att in dir(obj2) if not att.startswith('_')]

    # Get attributes common to both objects:
    common_atts = set(att1).intersection(att2)
    abandoned_atts = (set(att1).union(att2)).difference(common_atts)
    if len(common_atts) == 0:
        return -1

    # Remove all non-common attributes:
    for att in abandoned_atts:
        if hasattr(newobj, att):
            setattr(newobj, att, None)

    # Combine all common attributes:
    for att in common_atts:
        if (not hasattr(newobj, att)) or (not hasattr(obj1, att)) or \
                (not hasattr(obj2, att)):  # Test for coding mistakes:
            print("Couldn't find attribute '%s'; something went wrong!" % att)
        else:
            setattr(newobj, att, combine_atts(getattr(obj1, att), getattr(obj2, att)))
    return newobj
def multifunc(params, func1, func2, npar1, npar2=None, args1=(), args2=()):
    """Multiply two functions together.  DEPRECATED.

    :EXAMPLE:
      ::

        import numpy as np
        multifunc([.785, .785], np.cos, np.sin, 1)  # ~ cos(pi/4)*sin(pi/4) ~ 0.5

    :SEE_ALSO:
      :func:`multifunc_general`, which this now wraps.
    """
    # 2011-06-03 11:26 IJC: Created
    # 2012-06-10 16:08 IJMC: Now reflects preferred use of multifunc_general.
    print("Multifunc() is deprecated; use multifunc_general() instead.")

    # Split the parameter vector: whatever func1 doesn't take, func2 gets.
    if npar2 is None:
        npar2 = len(params) - npar1
    return multifunc_general(params, (func1, func2), (npar1, npar2), (args1, args2))
def multifunc_general(params, funcs, npar=None, args=None):
    """Multiply results of several functions together.

    :INPUTS:
      params : sequence
        Concatenated sequence of parameters (first arguments) for each
        of the functions in 'funcs'.

      funcs : function or tuple
        Single function, or a tuple of several functions to call.

      npar : tuple
        If more than one function is used, npar must be a tuple
        specifying the number of parameters passed to each function
        (as its first input).  E.g., if npar = (2, 3) then the result
        will be: funcs[0](params[0:2]) * funcs[1](params[2:5])

      args : tuple
        If more than one function is used, args must be a tuple
        specifying the additional arguments to be passed to each
        function.  E.g.: funcs[0](params[0:2], *args[0])

    :RETURNS:
      the elementwise product, or -1 if npar was required but omitted.

    :EXAMPLE:
      ::

        import numpy as np
        multifunc_general([.785, .785], (np.cos, np.sin), (1, 1))  # ~ 0.5
    """
    # 2012-06-10 15:54 IJMC: Created; modified from multifunc
    if not hasattr(funcs, '__iter__'):
        # Single function: the original crashed on '*None' when args
        # was left at its default -- treat None as "no extra args."
        if args is None:
            args = ()
        ret = funcs(params, *args)
    else:
        ret = 1.
        nfunc = len(funcs)
        if npar is None:
            print("Multiple functions input; npar ought not to be None!")
            return -1
        if args is None:
            args = ((),) * nfunc
        i0 = 0
        for ii in range(nfunc):
            these_params = params[i0:i0+npar[ii]]
            i0 += npar[ii]
            ret *= funcs[ii](these_params, *args[ii])
    return ret
def sumfunc_general(params, funcs, npar=None, args=None):
    """Add results of several functions together.

    :INPUTS:
      params : sequence
        Concatenated sequence of parameters (first arguments) for each
        of the functions in 'funcs'.

      funcs : function or tuple
        Single function, or a tuple of several functions to call.

      npar : tuple
        If more than one function is used, npar must be a tuple
        specifying the number of parameters passed to each function
        (as its first input).  E.g., if npar = (2, 3) then the result
        will be: funcs[0](params[0:2]) + funcs[1](params[2:5])

      args : tuple
        If more than one function is used, args must be a tuple
        specifying the additional arguments to be passed to each
        function.  E.g.: funcs[0](params[0:2], *args[0])

    :RETURNS:
      the elementwise sum, or -1 if npar was required but omitted.

    :EXAMPLE:
      ::

        import numpy as np
        sumfunc_general([.785, .785], (np.cos, np.sin), (1, 1))  # ~ 1.414
    """
    # 2012-06-10 15:54 IJMC: Created; modified from multifunc_general
    # 2014-08-03 18:07 IJMC: Fixed a little bug: ret had been
    #                        initialized to unity, but should be zero.
    if not hasattr(funcs, '__iter__'):
        # Single function: guard against '*None' (crashed in the original
        # when args was left at its default).
        if args is None:
            args = ()
        ret = funcs(params, *args)
    else:
        ret = 0.
        nfunc = len(funcs)
        if npar is None:
            print("Multiple functions input; npar ought not to be None!")
            return -1
        if args is None:
            args = ((),) * nfunc
        i0 = 0
        for ii in range(nfunc):
            these_params = params[i0:i0+npar[ii]]
            i0 += npar[ii]
            ret += funcs[ii](these_params, *args[ii])
    return ret
def sumfunc(params, func1, func2, npar1, npar2=None, args1=(), args2=()):
    """Add the results of two functions evaluated on split parameters.

    The first `npar1` entries of `params` go to `func1`; the next
    `npar2` (by default, all remaining entries) go to `func2`.

    :EXAMPLE:
      ::

        import numpy as np
        sumfunc([.785, .785], np.cos, np.sin, 1)  # ~ cos(pi/4)+sin(pi/4) ~ 1.414
    """
    # 2011-10-27 11:50 IJMC: Created from multifunc
    # By default, give func2 every parameter func1 didn't claim:
    if npar2 is None:
        npar2 = len(params) - npar1
    first_params = params[0:npar1]
    second_params = params[npar1:npar1 + npar2]
    return func1(first_params, *args1) + func2(second_params, *args2)
def imrot(*varargin):
    """
    Rotate a SQUARE image by theta (in radians).

    :SYNTAX:
      res = imrot(img, theta)

    :INPUTS:
      img : 2D array (square)
      theta : rotation angle, radians
      interpMethod : optional third argument (currently unused)

    :NOTES:
      This is VERY SLOW and only uses nearest-neighbor interpolation.
      Image must be square; size remains the same.
    """
    # 2011-06-08 15:48 IJMC: Adapted to Python, but without interp2.
    # 2006/11/30 IJC: Added interpolation option
    # Written by Joseph Green at the Jet Propulsion Laboratory
    from analysis import pad
    from pylab import array, arange, meshgrid, cos, sin, isnan, dot, sqrt, find, np, dot, vstack
    import pdb
    nargin = len(varargin)
    # With a single argument there is no angle: return the image unchanged.
    if nargin==1:
        return varargin[0]
    elif nargin==2:
        img = varargin[0]
        theta = varargin[1]
        interpMethod = 'nn'
    else:
        img = varargin[0]
        theta = varargin[1]
        interpMethod = varargin[2];
    # Pad to a square of side npix (pad() is a project helper).
    npix = max(img.shape);
    img = pad(img,npix);
    # Centered pixel-coordinate grid.
    xs = arange(-npix/2, npix/2) + 0.
    x, y = meshgrid(xs,xs)
    # 2x2 rotation matrix.
    M = array([[cos(theta), sin(theta)], [-sin(theta), cos(theta)]])
    xr=x.copy()
    yr=y.copy()
    # NOTE(review): xxr/yyr (the actual rotated coordinates) are computed
    # but never used below -- the loop reads xr/yr, which are plain copies
    # of x/y, so the output appears NOT to be rotated at all.  Presumably
    # xr/yr were meant to be xxr/yyr reshaped to the grid; confirm intent
    # before changing.
    xxr, yyr = dot(M, vstack((x.ravel(), y.ravel())))
    #pdb.set_trace()
    res = 0*img
    xlim = xs.min(), xs.max()
    for m in range(npix):
        for n in range(npix):
            # Coordinates falling outside the original grid map to zero.
            if xr[m,n] < xlim[0] or xr[m,n] > xlim[1] or \
               yr[m,n] < xlim[0] or yr[m,n] > xlim[1]:
                res[m,n] = 0.
            else:
                # Nearest-neighbor lookup: distance from every input pixel
                # to the target coordinate...
                distances = sqrt((x - xr[m,n])**2 + (y - yr[m,n])**2).ravel()
                #distances = np.abs((x - xr[m,n]) + 1j*(y - yr[m,n]))
                mindist = distances.min()
                #locations = find(distances==mindist)
                # ...but only an EXACT hit (mindist==0) is copied; all
                # other positions are zeroed.
                if mindist==0:
                    res[m,n] = (img.ravel()[distances==mindist]) #.mean()
                else:
                    res[m,n] = 0.
    return res
def readPSplotdata(filename,
xlims=None, ylims=None,
spacedelimiter=' ', pairdelimiter=','):
"""Read in a raw PostScript plot datafile and output Numpy arrays.
:INPUTS:
filename : str
filename of the data file.
xlims : tuple
(lowest x value of any data point, highest x value of any data point)
ylims : tuple
(lowest y value of any data point, highest y value of any data point)
spacedelimiter : str
delimiter between data pairs; by default a space (" ")
pairdelimiter : str
delimiter within data pairs; by default a comma (",")
:FILE_FORMAT:
The datafile should have the format:
"m x0,y0 dx1,dy1 dx2,dy2 ..."
:OUTPUTS:
(x, y) -- tuple of 1D arrays
"""
# 2011-08-24 09:20 IJMC: Created
# 2014-08-05 13:12 IJMC: Updated with optional axis-limit inputs;
# contributed by Corey Reed (cjreed@uci.edu).
from numpy import array
# Read file:
f = open(filename, 'r')
rawdata = f.readlines()
f.close()
# Check for validity
# Get datapoints
datavals = []
for line in rawdata:
datavals = datavals + line.split(spacedelimiter)
#
foundinit = False
for dataval in datavals:
if dataval.find(pairdelimiter)>-1:
xval, yval = map(float, dataval.split(pairdelimiter))
if not foundinit: # starting point
foundinit = True
x, y = [xval], [yval]
else: # differential
x.append(x[-1] + xval)
y.append(y[-1] + yval)
# calibrate
if ( (xlims!=None) and (ylims!=None) ):
xmin,xmax = min(x), max(x)
ymin,ymax = min(y), max(y)
xscale = (xlims[1] - xlims[0]) / (xmax-xmin)
yscale = (ylims[1] - ylims[0]) / (ymax-ymin)
xoff = xlims[0] - (xmin*xscale)
yoff = ylims[0] - (ymin*yscale)
x[:] = [(i*xscale)+xoff for i in x]
y[:] = [(i*yscale)+yoff for i in y]
return array(x), array(y)
def bnu(T, lam):
    """Planck blackbody function per unit frequency, B_nu(T).

    :INPUTS:
      T : scalar or array
        temperature in Kelvin

      lam : scalar or array
        wavelength in microns [but intensity will be per Hz]

    Value returned is in cgs units: erg/s/cm^2/Hz/sr
    """
    # 2011-11-04 10:47 IJMC: Added to Tools
    # 2013-02-19 19:42 IJMC: Updated to use precise value of c.
    from numpy import exp
    # Physical constants, cgs:
    lightspeed = 29979245800.   # cm/s
    planck_h = 6.626068e-27     # erg s
    boltzmann_k = 1.3806503e-16  # erg/K
    lam_cm = lam / 1e4           # microns -> cm
    freq = lightspeed / lam_cm   # Hz
    inv_lam = 1. / lam_cm        # nu/c, expressed via the wavelength
    exponent = planck_h * freq / (boltzmann_k * T)
    return ((2 * planck_h * inv_lam**2 * freq)) / (exp(exponent) - 1)
def blam(T, lam):
    """Planck blackbody function per unit wavelength, B_lam(T).

    :INPUTS:
      T : scalar or array
        temperature in Kelvin

      lam : scalar or array
        wavelength in microns

    Value returned is in (nearly) cgs units: erg/s/cm^2/micron/sr
    """
    # 2012-06-12 19:56 IJMC: Created.
    # 2013-02-19 19:42 IJMC: Updated to use precise value of c.
    lightspeed = 29979245800.  # cm/s
    freq = lightspeed / (lam / 1e4)  # Hz
    # Convert via the identity (lam * B_lam) == (nu * B_nu):
    return bnu(T, lam) * (freq / lam)
def planet2temp(rprs, fpfs, teff, lam=24., gamma=0.8, ntrials=10000):
    """Convert planet/star size and flux contrasts to brightness temperatures.

    :INPUTS:
      rprs : 2-tuple, either
        (value, dist) where dist is the sampled posterior distribution
        of planet/star radius ratio (e.g., from MCMC output), OR
        (value, uncertainty).

      fpfs : 2-tuple, same convention, for the planet/star flux ratio.

      teff : 2-tuple, same convention, for the stellar effective
        temperature.

      lam : float
        wavelength of observations, in microns

      gamma : float
        factor to account for the fact that a star's infrared flux is
        lower than predicted by a blackbody of a given effective
        temperature.  Set to 0.8 for 24 microns.

      ntrials : int
        Number of times to resample distributions.

    :RETURNS:
      (best-fit brightness temperature, array of ntrials sampled
      temperatures)

    :REQUIREMENTS:
      :doc:`scipy.optimize`, :doc:`numpy`
    """
    # 2011-11-04 11:27 IJMC: Created
    from numpy import random
    # NOTE(review): `scipy.array` was removed from modern SciPy releases;
    # this import works only with older SciPy versions -- verify.
    from scipy import optimize, array

    # Residual for the root-finder: zero when bnu(temp) equals the
    # target intensity.  (bnu is defined elsewhere in this module.)
    def err_temp_bb(temp, bnu_t, lam):
        return bnu_t - bnu(temp, lam)

    # Promote bare scalars to (value, 0-uncertainty) pairs:
    if not hasattr(rprs, '__iter__'):
        rprs = (rprs, 0)
    if not hasattr(fpfs, '__iter__'):
        fpfs = (fpfs, 0)
    if not hasattr(teff, '__iter__'):
        teff = (teff, 0)

    # Create resampled distributions:
    npts_r = len(rprs)
    npts_f = len(fpfs)
    npts_t = len(teff)
    # If element [1] is itself a sequence, it is a posterior sample to be
    # bootstrap-resampled; otherwise it is a Gaussian 1-sigma uncertainty.
    resample_r = hasattr(rprs[1], '__iter__')
    resample_f = hasattr(fpfs[1], '__iter__')
    resample_t = hasattr(teff[1], '__iter__')
    # Central ("best") values, used for the point estimate below:
    rprs_0 = rprs[0]
    fpfs_0 = fpfs[0]
    teff_0 = teff[0]
    if not resample_r:
        rprs = random.normal(rprs[0], rprs[1], ntrials)
    else:
        rprs = array(rprs[1], copy=False)[random.uniform(0, len(rprs[1]), ntrials).astype(int)]
    if not resample_f:
        fpfs = random.normal(fpfs[0], fpfs[1], ntrials)
    else:
        fpfs = array(fpfs[1], copy=False)[random.uniform(0, len(fpfs[1]), ntrials).astype(int)]
    if not resample_t:
        teff = random.normal(teff[0], teff[1], ntrials)
    else:
        teff = array(teff[1], copy=False)[random.uniform(0, len(teff[1]), ntrials).astype(int)]

    # Planet surface brightness implied by each (contrast, teff, rprs)
    # draw; `gamma` de-rates the stellar blackbody.
    planet_flux = fpfs * bnu(teff*gamma, lam) / (rprs**2)
    planet_flux_0 = fpfs_0 * bnu(teff_0*gamma, lam) / (rprs_0**2)
    # Invert the Planck function numerically for every sample (1000 K seed):
    planet_temps = array([optimize.fsolve(err_temp_bb, 1000, \
                                          args=(thisflux, lam)) \
                              for thisflux in planet_flux]).ravel()
    planet_temp_0 = optimize.fsolve(err_temp_bb, 1000, \
                                    args=(planet_flux_0, lam))
    return (planet_temp_0, planet_temps)
def erf_approx(z, N):
    """Weideman 1994's approximate complex error function.

    :INPUTS:
      z : real or complex float
      N : number of terms to use.

    :NOTES:
      returns w(z) = exp(-z**2) erfc(-1j*z)

      NOTE(review): this function appears UNFINISHED -- it builds the
      sampled integrand `f` and then falls off the end (returning None);
      `z` is never used.  The final `np.concatenate(([0], f))` also
      mixes a 1-D list with the 2-D array `f` (t has shape (2M-1, 1)),
      which looks shape-inconsistent -- confirm against the Weideman
      (1994) algorithm before use.
    """
    # 2011-11-14 17:01 IJMC: Created
    import numpy as np
    M = 2 * N
    M2 = 2*M  # number of sampling points
    # Sample indices as a column vector:
    k = np.arange(-M+1, M).reshape(M2 - 1, 1)
    L = np.sqrt(N / np.sqrt(2))  # optimal scaling parameter
    theta = k * np.pi/M
    t = L * np.tan(theta/2)  # variable-transformed sample points
    # Function to be transformed:
    f = np.exp(-t**2) * (L**2 + t**2)
    f = np.concatenate(([0], f))
def ee_psf(psf, energy=0.8, center=None):
    """Determine the diameter in pixels for a given encircled energy.

    (Converted from Matlab: [d, encircledenergy] = ee_psf(psf, energy, center))

    :INPUTS:
      psf : 2D array
        point spread function.
      energy : scalar or sequence
        encircled energy fraction(s) desired (default 0.8).  If a
        sequence, outputs 'd' and 'encircledenergy' are arrays of the
        same length.
      center : optional [x, y]
        coordinates of the PSF center; defaults to the location of the
        maximum-valued pixel.

    :OUTPUTS:
      d : diameter(s) of the circle enclosing 'energy' [pix]
      encircledenergy : achieved encircled energy at each computed 'd'
    """
    # 2012-01-06 15:48 IJMC: Converted from Matlab to Python;
    #                        originally written at JPL.
    # (The '<>' operators below were replaced with '!=' -- '<>' is a
    #  syntax error in Python 3.)
    from analysis import pad
    import numpy as np

    psf = np.array(psf, copy=False)
    npix = max(psf.shape)
    psf = pad(psf, npix)

    if center is None:
        # Default to the brightest pixel:
        xc, yc = np.nonzero(psf == psf.ravel().max())
        center = [yc, xc]
    if energy is None:
        energy = np.array([0.8])
    elif not hasattr(energy, '__iter__'):
        energy = np.array([energy])
    else:
        energy = np.array(energy, copy=False)

    encircledenergy = np.zeros(len(energy))  # achieved EE per request
    d = np.zeros(len(energy))                # diameter per request
    xc, yc = center[0:2]
    ee = 0
    eeold = -1

    # Radial distance of every pixel from the chosen center:
    xs = np.arange(npix) - xc
    ys = np.arange(npix) - yc
    x, y = np.meshgrid(xs, ys)
    r = np.abs(x + 1j*y)
    energy_tot = psf.sum()

    for inc_energy in range(len(energy)):
        rad = 0
        ee = 0
        # Coarse-to-fine search: start with big radius steps, then
        # back off one step and shrink the step size by 'dfactor'.
        dfactor = 4
        dpix = dfactor**np.floor(np.log(npix)/np.log(dfactor) - 1)
        while ee != eeold:
            while (ee < energy[inc_energy]) and (ee != eeold):
                rad = rad + dpix
                emasked = (r < rad) * psf
                eeold = ee
                ee = emasked.sum() / energy_tot
            rad = rad - dpix
            dpix = dpix / dfactor
            rad = rad + dpix
            emasked = (r < rad) * psf
            eeold = ee
            ee = emasked.sum() / energy_tot
        d[inc_energy] = 2 * rad
        encircledenergy[inc_energy] = ee
    return d, encircledenergy
def extinct_cardelli(lam_um, RV=3.1, warn=True):
    """Compute the Cardelli et al. 1989 A_lam/A_V extinction (reddening).

    :INPUTS:
      lam_um : float or Numpy array
        wavelength desired, in microns

      RV : float
        R_V extinction parameter

      warn : bool
        If True, print a warning if wavelength is outside the valid range.

    :OUTPUTS:
      extinct : Numpy array
        extinction -- A_lambda/A_V (always an array, even for scalar input)

    :NOTES:
      This is only valid for wavelengths in the range 0.3 - 3.3 microns!
    """
    # 2012-02-24 23:06 IJMC: Created
    from numpy import array
    if not hasattr(lam_um, '__iter__'):
        lam_um = array([lam_um], copy=True)
    else:
        lam_um = array(lam_um, copy=True)
    #lam_um = 1./lam_um

    # Regime masks (Cardelli splits at x = 1.1 inverse microns):
    ind0 = lam_um < (1./1.1)   # optical/visible
    ind1 = lam_um >= (1./1.1)  # near-infrared
    ind2 = (lam_um < 0.25) + (lam_um > 3.5)  # outside validity range
    extinct = 0*lam_um
    if warn and ind2.any():
        print("Wavelengths found outside valid range (0.3-3 microns).  Results are suspect.")

    x = 1./lam_um  # inverse microns
    if ind0.any():
        # Optical: a(y) + b(y)/RV as 7th-order polynomials in y = x - 1.82.
        y = x[ind0] - 1.82
        extinct[ind0] += 1.
        a = [0.17699, -0.50447, -0.02427, 0.72085, 0.01979, -0.77530, 0.32999]
        b = [1.41338, 2.28305, 1.07233, -5.38434, -0.62251, 5.30260, -2.09002]
        ypow = y.copy()
        for ii in range(7):
            extinct[ind0] += (a[ii] + b[ii] / RV) * ypow
            ypow *= y

    if ind1.any():
        # Near-infrared power law:
        extinct[ind1] = x[ind1]**1.61 * (0.574 - 0.527 / RV)

    return extinct
def shift_image(im, dx=0, dy=0, buffer=0):
    """Shift a 2D image by the specified number of integer pixels.

    :INPUTS:
      im : 2D Numpy array
        numpy array of desired size

      dx : int
        number of pixels to shift along the first axis

      dy : int
        number of pixels to shift along the second axis

      buffer : scalar
        Default value for pixels left unfilled by the shift
    """
    # 2012-02-25 02:46 IJMC: Created
    from numpy import array, abs
    im = array(im, copy=False)
    nrows, ncols = im.shape
    shifted = 0*im + buffer
    # Shifts at least as large as the image leave only the buffer value.
    if abs(dx) < nrows and abs(dy) < ncols:
        dst_rows = slice(max(0, dx), min(nrows, nrows + dx))
        src_rows = slice(max(0, -dx), min(nrows, nrows - dx))
        dst_cols = slice(max(0, dy), min(ncols, ncols + dy))
        src_cols = slice(max(0, -dy), min(ncols, ncols - dy))
        shifted[dst_rows, dst_cols] = im[src_rows, src_cols]
    return shifted
def resamp(frame, resamp, retcoords=False):
    """Resample a 2D array by a given factor, using bilinear interpolation.

    :INPUTS:
      frame : 2D Numpy Array
        data array to be resampled

      resamp : positive scalar
        resampling factor (typically an integer greater than 1)

      retcoords: bool
        If True, return tuple (frame2, x2, y2)

    :NOTES:
      Result needs to be scaled by (1/resamp^2) to conserve flux
    """
    # 2012-02-25 07:21 IJMC: Created
    # 2012-02-26 14:19 IJMC: Added retcoords option
    # (Switched from the private, deprecated scipy.interpolate.fitpack2
    #  path to the public RectBivariateSpline -- same class.)
    from numpy import array, arange
    from scipy import interpolate

    # Parse inputs:
    resamp = float(resamp)
    frame = array(frame, copy=False)
    nx0, ny0 = frame.shape

    nx = ((nx0 - 1)*resamp + 1.)  # Avoid resampling at pixel locations
    ny = ((ny0 - 1)*resamp + 1.)  # outside the original boundaries.
    xx0 = range(nx0)
    yy0 = range(ny0)
    x1, y1 = arange(nx)/resamp, arange(ny)/resamp
    # kx=ky=1 makes the spline exactly bilinear; s=0 forces interpolation.
    rectspline = interpolate.RectBivariateSpline(xx0, yy0, frame, kx=1, ky=1, s=0)
    frame2 = rectspline(x1, y1)

    if retcoords:
        ret = frame2, x1, y1
    else:
        ret = frame2
    return ret
def plotlikelihood_2d(L, x=None, y=None, conf=[0.683], figsize=[8, 6], contourargs=dict(colors='k'), posteriorargs=dict(color='k'), limitargs=dict(color='k', linestyle=':'), xlabel=None, ylabel=None, buffers=[.1, .1, .1, .1]):
    """Plot contours and histograms for 2D likelihood array.

    :INPUTS:
      L : 2d Numpy array
        Likelihood values, not necessarily normalized.  (Remember that
        L = exp[-chi^2 / 2.] )

      x : sequence
        Values along the first dimension of L.  Thus len(x) must equal
        L.size[0]

      y : sequence
        Values along the second dimension of L.  Thus len(x) must equal
        L.size[1]

      figsize : 2-sequence
        Size of figure to be created, in inches.

      conf : scalar or sequence
        Confidence intervals to plot

      contourargs : dict
        Keyword arguments to be passed to matplotlib.contour

      posteriorargs : dict
        Keyword arguments to be passed to matplotlib.plot for
        posterior distributions

      limitargs : dict
        Keyword arguments to be passed to matplotlib.plot for
        1D confidence limits

      xlabel : str
      ylabel : str

      buffers : 4-sequence
        fractional buffer width around edges: [left, right, bottom, top]

    :RETURNS:
      (cax, hax, hay) -- the contour axis and the two marginal-posterior
      axes.
    """
    # 2012-06-19 11:47 IJMC: Created
    # confmap comes from the project-local kdestats module; nextfig and
    # np are module-level names defined elsewhere in this file.
    from kdestats import confmap
    import pylab as py

    if not hasattr(conf, '__iter__'):
        conf = [conf]
    nconf = len(conf)

    # Define array indices:
    nx, ny = L.shape
    x0 = np.arange(nx)
    y0 = np.arange(ny)

    # Define axis values (default to plain indices):
    if x is None:
        x = x0.copy()
    if y is None:
        y = y0.copy()

    # Define axis labels:
    if xlabel is None:
        xlabel = ''
    if ylabel is None:
        ylabel = ''

    # Define marginalized posterior probability distributions:
    yppd = L.sum(0)
    xppd = L.sum(1)

    # Define confidence levels; the 1-D limits are the two-sided
    # quantiles of the (normalized, cumulative) marginal posteriors:
    conflevels = confmap(L, conf)
    xconf = [[np.interp(val, np.cumsum(xppd/xppd.sum()), x) for val in [(1.-cl)/2., 1. - (1.-cl)/2.]] for cl in conf]
    yconf = [[np.interp(val, np.cumsum(yppd/yppd.sum()), y) for val in [(1.-cl)/2., 1. - (1.-cl)/2.]] for cl in conf]

    # Prepare for plotting: figure-fraction geometry of the three panels.
    lbuffer, rbuffer, bbuffer, tbuffer = buffers
    eff_width = 1. - lbuffer - rbuffer
    eff_height = 1. - tbuffer - bbuffer
    contour_frac = 0.6  # fraction of the plotting area given to the contour panel

    # Begin plotting: main contour panel (lower left).
    py.figure(nextfig(), figsize)
    cax = py.subplot(2, 2, 3, position=[lbuffer, bbuffer, eff_width*contour_frac, eff_height*contour_frac])
    py.contour(x, y, L.transpose(), conflevels, **contourargs)
    py.minorticks_on()
    py.xlabel(xlabel)
    py.ylabel(ylabel)
    axlim = py.axis()
    # Overlay the 1-D confidence limits as lines spanning the panel:
    for limits in yconf:
        py.plot(axlim[0:2], [limits[0]]*2, **limitargs)
        py.plot(axlim[0:2], [limits[1]]*2, **limitargs)
    for limits in xconf:
        py.plot([limits[0]]*2, axlim[2:4], **limitargs)
        py.plot([limits[1]]*2, axlim[2:4], **limitargs)
    # Top panel: marginal posterior in x, sharing the contour's x-range.
    hax = py.subplot(2, 2, 1, position=[lbuffer, bbuffer + eff_height*contour_frac + 1e-5, eff_width*contour_frac, eff_height*(1. - contour_frac)])
    hax.plot(x, xppd, **posteriorargs)
    x_ylim = py.ylim()
    for limits in xconf:
        py.plot([limits[0]]*2, x_ylim, **limitargs)
        py.plot([limits[1]]*2, x_ylim, **limitargs)
    py.ylim(x_ylim)
    py.xlim(axlim[0:2])
    yt = py.yticks()[0]
    py.xticks(py.xticks()[0], [])
    py.yticks(yt[yt>0], [])
    py.ylim(x_ylim)
    py.xlim(axlim[0:2])
    py.minorticks_on()
    # Right panel: marginal posterior in y, sharing the contour's y-range.
    hay = py.subplot(2, 2, 4, position=[lbuffer + eff_width*contour_frac + 1e-5, bbuffer, eff_width*(1. - contour_frac), eff_height*contour_frac])
    hay.plot(yppd, y, **posteriorargs)
    y_xlim = py.xlim()
    for limits in yconf:
        py.plot(y_xlim, [limits[0]]*2, **limitargs)
        py.plot(y_xlim, [limits[1]]*2, **limitargs)
    py.ylim(axlim[2::])
    py.xlim(y_xlim)
    xt = py.xticks()[0]
    py.xticks(xt[xt>0], [])
    py.yticks(py.yticks()[0], [])
    py.xlim(y_xlim)
    py.ylim(axlim[2::])
    py.minorticks_on()
    return cax, hax, hay
def areaOverlappingCircles(r1, r2, d):
    """Return overlapping area of two circles.  From Wolfram Mathworld.

    r1, r2 are the radii of the circles.
    d is the distance (or distances) between their centers.

    Returns an array with one area per distance value.
    """
    # 2012-07-30 15:40 IJMC: Created, from Wolfram Mathworld
    # Order the radii so that r1 <= r2:
    if r1 > r2:
        r1, r2 = r2, r1
    r1sq = r1 * r1
    r2sq = r2 * r2
    if not hasattr(d, '__iter__'):
        d = np.array([d])
    else:
        d = np.array(d, copy=False)
    # Partial overlap: centers closer than r1+r2 but farther than r2-r1.
    partial = (d < (r1 + r2)) * (d > (r2 - r1))
    # Full overlap: the smaller circle lies entirely inside the larger.
    full = d <= (r2 - r1)
    dp = d[partial]
    dsq = dp ** 2
    area = np.zeros(d.shape)
    area[full] = np.pi * r1sq
    # MathWorld circular-lens formula for the partial-overlap cases:
    area[partial] = (
        r1sq * np.arccos((dsq + r1sq - r2sq) / (2 * dp * r1))
        + r2sq * np.arccos((dsq + r2sq - r1sq) / (2 * dp * r2))
        - 0.5 * np.sqrt((-dp + r1 + r2) * (dp + r1 - r2) * (dp - r1 + r2) * (dp + r1 + r2))
    )
    return area
def findRectangles(a, minsepy=None, minsepx=None, edgepad=10):
    """Find corner coordinates of approximate rectangle shapes in an array.

    :INPUTS:
      a : 2D Numpy array

      minsepy, minsepx : scalar
        Minimum separation from one upper or left-hand border to the
        next.  (cf. :func:`find_peaks`)

      edgepad : int
        Pad the array with this many zeros on all sides (to find
        rectangles abutting array edges)

    :RETURNS:
      corners : (nreg, 4) int array, one row [x0, x1, y0, y1] per region.
    """
    # 2012-08-09 23:51 IJMC: Created
    # 2012-08-17 14:53 IJMC: Added 'edgepad' option.
    # 2012-10-21 22:46 IJMC: Fixed 'x_borders' when edgepad is called.
    from scipy.signal import medfilt2d
    from analysis import pad
    # medfilt2d only accepts float32/float64 input:
    dtype = a.dtype
    if dtype not in (np.float32, np.float64):
        a = a.astype(np.float32)
    #sm = medfilt2d(a, 5)
    # Median-smooth, then zero-pad so edges become detectable transitions:
    sm = pad(medfilt2d(a, 5), a.shape[0]+edgepad*2, a.shape[1]+edgepad*2)
    # Discrete gradients along each axis (padded back to full size):
    smlox = np.hstack((np.diff(sm, axis=1), np.zeros((sm.shape[0],1))))
    # NOTE(review): np.zeros(sm.shape[0]) builds a ROW whose length is the
    # number of rows; this only matches for square 'sm'.  For non-square
    # input this vstack presumably mis-sizes -- confirm before relying on
    # non-square arrays.
    smloy = np.vstack((np.diff(sm, axis=0), np.zeros(sm.shape[0])))
    # Boxcar-smooth the summed gradients and find their peaks: rising
    # edges mark lower borders, falling edges (negated) upper borders.
    boxcar = np.ones(5)/5.
    smloys = np.convolve(smloy.sum(1), boxcar, 'same')
    smloxs = np.convolve(smlox.sum(0), boxcar, 'same')
    y_lowers = np.sort(find_peaks(smloys, minsepy, thresh=10)[1]) - edgepad
    y_uppers = np.sort(find_peaks(-smloys, minsepy, thresh=10)[1]) - edgepad
    nreg = y_uppers.size
    corners = np.zeros((nreg, 4), dtype=int)
    corners[:,2] = y_lowers + 1
    corners[:,3] = y_uppers + 1
    # Within each horizontal band, locate the two strongest vertical edges:
    for ii in range(nreg):
        subreg = smlox[y_lowers[ii]:y_uppers[ii]]
        xedges = np.abs(np.convolve(subreg.sum(0), boxcar, 'same'))
        x_borders = np.sort(find_peaks(xedges, minsepx)[1][0:2])
        corners[ii,0:2] = x_borders - edgepad+1
    return corners
def extractSubregion(fitsfilename, corners=None, dx=None, dy=None, kw='subreg', retall=False):
    """Extract a specified rectangular subregion from a FITS file.

    :INPUTS:
      fitsfilename : str or 2D ndarray
        Name of the (2D) FITS file, or the data array itself.

      corners : str, 4-sequence
        if sequence: [x0, x1, y0, y1], corners of subregion.
        if str: header keyword containing this sequence.
        In either case, the extracted subregion (when dx=dy=0) will be:
          data[corners[2]:corners[3], corners[0]:corners[1]]

      dx : None, 2-sequence
        If sequence: [x0, x1] will become [x0-dx[0], x1+dx[1]]

      dy : None, 2-sequence
        If sequence: [y0, y1] will become [y0-dy[0], y1+dy[1]]

      kw : None, str
        If str: this header keyword will be updated with the new
        corners (possibly modified by dx, dy)

    :OUTPUTS:
      (subregion_data, [fitsfile_header, corners_used])

      If the specified header keyword is not found, or the specified
      corners return an error, then this function will crash inelegantly.

    :NOTE:
      WCS headers will not be updated, so be careful when using this
      routine for imaging data!
    """
    # 2012-08-28 15:25 IJMC: Created
    # 2013-01-20 14:50 IJMC: Clarified documentation of 'corners' input.
    # Prefer astropy's FITS reader; fall back to legacy standalone pyfits.
    try:
        from astropy.io import fits as pyfits
    except:
        import pyfits
    # Normalize dx/dy into 2-element [pre, post] expansion lists:
    if dx is None:
        dx = 0
    if dy is None:
        dy = 0
    if not hasattr(dx, '__iter__'):
        dx = [dx]
    if not hasattr(dy, '__iter__'):
        dy = [dy]
    if len(dx) < 2:
        dx = [dx[0], dx[0]]
    if len(dy) < 2:
        dy = [dy[0], dy[0]]

    if not isinstance(fitsfilename, np.ndarray):
        data = pyfits.getdata(fitsfilename)
        header = pyfits.getheader(fitsfilename)
        # A string 'corners' names a header keyword holding the values:
        if isinstance(corners, str):
            corners = scrapeints(header[corners])
    else:
        # Raw array input: synthesize an empty header.
        data = fitsfilename
        header = pyfits.Header()

    # Expand the requested corners by dx/dy, clipped to the array bounds:
    ny, nx = data.shape
    newcorners = [max(0, corners[0]-dx[0]), min(nx, corners[1]+dx[1]), max(0, corners[2]-dy[0]), min(ny, corners[3]+dy[1])]
    subreg = data[newcorners[2]:newcorners[3], newcorners[0]:newcorners[1]]
    #pdb.set_trace()
    # NOTE(review): header.update(key, value) is the legacy pyfits call
    # signature; astropy's Header.update takes a mapping instead, so these
    # calls likely misbehave under modern astropy -- verify.
    header.update('dx', str(dx))
    header.update('dy', str(dy))
    if kw is not None:
        header.update(kw, str(newcorners))

    if retall:
        ret = subreg, header, newcorners
    else:
        ret = subreg
    return ret
def scrapeints(string):
    """Extract a series of integers from a string.  Slow but robust.

    scrapeints('[124, 56|abcdsfad4589.2]')

    will return:

    [124, 56, 4589, 2]
    """
    # 2012-08-28 16:19 IJMC: Created
    numbers = []
    thisnumber = ''
    for char in string:
        # A character is a digit iff int() accepts it:
        try:
            int(char)
            anumber = True
        except ValueError:
            anumber = False
        if anumber:
            thisnumber = thisnumber + char
        else:
            if len(thisnumber) > 0:
                numbers.append(int(thisnumber))
                thisnumber = ''
    # Flush a trailing run of digits -- the original silently dropped
    # any number that ran to the end of the string (e.g. 'abc123' -> []).
    if len(thisnumber) > 0:
        numbers.append(int(thisnumber))
    return numbers
def array_or_filename(input, kw_getdata=None, kw_array=None, noneoutput=None):
    """If input is a Numpy array (or array-like), return np.array(input).
    If it is of type str, use Pyfits to read in the file and return it.
    Keyword options are available for the calls to pyfits.getdata and
    numpy.array.  If input is None, return noneoutput."""
    # 2012-09-03 11:43 IJMC: Created
    # (Mutable dict defaults replaced with None; pyfits import made lazy
    #  so array/None inputs work even where pyfits isn't installed.)
    if kw_getdata is None:
        kw_getdata = dict()
    if kw_array is None:
        kw_array = dict()
    if input is None:
        output = noneoutput
    elif isinstance(input, str):
        from pyfits import getdata  # only needed for filename input
        output = getdata(input, **kw_getdata)
    else:
        output = np.array(input, **kw_array)
    return output
def roundparams(value, error, seconderror=None, extraplaces=1):
    """Round a value and its associated error(s) to 2 decimal places.

    :INPUTS:
      value, error : scalars
      seconderror : scalar, optional
        a second (e.g. lower) error bar; if given, the result is
        formatted as a LaTeX value^{+err}_{-err2} string.
      extraplaces : int
        extra decimal places (beyond those implied by the error) to
        keep when rounding the value.

    :OUTPUTS:
      retstr : str
        LaTeX-formatted '$value \\pm error$' (or asymmetric-error) string.

    :EXAMPLE:
      ::

        import tools
        tools.roundparams(1.23456, .09876)

    :SEE_ALSO:
      :func:`roundvals`
    """
    # 2012-09-27 17:47 IJMC: Created
    # Decimal position of the error's leading significant digit.
    # int() cast: np.round requires an integer 'decimals' (the original
    # passed a float, which modern numpy rejects); also avoids shadowing
    # the builtin 'pow'.
    if seconderror is not None:
        ndec = int(np.abs(np.floor(np.log10(0.5*(np.abs(error)+np.abs(seconderror))))))
    else:
        ndec = int(np.abs(np.floor(np.log10(np.abs(error)))))

    value = np.round(value, decimals=ndec+extraplaces)
    val1 = np.round(error, decimals=ndec+extraplaces)
    # Format strings show ndec+1 decimal places, signs included:
    fstrp = '%+' + ('1.%i' % (ndec+1)) + 'f'
    fstrm = '%+' + ('1.%i' % (ndec+1)) + 'f'
    if seconderror is not None:
        val2 = np.round(seconderror, decimals=ndec+1)
        fstr = '$%s^{%s}_{%s}$' % (fstrp, fstrp, fstrm)
        retstr = fstr % (value, val1, val2)
    else:
        fstr = '$%s \\pm %s$' % (fstrp, fstrp)
        retstr = fstr % (value, val1)
    return retstr
def sample_3dcdf(cdf, x, y, z, nsamp=1, verbose=False):
    """Sample a 3D Probability Distribution Function (3d-histogram)

    :INPUTS:
      CDF : 3D NumPy array
        Three-dimensional (N x M x L) probability distribution function
        (histogram) from which you wish to draw samples.  This need not
        be in any particular normalized form -- the only condition is
        that the value in each cell is proportional to the probability
        of that cell.

      x, y, z : 1D NumPy arrays
        N+1, M+1, L+1 values indexing the cells of the CDF (one per row/column)

      nsamp : int
        Number of samples to be drawn.

    :RETURNS:
      (xsamples, ysamples, zsamples) -- three 1D arrays of length 'nsamp'.

    :NOTES:
      Note that this approach uses simple, dumb nearest-neighbor
      interpolation when drawing candidate samples.  Exceedingly
      granular CDFs may suffer.

      Note that this code isn't optimized; try it with small samples
      before you use it on large ones!

      NOTE(review): uses Python 2 'print' statements.
    """
    # 2015-07-21 14:23 IJMC: Created from 2D version.

    # Initialize:
    cdf = np.array(cdf, copy=False)
    x = np.array(x, copy=False)
    y = np.array(y, copy=False)
    z = np.array(z, copy=False)
    # NOTE(review): ret1 holds x-samples but uses y.dtype -- harmless
    # when the grids share a dtype; confirm otherwise.
    ret1 = np.zeros(nsamp, dtype=y.dtype)
    ret2 = np.zeros(nsamp, dtype=x.dtype)
    ret3 = np.zeros(nsamp, dtype=x.dtype)
    # Cell centers, used for nearest-neighbor lookup below:
    xcenters = 0.5 * (x[1:] + x[0:-1])
    ycenters = 0.5 * (y[1:] + y[0:-1])
    zcenters = 0.5 * (z[1:] + z[0:-1])

    # Normalize so the densest cell has acceptance probability 1:
    prob = 1.0 * cdf / cdf.max()
    # Mean acceptance rate -> rough number of candidates needed:
    success_estimate = 1.0 * prob.sum() / prob.size
    n_maxsamp = int(np.ceil(2.*nsamp/success_estimate))
    if verbose:
        print "Success estimate is %1.3e" % success_estimate
        print "Will need to draw approximately %i samples to generate desired number (%i)." % (n_maxsamp, nsamp)

    # Generate initial random samples to draw from:
    sampx = np.random.uniform(low=x.min(), high=x.max(), size=n_maxsamp)
    sampy = np.random.uniform(low=y.min(), high=y.max(), size=n_maxsamp)
    sampz = np.random.uniform(low=z.min(), high=z.max(), size=n_maxsamp)
    accept_prob = np.random.uniform(size=n_maxsamp)

    # Classic rejection sampling: accept candidate (x,y,z) with
    # probability proportional to the CDF value of its nearest cell.
    nsampled = 0
    ii = 0
    while nsampled < nsamp:
        # Locate the candidate sample in the CDF array (boolean masks
        # marking the nearest cell center along each axis):
        ind_x = (np.abs(xcenters-sampx[ii])==np.abs(xcenters-sampx[ii]).min())
        ind_y = (np.abs(ycenters-sampy[ii])==np.abs(ycenters-sampy[ii]).min())
        ind_z = (np.abs(zcenters-sampz[ii])==np.abs(zcenters-sampz[ii]).min())
        # Accept or reject the candidate sample:
        if prob[ind_x, ind_y, ind_z] > accept_prob[ii]:
            ret1[nsampled] = sampx[ii]
            ret2[nsampled] = sampy[ii]
            ret3[nsampled] = sampz[ii]
            nsampled += 1
        ii += 1
        # If we ran out of random samples, draw more:
        if ii >= n_maxsamp:
            sampx = np.random.uniform(low=x.min(), high=x.max(), size=n_maxsamp)
            sampy = np.random.uniform(low=y.min(), high=y.max(), size=n_maxsamp)
            sampz = np.random.uniform(low=z.min(), high=z.max(), size=n_maxsamp)
            accept_prob = np.random.uniform(size=n_maxsamp)
            ii = 0

    return ret1, ret2, ret3
def sample_2dcdf(cdf, x, y, nsamp=1, verbose=False):
    """Sample a 2D Probability Distribution Function (2d-histogram)

    :INPUTS:
      CDF : 2D NumPy array
        Two-dimensional (N x M) probability distribution function
        (histogram) from which you wish to draw samples.  This need not
        be in any particular normalized form -- the only condition is
        that the value in each cell is proportional to the probability
        of that cell.

      x : 1D NumPy array
        N+1 Values indexing the cells of the CDF (one per row), evenly spaced

      y : 1D NumPy array
        M+1 Values indexing the cells of the CDF (one per column), evenly spaced

      nsamp : int
        Number of samples to be drawn.

    :RETURNS:
      (xsamples, ysamples) -- two 1D arrays of length 'nsamp'.

    :NOTES:
      Note that this approach uses simple, dumb nearest-neighbor
      interpolation when drawing candidate samples.  Exceedingly
      granular CDFs may suffer.

      Note that this code isn't optimized; try it with small samples
      before you use it on large ones!

      NOTE(review): uses Python 2 'print' statements.
    """
    # 2012-09-30 21:44 IJMC: Created

    # Initialize:
    cdf = np.array(cdf, copy=False)
    x = np.array(x, copy=False)
    y = np.array(y, copy=False)
    # NOTE(review): ret1 holds x-samples but uses y.dtype (and vice
    # versa) -- harmless when x and y share a dtype; confirm otherwise.
    ret1 = np.zeros(nsamp, dtype=y.dtype)
    ret2 = np.zeros(nsamp, dtype=x.dtype)
    # Cell centers, used for nearest-neighbor lookup below:
    xcenters = 0.5 * (x[1:] + x[0:-1])
    ycenters = 0.5 * (y[1:] + y[0:-1])

    # Normalize so the densest cell has acceptance probability 1:
    prob = 1.0 * cdf / cdf.max()
    # Mean acceptance rate -> rough number of candidates needed.
    # NOTE(review): the 3D version applies np.ceil here; this one truncates.
    success_estimate = 1.0 * prob.sum() / prob.size
    n_maxsamp = int(2*nsamp/success_estimate)
    if verbose:
        print "Success estimate is %1.3e" % success_estimate
        print "Will need to draw approximately %i samples to generate desired number (%i)." % (n_maxsamp, nsamp)

    # Generate initial random samples to draw from:
    sampx = np.random.uniform(low=x.min(), high=x.max(), size=n_maxsamp)
    sampy = np.random.uniform(low=y.min(), high=y.max(), size=n_maxsamp)
    accept_prob = np.random.uniform(size=n_maxsamp)

    # Classic rejection sampling over candidate (x,y) pairs:
    nsampled = 0
    ii = 0
    while nsampled < nsamp:
        # Locate the candidate sample in the CDF array (boolean masks
        # marking the nearest cell center along each axis):
        ind_x = (np.abs(xcenters-sampx[ii])==np.abs(xcenters-sampx[ii]).min())#.nonzero()
        ind_y = (np.abs(ycenters-sampy[ii])==np.abs(ycenters-sampy[ii]).min())#.nonzero()
        # Accept or reject the candidate sample:
        if prob[ind_x, ind_y] > accept_prob[ii]:
            ret1[nsampled] = sampx[ii]
            ret2[nsampled] = sampy[ii]
            nsampled += 1
        ii += 1
        # If we ran out of random samples, draw more:
        if ii >= n_maxsamp:
            sampx = np.random.uniform(low=x.min(), high=x.max(), size=n_maxsamp)
            sampy = np.random.uniform(low=y.min(), high=y.max(), size=n_maxsamp)
            accept_prob = np.random.uniform(size=n_maxsamp)
            ii = 0

    return ret1, ret2
def rejectionSampling(priors, fitargs, nsamps):
    """Draw samples from uniform priors and evaluate their log-probabilities.

    :INPUTS:
      priors : sequence of 2-tuples
        (lower, upper) limits for uniform priors, one per parameter.

      fitargs : tuple
        Arguments passed to phasecurves.lnprobfunc for computing the
        log-probability of each sample.

      nsamps : int
        Number of samples to draw.

    :RETURNS:
      (samples, lnprobs) : an (nsamps x nparam) array of parameter
      samples and the corresponding log-probabilities.  Returns -1 if
      any prior is not a 2-tuple.

    :TBD:
      nonuniform priors
      multithreading (Pool.map, rather than map)
    """
    # 2015-12-01 14:58 IJMC: Created
    nparam = len(priors)
    # '<>' is invalid syntax in Python 3; use '!=' instead.
    if np.any([len(pri) != 2 for pri in priors]):
        print("WARNING: for now, all priors must be uniform and entered as 2-tuples.")
        return -1

    priors = np.array(priors, copy=True)
    # Accept either (nparam x 2) or (2 x nparam) layouts:
    if priors.shape[1] > priors.shape[0]:
        priors = priors.T
    lower_limits = priors[:, 0]
    upper_limits = priors[:, 1]

    samples = np.random.uniform(lower_limits, upper_limits, (nsamps, nparam))
    likelihoods = np.array([pc.lnprobfunc(sample, *fitargs) for sample in samples])
    # BUGFIX: the original computed these values but never returned them.
    return samples, likelihoods
def sample_1dcdf(pdf, x, nsamp=1, verbose=False, absnorm=False):
    """Sample a 1D Posterior Distribution Function (1d-histogram)

    :INPUTS:
      PDF : 1D NumPy array
        Distribution function (histogram) from which you wish to draw
        samples.  This need not be in any particular normalized form --
        the only condition is that the value in each cell is
        proportional to the probability of that cell.

      x : 1D NumPy array
        N Values indexing the cells of the PDF (one per row)

      nsamp : int
        Number of samples to be drawn.

      absnorm : bool
        If True, normalize pdf so that it integrates to 1.0.
        NOTE(review): not currently implemented -- the pdf is always
        rescaled by its maximum value.

    :RETURNS:
      1D NumPy array of 'nsamp' sampled values.
    """
    # 2012-10-28 13:13 IJMC: Created.
    # 2012-11-20 16:18 IJMC: Fixed a small bug.

    # Sort so np.interp (which requires increasing x) is valid:
    argind = np.argsort(x)
    x = np.array(x, copy=False)[argind]
    pdf = np.array(pdf, copy=False)[argind]
    ret = np.zeros(nsamp, dtype=x.dtype)

    # Rescale so the acceptance probability peaks at 1:
    prob = 1.0 * pdf / pdf.max()
    success_estimate = 1.0 * prob.sum() / prob.size
    n_maxsamp = int(2 * nsamp / success_estimate)
    if verbose:
        # 'print' statements converted to the function form for Python 3:
        print("Success estimate is %1.3e" % success_estimate)
        print("Will need to draw approximately %i samples to generate desired number (%i)." % (n_maxsamp, nsamp))

    # Vectorized rejection sampling, drawing batches of candidates
    # until enough have been accepted:
    nsampled = 0
    while nsampled < nsamp:
        sampx = np.random.uniform(low=x.min(), high=x.max(), size=n_maxsamp)
        accept_prob = np.interp(sampx, x, prob, left=0.0, right=0.0)
        accept_ind = np.random.uniform(low=0., high=1., size=n_maxsamp) < accept_prob
        n_accepted = accept_ind.sum()
        ind0 = nsampled
        ind1 = min(nsampled + n_accepted, nsamp)
        ret[ind0:ind1] = sampx[accept_ind][0:ind1 - ind0]
        nsampled += n_accepted

    return ret
def textfig(textlist, **kw):
    """Generate a figure from a list of strings.

    :INPUTS:
      textlist : list or str
        List of text strings, one per entry (a bare string is treated
        as a single entry).

    :OPTIONS:
      any options valid for passing to matplotlib.text

      Also: the dict 'figoptions' will be passed to figure(), the dict
      'axoptions' to axes(), and 'fig'/'ax' may supply existing handles.

    :RETURNS:
      (fig, ax) -- handles to Figure and Axes that were created
    """
    # 2013-03-10 13:27 IJMC: Created
    # 2014-08-14 17:41 IJMC: Changed font family to 'monospace',
    #                        because 'Courier' led to bugs when saving
    #                        PDF figures.
    # 2014-10-24 00:10 IJMC: Added 'fig' and 'ax' options.
    # 2015-12-05 06:57 IJMC: Now input can be a string (instead of a list only)
    import pylab as py

    # Handle input options (dict.has_key was removed in Python 3):
    defaults = dict(horizontalalignment='left', fontsize=9, family='monospace', weight='bold', fig=None, ax=None)
    figoptions = kw.pop('figoptions', dict())
    axoptions = kw.pop('axoptions', dict(position=[.02, .02, .96, .9]))
    for key, val in defaults.items():
        kw.setdefault(key, val)

    # Generate figure and axis:
    fig = kw.pop('fig')
    if fig is None:
        fig = py.figure(nextfig(), **figoptions)
    ax = kw.pop('ax')
    if ax is None:
        ax = py.axes(**axoptions)

    # A bare (str-like) input becomes a one-element list:
    try:
        junk = '' + textlist
        textlist = [textlist]
    except TypeError:
        pass

    nlines = len(textlist)
    vertpos = py.linspace(.95, .05, nlines)
    py.xticks([])
    py.yticks([])

    # Plot the text:
    for line, pos in zip(textlist, vertpos):
        py.text(.05, pos, line, transform=ax.transAxes, **kw)

    return fig, ax
def roundvals_latex(input, parenmode=False):
    """Format (value, lower_lim, upper_lim) tuples as LaTeX strings.

    Each element of 'input' (of the form (value, lower_lim, upper_lim))
    is rounded via :func:`roundvals` and rendered for LaTeX.

    :EXAMPLE:
      ::

        import tools
        tools.roundvals_latex([235, -4, 0.045380])
    """
    # 2016-03-16 22:26 IJMC: Created
    # 2016-05-23 16:57 IJMC: Kludgey fix for bigger values

    # list() is required in Python 3, where map() returns an iterator
    # that np.array() cannot consume.  (Stray debug print removed.)
    initial = np.array(list(map(roundvals, input)))
    output = []
    for tup in initial:
        if len(tup) == 2 or (tup[1] == tup[2]):
            # Symmetric errors:
            if parenmode:
                ndigit = 2
                if max(map(float, tup[1:])) > 10:
                    ndigit = int(np.ceil(np.log10(max(map(float, tup[1:])))))
                unc = tup[1][-ndigit:]
                if '.' in unc:
                    unc = tup[1][-3:]
                string = '%s(%s)' % (tup[0], unc)
            else:
                string = '$%s\pm%s$' % tuple(tup[0:2])
        elif len(tup) > 2:
            # Asymmetric errors:
            string = '$%s_{-%s}^{+%s}$' % tuple(tup[0:3])
        else:
            string = '$%s$' % tup[0]
        output.append(string)
    return output
def roundvals(input, ndigit=2, strout=True):
    """Round all input values to the smallest number of digits used.

    :INPUTS:
      input : scalar, or 1D list or array
        values to be rounded

      ndigit : int
        Number of significant digits to be retained

      strout : bool
        Return text strings?  If not, return a 1D NumPy array.

    :EXAMPLE:
      ::

        import tools
        tools.roundvals([235, -4, 0.045380])

    :SEE_ALSO:
      :func:`roundparams`
    """
    # 2013-03-11 08:29 IJMC: Created
    # 2013-04-20 09:35 IJMC: Now, don't be thrown off by zero values.
    # 2014-06-21 14:59 IJMC: Fixed problem with all-NaN inputs.
    # 2016-02-25 05:17 IJMC: Fixed problem with all-zero inputs.
    scalarInput = not hasattr(input, '__iter__')
    if scalarInput:
        input = [input]
    vals = np.array(input, copy=False)

    # Decimal count: driven by the smallest nonzero magnitude present.
    if (vals == 0.).all():
        ndig = 0
    elif np.isfinite(vals).any():
        nonzero = np.abs(vals[np.abs(vals) > 0])
        ndig = np.abs(np.floor(np.log10(nonzero).min()).astype(int)) + (ndigit - 1)
    else:
        ndig = 1

    # All-large inputs get plain integer formatting:
    if (vals >= 10).all():
        precstr, ndig = '%i', 0
    else:
        precstr = '%' + ('1.%i' % ndig) + 'f'

    if strout:
        ret = [precstr % v for v in vals]
    else:
        ret = np.round(vals, ndig)
    return ret[0] if scalarInput else ret
def gelman_rubin(chains, verbose=False):
    """Compute the Gelman-Rubin convergence metric for MCMC chains.

    :INPUTS:
      chains : 2D NumPy array
        A stack of all MCMC chains to be compared, created with
        something like :func:`numpy.vstack`.  The chains must all be
        the same length, and they must have more links than the total
        number of chains.

        OR

      chains : 3D NumPy array
        N chains of L links for P parameters (e.g., the 'chain'
        attribute of an emcee.sampler object), of shape NxLxP.

    :OUTPUTS:
      R metric.  If this is 'close to 1.0,' then your chains have
      converged to the same distribution.  The definition of 'close'
      could be 1.2, 1.1, 1.01... it's up to you!

    :REFERENCE:
      Eq. 20 of Gelman & Rubin 1992, Statistical Sciences, Vol. 7, p. 457
      http://www.star.le.ac.uk/~sav2/idl/rhat.pro
    """
    # 2014-01-23 11:14 IJMC: Created by copying from the IDL code at
    #                        http://www.star.le.ac.uk/~sav2/idl/rhat.pro
    # (Python 2 'print' statements converted to the function form.)
    chains = np.array(chains, copy=False)
    if 0 in chains.shape:
        print("Input array cannot have any dimensions of size zero! Returning 9e99.")
        return np.array([9e99])

    if chains.ndim == 2:
        # Links run along the longer axis:
        dimension = 0 if chains.shape[0] > chains.shape[1] else 1
        nn = chains.shape[dimension]
        # Mean and variance within each chain:
        mean_j = chains.mean(axis=dimension)
        var_j = chains.var(axis=dimension)
        # B and W from Gelman & Rubin (between/within-chain variances):
        B = nn * mean_j.var()
        W = var_j.mean()
        # Gelman-Rubin R^2:
        R2 = (W * (nn - 1) / nn + B / nn) / W
        R = np.array([np.sqrt(R2)])
        if verbose:
            print('-- Mean of each chain: j=0,1,2,...')
            print(mean_j)
            print('-- Variance of each chain: j=0,1,2,...')
            print(var_j)
            print('-- B/N, W, R^2, R, N')
            print((B / nn), W, R2, R, nn)
    elif chains.ndim == 3:
        nn = chains.shape[1]
        mean_j = chains.mean(axis=1)
        var_j = chains.var(axis=1)
        B = nn * mean_j.var(axis=0)
        W = var_j.mean(axis=0)
        R2 = (W * (nn - 1) / nn + B / nn) / W
        R = np.sqrt(R2)
    else:
        print("Must input a 2D or 3D array! Returning 9e99.")
        R = np.array([9e99])

    return R
def get_emcee_start(bestparams, variations, nwalkers, maxchisq, args, homein=True, retchisq=False, depth=np.inf):
    """Get starting positions for EmCee walkers.

    :INPUTS:
      bestparams : sequence (1D NumPy array)
        Optimal parameters for your fitting function (length N)

      variations : 1D or 2D NumPy array
        If 1D, this should be length N and new trial positions will be
        generated using numpy.random.normal(bestparams,
        variations).  Thus all values should be greater than zero!

        If 2D, this should be size (N x N) and we treat it like a
        covariance matrix; new trial positions will be generated using
        numpy.random.multivariate_normal(bestparams, variations).

      nwalkers : int
        Number of positions to be chosen.

      maxchisq : int
        Maximum "chi-squared" value for a test position to be
        accepted.  In fact, these values are computed with
        :func:`phasecurves.errfunc` as errfunc(test_position, *args)
        and so various priors, penalty factors, etc. can also be
        passed in as keywords.

      args : tuple
        Arguments to be passed to :func:`phasecurves.errfunc` for
        computing 'chi-squared' values.

      homein : bool
        If True, "home-in" on improved fitting solutions.  In the
        unlikely event that a randomly computed test position returns
        a better chi-squared than your specified best parameters,
        reset the calculation to start from the new, improved set of
        parameters.

      retchisq : bool
        If True, return the tuple (positions, chisq_at_positions)

      depth : int or np.inf
        Maximum number of "homing-in" recursions allowed.

    :BAD_EXAMPLE:
      ::

        pos0 = tools.get_emcee_start(whitelight_bestfit[0], np.abs(whitelight_bestfit[0])/1000., nwalkers, 10*nobs, mcargs)
    """
    # 2013-05-01 11:18 IJMC: Created
    # 2014-07-24 11:07 IJMC: Fixed typo in warning message.
    from phasecurves import errfunc

    # Accept lists as well as arrays (.ndim is used below):
    variations = np.asarray(variations)

    best_chisq = errfunc(bestparams, *args)
    if best_chisq >= maxchisq:
        print("Specified parameter 'maxchisq' is smaller than the chi-squared value for the specified best parameters. Try increasing maxchisq.")
        return -1

    npar = len(bestparams)
    usecovar = variations.ndim == 2

    pos0 = np.zeros((nwalkers, npar), dtype=float)
    chisq = np.zeros(nwalkers, dtype=float)
    npos = 0
    while npos < nwalkers:
        if usecovar:
            testpos = np.random.multivariate_normal(bestparams, variations)
        else:
            testpos = np.random.normal(bestparams, variations)
        testchi = errfunc(testpos, *args)
        if np.isfinite(testchi) and (testchi < best_chisq) and homein and depth > 0:
            # Found a better optimum: restart from it (bounded by 'depth'):
            return get_emcee_start(testpos, variations, nwalkers, maxchisq, args, homein=homein, retchisq=retchisq, depth=depth-1)
        elif testchi < maxchisq:
            pos0[npos] = testpos
            chisq[npos] = testchi
            npos += 1

    if retchisq:
        return pos0, chisq
    return pos0
def findFrac(validValues, thisValue, retinds=False):
    """Helper tool for simple linear interpolation.

    :INPUTS:
      validValues : sequence
        List of valid values

      thisValue : scalar
        Value of interest.

      retinds : bool
        If True, return indices into 'validValues' instead of the
        closest values themselves.

    :OUTPUTS:
      (TwoClosestValues, relativeFractions)
    """
    # 2013-08-12 09:48 IJMC: Created
    grid = np.array(validValues, copy=False)
    lo, hi = grid.min(), grid.max()

    if thisValue <= lo:
        # Clamp below the grid:
        ret = ([lo], [1.0])
    elif thisValue >= hi:
        # Clamp above the grid:
        ret = ([hi], [1.0])
    else:
        # Two grid points closest to thisValue, weighted by distance:
        nearest = np.argsort(np.abs(grid - thisValue))[0:2]
        v1, v2 = grid[nearest]
        span = v2 - v1
        frac1 = np.abs(1.0 * (v2 - thisValue) / span)
        frac2 = np.abs(1.0 * (v1 - thisValue) / span)
        ret = ([v1, v2], [frac1, frac2])

    if retinds:
        inds = [np.nonzero(grid == val)[0][0] for val in ret[0]]
        ret = (inds, ret[1])
    return ret
def feps_interpol(x, y, a, linear=True):
    """
    Wrapper script for NumPy interpolation.  Culls duplicate values and
    puts x into a monotonically increasing grid.

    :INPUTS:
      x : NumPy array
        1D sequence of values defining the grid coordinates at which
        the input values 'y' are defined.

      y : NumPy array
        1D sequence of values.

      a : NumPy array
        Values of 'x' at which to interpolate from the values of 'y'.

    :RAISES:
      NotImplementedError if linear is not True.

    :EXAMPLE:
      ::

        import numpy as np
        x = np.linspace(-3,3,61)
        v = np.sin(x)
        u = np.array([-2.50, -2.25, -1.85, -1.55, -1.20, -0.85, -0.50, -0.10, \
                       0, 0.75, 0.85, 1.05, 1.45, 1.85, 2.00, 2.25, 2.75 ])
        b = feps_interpol(x,v,u)

    :NOTES:
      Converted from IDL code from J. Bouwman.  Documentation was:
      ;(SH Feb 26 1999)
      ;We need to make the grid mononic therefore spline needs to do
      ;some cleaning before execution
    """
    # 2013-12-01 23:47 IJMC: Translated from IDL.
    if linear is not True:
        # BUGFIX: the original printed a warning and then crashed with
        # a NameError ('bt' undefined); raise a clear exception instead.
        raise NotImplementedError("nonlinear mode not implemented!")

    # Cull duplicate x values:
    u, idx = np.unique(x, return_index=True)
    xt = x[idx]
    yt = y[idx]
    # Sort into a monotonically increasing grid (np.interp requires it):
    idx2 = np.argsort(xt)
    xt = xt[idx2]
    yt = yt[idx2]
    # NOTE(review): results are returned in sorted order of 'a', not in
    # the caller's original order -- confirm this is intended.
    idx3 = np.argsort(a)
    at = a[idx3]
    return np.interp(at, xt, yt)
class modelGrid:
    """Container for a grid of stellar models.

    All fields are initialized to None and filled in later by the
    file readers (e.g. :func:`readDartmouthIsochrones`).
    """
    def __init__(self):
        # Placeholder attributes, populated later by file readers:
        for name in ('MIX_LEN', 'Y', 'Z', 'Zeff', '_Fe_H_', '_a_Fe_',
                     'ages_Gyr', 'isochrones', 'nages', 'nmags',
                     'photometricSystem', 'filename'):
            setattr(self, name, None)
class isochrone:
    """Empty container for a single model isochrone.

    Attributes (photometric bands, 'LogTeff', 'M_Mo', ...) are
    attached dynamically by the file readers.
    """
    def __init__(self):
        pass
def readDartmouthIsochrones(filename, mode='2012', comment='#'):
    """
    Read ASCII-format Dartmouth Isochrone files into Python.

    :INPUTS:
      filename : str
        Filename to load, e.g. 'fehm05afep0.UBVRIJHKsKp'

      mode : str
        Which type of models to load.  For now, '2012' is the only
        valid input.

      comment : str
        Which character(s) indicate(s) a comment, rather than
        tabular/numeric data.

    :OUTPUT:
      A Pythonic object with fields derived from the input file.  It
      will have fields named 'Y', 'Z', 'Zeff', 'ages_Gyr',
      'isochrones', 'photometricSystem', etc.

      The 'isochrones' field is a list of isochrones, one at each age
      step.  Each has fields named for photometric bandpasses, as well
      as standard fields such as 'LogG', 'LogTeff', 'LogL_Lo', 'M_Mo'.

    :NOTES:
      You can download the models at the DSEP website:
      http://stellar.dartmouth.edu/~models/

    :REQUIREMENTS:
      As written, requires NumPy.
    """
    # 2014-08-06 17:31 IJMC: Created

    def iscomment(line):
        # True if this line is a '#' comment line.
        return line.strip()[0] == '#'

    def scrapeval(line, thiskey, endflag=' '):
        # Return the text between 'thiskey' and the next 'endflag'.
        thisind = line.find(thiskey)
        endind = line.find(endflag, thisind)
        return line[thisind + len(thiskey):endind]

    def cleanName(name, bad='/-[]()', good='_'):
        # Sanitize a column name into a valid Python attribute name.
        for badChar in bad:
            name = name.replace(badChar, good)
        return name

    # Context manager guarantees the file handle is closed:
    with open(filename, 'r') as f:
        rawtext = f.readlines()
    nlines = len(rawtext)

    lineno = 0  # renamed from 'iter', which shadows the builtin
    ret = modelGrid()
    ret.isochrones = []
    ret.filename = filename
    niso = 0
    if mode == '2012':
        # First parse the header:
        while iscomment(rawtext[lineno]) and not ('AGE' in rawtext[lineno] and 'EEPS' in rawtext[lineno]):
            if 'NUMBER OF AGES' in rawtext[lineno]:
                ret.nages = int(scrapeval(rawtext[lineno], 'AGES=', 'MAGS'))
                ret.nmags = int(scrapeval(rawtext[lineno], 'MAGS=', '\n'))
                ret.ages_Gyr = np.zeros(ret.nages, dtype=float)
            elif 'MIX-LEN' in rawtext[lineno]:
                param_names = rawtext[lineno].strip('#').split()
                lineno += 1
                param_vals = [float(v) for v in rawtext[lineno].strip('#').split()]
                for name, val in zip(param_names, param_vals):
                    setattr(ret, cleanName(name), val)
            elif 'PHOTOMETRIC SYSTEM' in rawtext[lineno]:
                ret.photometricSystem = scrapeval(rawtext[lineno], '**PHOTOMETRIC SYSTEM**:', '\n')
            lineno += 1

        # Then parse each isochrone's data table:
        while lineno < nlines:
            if ('AGE' in rawtext[lineno] and 'EEPS' in rawtext[lineno]):
                # A new isochrone begins with the "AGE" line:
                iso = isochrone()
                iso.age_Gyr = float(scrapeval(rawtext[lineno], 'AGE=', 'EEPS'))
                iso.EEPS = int(scrapeval(rawtext[lineno], 'EEPS=', '\n'))
                lineno += 1
                # BUGFIX: a list is essential here.  In Python 3 a bare
                # map() would be exhausted by the first loop below, and
                # the later zip() would silently see no column names.
                columnNames = [cleanName(n) for n in rawtext[lineno].strip('#').split()]
                for name in columnNames:
                    setattr(iso, name, np.zeros(iso.EEPS, dtype=float))
                subIter = 0
                ret.ages_Gyr[niso] = iso.age_Gyr
                niso += 1
            elif len(rawtext[lineno].strip()) > 0:
                columnValues = [float(v) for v in rawtext[lineno].split()]
                for name, value in zip(columnNames, columnValues):
                    getattr(iso, name)[subIter] = value
                subIter += 1
                if subIter == iso.EEPS:
                    ret.isochrones.append(iso)
            lineno += 1
    else:
        print("I don't know how to handle mode '%s' -- failing with '-1' flag." % mode)
        ret = -1
    return ret
def addTwoMags(mag1, mag2):
    """Return the total (astronomical) magnitude of two combined sources.

    :INPUTS:
      mag1, mag2 : scalars or NumPy arrays.
        magnitudes of the two sources.

    :RETURNS:
      -2.5 * np.log10(10**(-0.4*mag1) + 10**(-0.4*mag2))
    """
    # 2014-08-10 15:52 IJMC: Created.
    # Convert each magnitude to a flux, sum, and convert back:
    flux_total = 10.0 ** (-0.4 * mag1) + 10.0 ** (-0.4 * mag2)
    return -2.5 * np.log10(flux_total)
def invChisq(dof, conf=0.683):
    """Compute the delta-chi^2 corresponding to the given parameters.

    :INPUTS:
      dof : int, dof > 1
        Number of degrees of freedom (or "interesting parameters")

      conf : float, 0 <= conf <= 1
        Confidence level.  See below for some common choices.

    :RETURNS:
      Desired delta-Chi-squared value.

    :EXAMPLE:
      ::

        # Reproduce Table 1 of Avni (1976)
        import tools
        dofs = [1, 2, 3, 4]
        confs = [0.68, 0.90, 0.99]
        for conf in confs:
            for dof in dofs:
                print("%5.2f " % tools.invChisq(dof, conf=conf))

    :NOTES:
      Some typical values for a Normal (Gaussian) distribution:

      ========= ================
      type      confidence level
      ========= ================
      one-sigma 0.6826895
      2 sigma   0.9544997
      3 sigma   0.9973002
      4 sigma   0.9999366
      5 sigma   0.9999994
      ========= ================
    """
    # 2014-08-11 20:23 IJMC: Created
    # 2014-10-23 22:26 IJMC: New starting guess for fsolve.
    from scipy.optimize import fsolve
    # BUGFIX: scipy.stats.chisqprob was deprecated and removed from
    # SciPy; chi2.sf is the equivalent survival function.
    from scipy.stats import chi2

    if conf == 0:
        return 0

    def minFunc(dchi):
        return chi2.sf(dchi, dof) - (1. - conf)

    out = fsolve(minFunc, dof)
    try:
        ret = out[0]
    except (TypeError, IndexError):
        ret = out
    return ret
def resampleIsochrone(iso, x, xName='M_Mo', fields='*all*'):
    """Resample parameters of an isochrone-type object.

    :INPUTS:
      iso : object
        Isochrone sub-field object of a stellar model-grid object, of
        the type returned by :func:`readDartmouthIsochrones`.

      x : 1D NumPy array
        The new values to use for interpolation or resampling.

      xName : string
        The name of the field in 'isochrone.'

      fields : string
        Which fields to resample.  If '*all*', all sub-fields of
        'isochrone' with the same size as getattr(isochrone, xName)
        will be resampled.

    :NOTES:
      We use numpy.interp() for the resampling; if 'x' is not an
      always-increasing array then interp() may have
      problems.  Similarly, interp() offers no extrapolation beyond the
      original limits of iso.xName.
    """
    # 2014-08-12 14:14 IJMC: Created.
    if fields == '*all*':
        fields = dir(iso)

    newIso = isochrone()
    xOld = getattr(iso, xName)
    for attr in dir(iso):
        value = getattr(iso, attr)
        if attr == xName:
            # The independent variable takes the new grid directly:
            setattr(newIso, attr, x)
            continue
        resamplable = (attr in fields and hasattr(value, '__iter__')
                       and len(value) == len(xOld))
        if resamplable:
            setattr(newIso, attr, np.interp(x, xOld, value))
        else:
            # Copy everything else through unchanged:
            setattr(newIso, attr, value)
    return newIso
def obj2FITS(input):
    """Try to convert any generic object into a multi-extension FITS
    file.  We examine each attribute of the object: if a string or
    scalar, we add it to the Primary HDU header.  If a sequence of
    numbers, we add it as an extension with the appropriate name.

    It probably wouldn't work for hierarchical objects whose
    attributes are themselves complicated objects; probably also won't
    work for complex-valued arrays.
    """
    # 2014-08-15 22:26 IJMC: Created.
    import re
    # FSC commentary cards must contain printable ASCII characters.
    # Note: \Z matches the end of the string without allowing newlines.
    _ascii_text_re = re.compile(r'[ -~]*\Z')
    # BUGFIX: the original iterated over the characters of the regex
    # pattern itself ('[ -~]*\n\Z'), stripping ' ', '-', '~', '*',
    # 'Z', etc. from every header value.  Only genuinely disallowed
    # characters (newlines) are stripped now.
    badchars = '\n'

    try:
        from astropy.io import fits as pyfits
    except ImportError:
        import pyfits

    fields = dir(input)
    hdu1 = pyfits.PrimaryHDU([0])
    hdu1.header['OBJ2FITS'] = 'Input object converted to FITS file.'
    hdus = [hdu1]
    for field in fields:
        val = getattr(input, field)
        # Scalars go straight into the primary header:
        try:
            junk = float(val)
            valIsScalar = not isinstance(val, np.ndarray)
        except (TypeError, ValueError):
            valIsScalar = False
        if valIsScalar:
            hdu1.header[field] = val
        else:
            # NOTE(review): in Python 3, plain str has no .decode, so
            # str-valued fields fall through to the array branch --
            # confirm whether that is intended.
            try:
                text_value = val.decode('ascii')
            except (AttributeError, UnicodeDecodeError):
                validString = False
            else:
                # Check against the printable-characters regexp as well:
                for badchar in badchars:
                    text_value = text_value.replace(badchar, '')
                m = _ascii_text_re.match(text_value)
                validString = m is not None
            if validString:
                hdu1.header[field] = text_value
            else:
                # Last resort: try to store it as a numerical extension.
                try:
                    junk = np.array(val) + 0
                    valIsNumericalArray = True
                    hdus.append(pyfits.ImageHDU(val, name=field))
                    hdu1.header[field] = 'Extension %i' % (len(hdus) - 1)
                except Exception:
                    valIsNumericalArray = False
                    hdu1.header[field] = 'Could not extract field from input object.'
    return pyfits.HDUList(hdus)
def cardUndefined(headercard):
    """Return True if a PyFITS/astropy header-card value is undefined.

    Compares the card value against the UNDEFINED sentinel(s) of the
    installed FITS library (astropy.io.fits if available, else the
    legacy pyfits package).
    """
    try:
        from astropy.io import fits as pyfits
    except ImportError:  # narrowed from a bare 'except:'
        import pyfits
    return (headercard is pyfits.card.UNDEFINED) or \
           (headercard is pyfits.card.Undefined)
def headerToDict(header):
    """Convert a PyFITS header into a standard Python dict.

    Undefined card values become empty strings; card comments are
    collected under the special key 'comments'.
    """
    # 2014-08-27 11:33 IJMC: Created
    ret = dict()
    comments = dict()
    for key, val, comment in header.cards:
        if cardUndefined(val):
            val = ''
        ret[key] = val
        comments[key] = comment
    ret['comments'] = comments
    return ret
def contourg(*args, **kw):
    """Plot filled contours of irregularly-spaced data.

    :USAGE:
      First three inputs must use syntax 'contour(X,Y,Z)'.
      Can set "nbinx=50" or "nbiny=50" for number of bins.

      Otherwise, syntax is the same as for matplotlib's 'contourf'.

    :SEE_ALSO:
      :func:`plotc`
    """
    # 2014-10-06 10:42 IJMC: Created
    from pylab import contourf
    from scipy.interpolate import griddata

    # pop() with a default replaces the old "'key' in kw.keys()" dance:
    nbinx = kw.pop('nbinx', 50)
    nbiny = kw.pop('nbiny', 50)

    x0, y0, z0 = args[0:3]
    # Keep only points where all three coordinates are finite:
    finite = np.isfinite(x0) * np.isfinite(y0) * np.isfinite(z0)
    x1, y1, z1 = x0[finite], y0[finite], z0[finite]

    # Interpolate the scattered points onto a regular grid:
    x = np.linspace(x1.min(), x1.max(), nbinx)
    y = np.linspace(y1.min(), y1.max(), nbiny)
    z = griddata((x1, y1), z1, (x[None, :], y[:, None]), method='linear')

    cargs = (x, y, z) + args[3:]
    return contourf(*cargs, **kw)
def equiv_width(wavelength, spectrum, feature_location, blue_continuum, red_continuum, err_spectrum=None, oversamp=10, continuum_order=1, plotfig=False):
    """
    Measure the Equivalent Width (EW) of a specified feature in a spectrum.

    :INPUTS:
      wavelength (1d NumPy array)

      spectrum (1d NumPy array)

      feature_location (2-sequence)
        Shortest and longest wavelength values to be included in the
        feature to be measured.

      blue_continuum (2-sequence)
        Shortest and longest wavelength values to use for the blue
        side of the continuum.

      red_continuum (2-sequence)
        Shortest and longest wavelength values to use for the red
        side of the continuum.

    :OPTIONS:
      err_spectrum (1d NumPy array)
        Uncertainties on 'spectrum' input.

      oversamp (int)
        Factor to oversample spectrum when measuring flux. 10 is usually fine.

      continuum_order (positive int)
        Polynomial order (for numpy.polyfit) to measure continuum.

      plotfig (bool)
        If True, plot a pretty picture (requires matplotlib).

    :RETURNS:
      (EW, err_EW), or a bare np.nan if the requested regions contain
      no datapoints.

    :NOTES:
      EW of emission lines will be negative.

      Spectrum, feature_location, blue_continuum, and red_continuum
      must all have the same units!
    """
    # 2015-07-15 16:04 IJMC: Created.
    # Sanitize options:
    if oversamp is None or oversamp <= 1:
        oversamp = 1
    # BUGFIX: the original clobbered 'oversamp' here, leaving
    # continuum_order=None to crash in polyfit below.
    if continuum_order is None or continuum_order < 0:
        continuum_order = 0
    if err_spectrum is None:
        err_spectrum = np.ones(spectrum.size)

    if oversamp > 1:  # Resample spectrum:
        n = wavelength.size
        index = np.arange(n)
        index_new = np.linspace(0, n - 1, int(n * oversamp))
        wavelength_new = np.interp(index_new, index, wavelength)
        spectrum = np.interp(wavelength_new, wavelength, spectrum)
        # sqrt(oversamp) keeps the propagated EW uncertainty unchanged:
        err_spectrum = np.interp(wavelength_new, wavelength, err_spectrum) * np.sqrt(oversamp)
        wavelength = wavelength_new

    # Construct Boolean index arrays for continuum and feature:
    blue_cont = (wavelength >= blue_continuum[0]) * \
        (wavelength < blue_continuum[1])
    red_cont = (wavelength >= red_continuum[0]) * \
        (wavelength < red_continuum[1])
    continuum_region = blue_cont + red_cont
    feature_region = (wavelength >= feature_location[0]) * \
        (wavelength < feature_location[1])

    # Basic error-checking:
    if continuum_region.sum() == 0:
        print("Continuum regions were too narrow! No datapoints found. Try again with bigger continuum regions.")
        return np.nan
    if not feature_region.any():
        print("Specified region for feature was too narrow! No datapoints found. Try again with wider wavelength range.")
        return np.nan

    # Fit for continuum:
    continuum_fit = np.polyfit(wavelength[continuum_region], spectrum[continuum_region], continuum_order)
    continuum = np.polyval(continuum_fit, wavelength)

    # Determine the width of each pixel:
    dwave_centers = np.diff(wavelength)
    wavelength_centers = 0.5 * (wavelength[1:] + wavelength[0:-1])
    dwave_fit = np.polyfit(wavelength_centers, dwave_centers, 2)
    dwave = np.polyval(dwave_fit, wavelength)

    # Definition of E.W. (negative for emission lines):
    ew = ((1. - spectrum / continuum) * dwave)[feature_region].sum()
    err_ew = np.sqrt((((err_spectrum / continuum) * dwave)[feature_region] ** 2).sum())

    if plotfig:
        # Imported lazily so non-plotting calls don't need matplotlib:
        import pylab as py
        allbounds = np.concatenate((blue_continuum, red_continuum, feature_location))
        bounds_min, bounds_max = allbounds.min(), allbounds.max()
        bounds_mean = 0.5 * (bounds_min + bounds_max)
        dbounds = (bounds_max - bounds_min) * 0.55
        plotrange = bounds_mean - dbounds, bounds_mean + dbounds
        plotind = (wavelength >= plotrange[0]) * (wavelength < plotrange[1])
        py.figure()
        ax1 = py.subplot(211)
        py.plot(wavelength[plotind], spectrum[plotind], color='gray')
        py.plot(wavelength[blue_cont], spectrum[blue_cont], '-b', linewidth=2)
        py.plot(wavelength[red_cont], spectrum[red_cont], '-r', linewidth=2)
        py.plot(wavelength[continuum_region], continuum[continuum_region], '--k')
        py.plot(wavelength[feature_region], spectrum[feature_region], '-k', linewidth=2)
        ax2 = py.subplot(212)
        py.plot(wavelength[plotind], (spectrum / continuum)[plotind], color='gray')
        py.plot(wavelength[blue_cont], (spectrum / continuum)[blue_cont], '-b', linewidth=2)
        py.plot(wavelength[red_cont], (spectrum / continuum)[red_cont], '-r', linewidth=2)
        py.plot(wavelength[feature_region], (spectrum / continuum)[feature_region], '-k', linewidth=2)
        py.plot(wavelength[plotind], [1] * plotind.sum(), '--k')
        [ax.set_xlim(plotrange) for ax in [ax1, ax2]]
        [ax.minorticks_on() for ax in [ax1, ax2]]
        py.xlabel('Wavelength')
        ax1.set_ylabel('Flux')
        ax2.set_ylabel('Normalized Flux')

    return ew, err_ew
def set_ticks_both(axis):
    """Enable tick marks and labels on both sides of a matplotlib axis."""
    # Concatenate copies so we never mutate the axis's own tick lists:
    all_ticks = list(axis.majorTicks) + list(axis.minorTicks)
    for tick in all_ticks:
        tick.tick1On = True    # tick marker on left (or bottom)
        tick.tick2On = True    # tick marker on right (or top)
        tick.label1On = True   # tick label marker on left (or bottom)
        tick.label2On = True   # tick label on right (or top)
def isstring(input):
    """ Determine whether input is a string or string-like.

    Returns True if `input` supports concatenation with a str (true for
    str and str subclasses), False otherwise.
    """
    #2015-08-28 03:54 IJMC: Created
    try:
        input + ' '
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # are no longer silently swallowed by the duck-typing probe.
        return False
def isnumeric(input):
    """Determine whether input is a number or number-like (or a string
    representation thereof).

    Returns True if `float(input)` succeeds (ints, floats, bools, numeric
    strings, numpy scalars, ...), False otherwise.
    """
    # 2016-01-20 11:48 IJMC: Created
    try:
        float(input) + 0.
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # propagate; float() failures (TypeError/ValueError/OverflowError)
        # still mean "not numeric".
        return False
def dumleg(ax, nums, labs=None, **kw):
    """Create a quick dummy legend: empty shapes with given sizes & labels.
    **kw --- keywords passed on to matplotlib.legend()
    e.g., dumleg(ax1, [3, 10, 30], loc=3, numpoints=1)
    """
    # 2016-12-02 20:12 IJMC: Created
    order = np.argsort(nums)
    sorted_nums = np.array(nums)[order]
    if labs is None:
        sorted_labs = map(str, sorted_nums)
    else:
        sorted_labs = np.array(labs)[order]
    # Remember the current view limits so the dummy points don't rescale axes.
    saved_limits = ax.axis()
    for size, label in zip(sorted_nums, sorted_labs):
        # Plot a single marker purely to register a legend entry.
        ax.plot([-1], [-1], 'ok', ms=size, mfc='white', label=label)
    ax.axis(saved_limits)
    return ax.legend(**kw)
| {
"repo_name": "iancrossfield/aries_reduce",
"path": "tools.py",
"copies": "1",
"size": "148551",
"license": "mit",
"hash": -2529292207197001000,
"line_mean": 30.0971320913,
"line_max": 241,
"alpha_frac": 0.5617397392,
"autogenerated": false,
"ratio": 3.3973150985683573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44590548377683575,
"avg_score": null,
"num_lines": null
} |
"""A collection of tools to handle genomic tests, in particular Tajima's D.
Part of the biostructmap package.
"""
from __future__ import absolute_import, division, print_function
from io import StringIO
import warnings
from math import log
from Bio import AlignIO
from Bio.Data import IUPACData
from numpy import mean
import dendropy
from .seqtools import _sliding_window_var_sites, check_for_uncertain_bases
from .population_stats import calculate_tajimas_d, calculate_nucleotide_diversity
from .population_stats import calculate_wattersons_theta
def shannon_entropy(alignment, table='Standard',
                    protein_letters=IUPACData.protein_letters,
                    normalized=False, gap='-'):
    '''
    Calculate mean Shannon entropy for all residues in a genomic alignment.

    Each sequence is translated with the given codon table, the translated
    alignment is split into per-residue columns, and the mean of the
    per-column Shannon entropies is returned.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object.
        table: A codon lookup table used by the Bio.Seq.translate() method.
        protein_letters (str, optional): The amino acid alphabet. Residues
            outside this alphabet trigger a warning and skew entropy values.
        normalized (bool): Normalize such that entropy is in the range [0, 1].
        gap (str, optional): Gap character passed through to translate().

    Returns:
        float: Mean Shannon entropy, or None for an empty alignment.
    '''
    if isinstance(alignment, str):
        alignment = AlignIO.read(StringIO(alignment), format='fasta')
    if is_empty_alignment(alignment):
        return None
    # Translate each sequence, then regroup the alignment by column.
    translated = [str(record.seq.translate(table=table, gap=gap))
                  for record in alignment]
    columns = list(zip(*translated))
    unknown = {residue for column in columns
               for residue in column}.difference(protein_letters)
    if unknown:
        warnings.warn("Multiple sequence alignment contains residues that aren't "\
                      "in the provided alphabet. Entropy values will not be "\
                      "accurate - consider supplying an extended amino acid "\
                      "alphabet to the `protein_letters` keyword argument. "\
                      "Offending residue(s) are: {res}".format(res=str(unknown)))
    per_site = [_calculate_shannon_entropy(column, protein_letters, normalized)
                for column in columns]
    return mean(per_site)
def _calculate_shannon_entropy(seq, protein_letters=IUPACData.protein_letters,
                               normalized=False):
    '''
    Calculate Shannon entropy from a set of residues from a single position.

    Args:
        seq (str/tuple): Residues observed at a single alignment column.
        protein_letters (str, optional): String of all protein letters being
            used to define the amino acid alphabet. Defaults to standard 20
            amino acids. If another alphabet is used, the maximum Shannon
            entropy changes accordingly.
        normalized (bool): Normalize such that entropy is in the range [0, 1].

    Returns:
        float: Shannon entropy.
    '''
    # Normalization divides entropy by the maximum entropy, which is
    # mathematically the same as taking logarithms in base
    # len(protein_letters) instead of base 2.
    base = len(protein_letters) if normalized else 2
    total = len(seq)
    entropy = 0.0
    for letter in protein_letters:
        # Count each residue once per letter (previously seq.count() was
        # evaluated three times per letter).
        count = seq.count(letter)
        if count:
            frequency = count / total
            entropy -= frequency * log(frequency, base)
    return entropy
def tajimas_d(alignment, window=None, step=3):
    """
    Uses DendroPy package to calculate Tajimas D.

    Notes:
        Several optimisations are used to speed up the calculation, including
        memoisation of the previous window result, which is reused whenever
        the set of polymorphic sites in the window is unchanged.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object.
        window (int, optional): The size of the sliding window over which
            Tajima's D is calculated. Default is None, in which case a
            single Tajima's D value is calculated for the whole alignment.
        step (int, optional): Step size for sliding window calculation.
            Default step size of 3 (ie. one codon).

    Returns:
        float/dict: If window parameter is None, returns a single value for
            Tajima's D. Otherwise a dict mapping genome window midpoint to
            calculated Tajima's D values is returned.
    """
    if not window:
        return _tajimas_d(alignment)
    if isinstance(alignment, str):
        alignment = AlignIO.read(StringIO(alignment), 'fasta')
    results = {}
    previous_window = None
    previous_d = None
    windows = _sliding_window_var_sites(alignment, window, step=step)
    for index, current_window in enumerate(windows):
        midpoint = index * step + 1 + (window - 1) / 2
        if current_window == previous_window:
            # Same polymorphic sites as the previous window: reuse the value.
            results[midpoint] = previous_d
        else:
            d_value = _tajimas_d(current_window)
            results[midpoint] = d_value
            previous_d = d_value
            previous_window = current_window
    return results
def _tajimas_d_old(alignment):
    """
    Uses DendroPy to calculate tajimas D.

    If Tajima's D is undefined (ie. Dendropy Tajima's D method raises a
    ZeroDivisionError), then this method returns None.

    Note: This approach is SLOW for large numbers of sequences.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object.

    Returns:
        float: Tajima's D value. Returns None if Tajima's D is undefined.
    """
    data = alignment if isinstance(alignment, str) else alignment.format('fasta')
    if is_empty_alignment(alignment):
        return None
    try:
        matrix = dendropy.DnaCharacterMatrix.get(data=data,
                                                 schema='fasta')
        return dendropy.calculate.popgenstat.tajimas_d(matrix)
    except ZeroDivisionError:
        # Tajima's D is undefined for this alignment.
        return None
def _tajimas_d(alignment):
    '''A faster Tajima's D calculation.

    If Tajima's D is undefined (ie. the calculation raises a
    ZeroDivisionError), then this method returns None.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object.

    Returns:
        float: Tajima's D value. Returns None if Tajima's D is undefined.
    '''
    if is_empty_alignment(alignment):
        return None
    try:
        sequences = [str(record.seq) for record in alignment]
    except AttributeError:
        # A FASTA string was supplied rather than an alignment object.
        alignment = AlignIO.read(StringIO(alignment), 'fasta')
        sequences = [str(record.seq) for record in alignment]
    try:
        if check_for_uncertain_bases(sequences):
            # Ambiguous/missing bases: defer to the slower DendroPy path.
            return _tajimas_d_old(alignment)
        return calculate_tajimas_d(sequences)
    except ZeroDivisionError:
        return None
def nucleotide_diversity(alignment):
    """
    A faster nucleotide diversity calculation (compared to DendroPy).

    If nucleotide diversity is undefined, returns None.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object.

    Returns:
        float: Nucleotide diversity value. Returns None if nucleotide
            diversity is undefined.
    """
    if is_empty_alignment(alignment):
        return None
    try:
        sequences = [str(record.seq) for record in alignment]
    except AttributeError:
        # A FASTA string was supplied rather than an alignment object.
        alignment = AlignIO.read(StringIO(alignment), 'fasta')
        sequences = [str(record.seq) for record in alignment]
    try:
        if check_for_uncertain_bases(sequences):
            # Ambiguous/missing bases: defer to the slower DendroPy path.
            return nucleotide_diversity_old(alignment)
        return calculate_nucleotide_diversity(sequences)
    except ZeroDivisionError:
        return None
def nucleotide_diversity_old(alignment):
    """
    Use DendroPy to calculate nucleotide diversity.

    If nucleotide diversity is undefined, returns None.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object.

    Returns:
        float: Nucleotide diversity value. Returns None if nucleotide
            diversity is undefined.
    """
    if is_empty_alignment(alignment):
        return None
    if not isinstance(alignment, str):
        data = alignment.format('fasta')
    else:
        data = alignment
    seq = dendropy.DnaCharacterMatrix.get(data=data, schema='fasta')
    try:
        diversity = dendropy.calculate.popgenstat.nucleotide_diversity(seq)
    except ZeroDivisionError:
        # The docstring promises None when diversity is undefined; previously
        # the ZeroDivisionError escaped, unlike _tajimas_d_old's handling.
        diversity = None
    return diversity
def is_empty_alignment(alignment):
    """Returns True if alignment is empty.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object, either as a Bio.Align.MultipleSequenceAlignment
            or a biostructmap.SequenceAlignment object.

    Returns:
        bool: True if alignment is empty, False otherwise.
    """
    if isinstance(alignment, str):
        lines = alignment.split('\n')
        # A non-empty FASTA string needs at least a header line and a
        # non-empty sequence line. Previously an empty or single-line
        # string raised IndexError here; treat it as empty instead.
        return len(lines) < 2 or len(lines[1]) == 0
    if not alignment or len(alignment[0]) == 0:
        return True
    return False
def wattersons_theta(alignment):
    """
    A faster Watterson's Theta calculation (compared to DendroPy).

    If Watterson's Theta is undefined, returns None.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object.

    Returns:
        float: Watterson's Theta value. Returns None if Watterson's Theta is
            undefined.
    """
    if is_empty_alignment(alignment):
        return None
    try:
        sequences = [str(record.seq) for record in alignment]
    except AttributeError:
        # A FASTA string was supplied rather than an alignment object.
        alignment = AlignIO.read(StringIO(alignment), 'fasta')
        sequences = [str(record.seq) for record in alignment]
    try:
        if check_for_uncertain_bases(sequences):
            # Ambiguous/missing bases: defer to the slower DendroPy path.
            return wattersons_theta_old(alignment)
        return calculate_wattersons_theta(sequences)
    except ZeroDivisionError:
        return None
def wattersons_theta_old(alignment):
    """
    Use DendroPy to calculate Watterson's Theta.

    If Watterson's Theta is undefined, returns None.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): A multiple
            sequence alignment string in FASTA format or a multiple sequence
            alignment object.

    Returns:
        float: Watterson's Theta value. Returns None if Watterson's Theta is
            undefined.
    """
    if not isinstance(alignment, str):
        data = alignment.format('fasta')
    else:
        data = alignment
    if is_empty_alignment(alignment):
        return None
    seq = dendropy.DnaCharacterMatrix.get(data=data, schema='fasta')
    try:
        theta = dendropy.calculate.popgenstat.wattersons_theta(seq)
    except ZeroDivisionError:
        # The docstring promises None when Theta is undefined; previously
        # the ZeroDivisionError escaped, unlike _tajimas_d_old's handling.
        theta = None
    return theta
| {
"repo_name": "andrewguy/biostructmap",
"path": "biostructmap/gentests.py",
"copies": "1",
"size": "12715",
"license": "mit",
"hash": -7670846991746990000,
"line_mean": 37.0688622754,
"line_max": 89,
"alpha_frac": 0.6528509634,
"autogenerated": false,
"ratio": 4.134959349593496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012787025525032158,
"num_lines": 334
} |
'''A collection of tools to handle sequence manipulation.
Part of the biostructmap package.
'''
from __future__ import absolute_import, division, print_function
from io import StringIO
import operator
import re
import subprocess
import tempfile
import warnings
from Bio import AlignIO
from Bio.Blast.Applications import NcbiblastpCommandline
from Bio.Blast import NCBIXML
from Bio.pairwise2 import align
from Bio.SubsMat import MatrixInfo as matlist
from Bio.Seq import Seq
# Module-level feature flags, read at call time by the dispatch helpers
# align_protein_sequences() and align_protein_to_dna() below.
#Use local BLAST+ installation. Falls back to pairwise2 if False.
LOCAL_BLAST = True
#Use local exonerate installation to align dna to protein sequences.
#Falls back to a basic method using either BLAST+ or pairwise2 if False,
#but won't take into consideration introns or frameshift mutations.
LOCAL_EXONERATE = True
def _sliding_window(seq_align, window, step=3, fasta_out=False):
    '''
    Generate a Multiple Sequence Alignment over a sliding window.

    Input is either a filehandle, or a Bio.AlignIO multiple sequence
    alignment object.

    Args:
        seq_align: A multiple sequence alignment. Either a filehandle, or
            Bio.AlignIO multiple sequence alignment object.
        window (int): Sliding window width.
        step (int, optional): Step size to increment each window. Default of 3.
        fasta_out (bool): If True, output will be a fasta formatted string. If
            False, then output will be an AlignIO object.

    Yields:
        str/MultipleSequenceAlignment: The next window in the sliding window
            series for the original multiple sequence alignment.
    '''
    try:
        msa = AlignIO.read(seq_align, 'fasta')
    except (AttributeError, ValueError):
        # Already a multiple sequence alignment object.
        msa = seq_align
    total_length = len(msa[0])
    start = 0
    while start < total_length - window:
        window_slice = msa[:, start:start + window]
        yield window_slice.format('fasta') if fasta_out else window_slice
        start += step
def _sliding_window_var_sites(seq_align, window, step=3):
    '''
    Generate a Multiple Sequence Alignment over a sliding window, only
    including polymorphic sites in the alignment.

    Notes:
        Yields an empty MultipleSequenceAlignment object if no polymorphic
        sites are found within the window.

    Args:
        seq_align: A multiple sequence alignment. Either a filehandle, or
            Bio.AlignIO multiple sequence alignment object.
        window (int): Sliding window width.
        step (int, optional): Step size to increment each window. Default of 3.

    Yields:
        MultipleSequenceAlignment: The next window in the sliding window
            series for the original multiple sequence alignment, with
            only polymorphic sites displayed.
    '''
    try:
        alignments = AlignIO.read(seq_align, 'fasta')
    except (AttributeError, ValueError):
        # Already a multiple sequence alignment object.
        alignments = seq_align
    #Length of alignments
    length = len(alignments[0])
    # Map of column index -> single-column alignment, for polymorphic
    # columns only. Built once; each window is then assembled incrementally.
    align_dict = _var_site(alignments)
    #Create first window
    initial_sites = {key:value for (key, value) in align_dict.items()
                     if key < window}
    #Small hack to set type of 'initial_sites' variable if no alignments fall
    #within initial window
    initial_sites[-1] = alignments[:, 0:0]
    alignment = _join_alignments(initial_sites)
    yield alignment
    # Add/remove sites from the end/start of window as appropriate.
    # For each step the window slides by `step` columns: any polymorphic
    # column that falls off the front is dropped (leftmost column removed),
    # and any polymorphic column entering at the back is appended.
    for i in range(0, (length-window), step):
        for j in range(step):
            if i + j in align_dict:
                alignment = alignment[:, 1:]
            if i + j + window in align_dict:
                alignment = alignment + align_dict[i+j+window]
        yield alignment
def _var_site(alignment):
'''
Take a multiple sequence alignment object and return polymorphic sites in a
dictionary object.
This function is used to simplify the input to a tajima's D calculation.
Args:
alignment: A multiple sequence alignment object.
Returns:
dict: A dictionary containing polymorphic sites (value) accessed by
position in the alignment (key).
'''
result = {}
for i in range(len(alignment[0])):
site = alignment[:, i]
#Check if string contains a single character. Most efficient method
#found so far.
if site != len(site) * site[0]:
result[i] = alignment[:, i:i+1]
return result
def _join_alignments(align_dict):
'''
Take a dictionary of multiple sequence alignments, and join according to
dictionary key order (generally position in sequence).
Args:
align_dict (dict): A dictionary containing single-site multiple sequence
alignment objects accessed by position in original alignment.
Returns:
MultipleSequenceAlignment: A multiple sequence alignment object
containing all polymorphic sites.
'''
output = None
for key in sorted(align_dict):
if not output:
output = align_dict[key]
else:
output = output + align_dict[key]
return output
def align_protein_sequences(comp_seq, ref_seq):
    '''
    Perform a pairwise alignment of two sequences.

    Uses BLAST+ if LOCAL_BLAST is set to True, otherwise uses Bio.pairwise2.

    Args:
        comp_seq (str): A comparison protein sequence.
        ref_seq (str): A reference protein sequence.

    Returns:
        dict: A dictionary mapping comparison sequence numbering (key) to
            reference sequence numbering (value)
        dict: A dictionary mapping reference sequence numbering (key) to
            comparison sequence numbering (value)
    '''
    # Dispatch on the module-level LOCAL_BLAST feature flag.
    aligner = blast_sequences if LOCAL_BLAST else pairwise_align
    return aligner(comp_seq, ref_seq)
def align_protein_to_dna(prot_seq, dna_seq):
    '''
    Aligns a protein sequence to a genomic sequence.

    If the LOCAL_EXONERATE flag is True, introns, frameshifts and
    reverse-sense translation are taken into account (via Exonerate).
    If False, a simple translation plus pairwise alignment is performed,
    which considers none of those.

    Note:
        This method uses the external program Exonerate:
        http://www.ebi.ac.uk/about/vertebrate-genomics/software/exonerate
        This needs to be installed in the users PATH.

    Args:
        prot_seq (str): A protein sequence.
        dna_seq (str): A genomic or coding DNA sequence

    Returns:
        dict: A dictionary mapping protein residue numbers to codon positions:
            e.g. {3:(6,7,8), 4:(9,10,11), ...}
    '''
    # Dispatch on the module-level LOCAL_EXONERATE feature flag.
    aligner = (_align_prot_to_dna_exonerate if LOCAL_EXONERATE
               else _align_prot_to_dna_no_exonerate)
    return aligner(prot_seq, dna_seq)
def _align_prot_to_dna_no_exonerate(prot_seq, dna_seq):
    '''
    Aligns a protein sequence to a genomic sequence. Does not consider
    introns, frameshifts or reverse-sense translation.

    If these are required, the Exonerate method should be used instead.

    Args:
        prot_seq (str): A protein sequence.
        dna_seq (str): A genomic or coding DNA sequence

    Returns:
        dict: A dictionary mapping protein residue numbers to codon positions:
            e.g. {3:(6,7,8), 4:(9,10,11), ...}
    '''
    # Translate the DNA, then reuse the protein-protein alignment machinery.
    translated = str(Seq(dna_seq).translate())
    residue_map, _ = align_protein_sequences(prot_seq, translated)
    # Residue `pos` (1-indexed) of the translation maps to 1-indexed DNA
    # bases 3*pos-2 .. 3*pos.
    return {residue: (pos * 3 - 2, pos * 3 - 1, pos * 3)
            for residue, pos in residue_map.items()}
def pairwise_align(comp_seq, ref_seq):
    '''
    Perform a pairwise alignment of two sequences.

    Uses the BioPython pairwise2 module with the BLOSUM62 matrix for scoring
    similarity. Gap opening penalty is -11 and gap extend penalty is -1,
    which is the same as the default blastp parameters.

    Output is two dictionaries: residue numbering in PDB chain (key) mapped to
    the residue position in the reference sequence (value), and vice versa.

    Args:
        comp_seq (str): A comparison protein sequence.
        ref_seq (str): A reference protein sequence.

    Returns:
        dict: A dictionary mapping comparison sequence numbering (key) to
            reference sequence numbering (value)
        dict: A dictionary mapping reference sequence numbering (key) to
            comparison sequence numbering (value)
    '''
    best = align.globalds(comp_seq, ref_seq, matlist.blosum62, -11, -1,
                          penalize_end_gaps=False, one_alignment_only=True)[0]
    aligned_query = best[0]
    aligned_sbjct = best[1]
    # Walk both aligned strings in lockstep, advancing a 1-indexed counter
    # for each real residue (gap characters are not alphabetic).
    pdb_to_ref = {}
    ref_to_pdb = {}
    query_pos = 1
    ref_pos = 1
    for idx, q_res in enumerate(aligned_query):
        q_is_res = q_res.isalpha()
        s_is_res = aligned_sbjct[idx].isalpha()
        if q_is_res and s_is_res:
            pdb_to_ref[query_pos] = ref_pos
            ref_to_pdb[ref_pos] = query_pos
        if q_is_res:
            query_pos += 1
        if s_is_res:
            ref_pos += 1
    return pdb_to_ref, ref_to_pdb
def blast_sequences(comp_seq, ref_seq):
    '''
    Perform BLAST of two protein sequences using NCBI BLAST+ package.

    Output is two dictionaries: residue numbering in PDB chain (key) mapped to
    the residue position in the reference sequence (value), and vice versa.

    Notes:
        User must have NCBI BLAST+ package installed in user's PATH.

    Args:
        comp_seq (str): A comparison protein sequence.
        ref_seq (str): A reference protein sequence.

    Returns:
        dict: A dictionary mapping comparison sequence numbering (key) to
            reference sequence numbering (value)
        dict: A dictionary mapping reference sequence numbering (key) to
            comparison sequence numbering (value)
    '''
    # Write both sequences to temporary FASTA files so the blastp binary can
    # read them; flush before invoking blastp so the data is on disk.
    with tempfile.NamedTemporaryFile(mode='w') as comp_seq_file, \
            tempfile.NamedTemporaryFile(mode='w') as ref_seq_file:
        comp_seq_file.write(">\n" + str(comp_seq) + "\n")
        ref_seq_file.write(">\n" + str(ref_seq) + "\n")
        ref_seq_file.flush()
        comp_seq_file.flush()
        # outfmt=5 requests XML output, parsed below with NCBIXML.
        blastp_cline = NcbiblastpCommandline(query=comp_seq_file.name,
                                             subject=ref_seq_file.name,
                                             evalue=0.001, outfmt=5)
        alignment, _stderror = blastp_cline()
    blast_xml = StringIO(alignment)
    blast_record = NCBIXML.read(blast_xml)
    temp_score = 0
    high_scoring_hsp = None
    #Retrieve highest scoring HSP
    for alignment in blast_record.alignments:
        for hsp in alignment.hsps:
            if hsp.score > temp_score:
                temp_score = hsp.score
                high_scoring_hsp = hsp
    #Create dictionary mapping position in PDB chain to position in ref sequence
    # If no HSP was found, both dictionaries are returned empty.
    pdb_to_ref = {}
    ref_to_pdb = {}
    if high_scoring_hsp is not None:
        query_string = high_scoring_hsp.query
        sbjct_string = high_scoring_hsp.sbjct
        # HSP start coordinates are 1-indexed positions within each sequence.
        key = high_scoring_hsp.query_start
        ref = high_scoring_hsp.sbjct_start
        for i, res in enumerate(query_string):
            # Gap characters ('-') are not alphabetic, so isalpha() marks
            # positions where a real residue is present.
            if res.isalpha() and sbjct_string[i].isalpha():
                pdb_to_ref[key] = ref
                ref_to_pdb[ref] = key
                key += 1
                ref += 1
            elif res.isalpha():
                key += 1
            elif sbjct_string[i].isalpha():
                ref += 1
    return pdb_to_ref, ref_to_pdb
def _construct_sub_align_from_chains(alignments, codons, fasta=False):
'''
Take a list of biostructmap multiple sequence alignment objects, and
return a subset of codons based on an input list in the form
[('A',(1,2,3)),('B',(4,5,6)),...].
Notes:
Codons should be 1-indexed, not 0-indexed.
Args:
alignment (dict): A dictionary of multiple sequence alignment objects
accessed by a tuple of chain ids for each alignment.
codons (list): a subset of codons in a list of the form [(1,2,3),...]
fasta (bool, optional): If True, will return multiple sequence
alignment as a string in FASTA format.
Returns:
MulitpleSequenceAlignment: A subset of the initial alignment as a
multiple sequence alignment object. If the fasta kwarg is set to
True, returns a string instead.
'''
chain_alignments = {}
chain_strains = {}
for key, alignment in alignments.items():
chain_alignments[key] = alignment.get_alignment_position_dict()
chain_strains[key] = alignment.get_isolate_ids()
codons = [(chain_id, x) for chain_id, sublist in codons for x in sublist]
sub_align = []
for codon in codons:
#List is zero indexed, hence the need to call codon-1
sub_align.append(list(chain_alignments[codon[0]][codon[1]-1]))
_sub_align_transpose = zip(*sub_align)
sub_align_transpose = [''.join(x) for x in _sub_align_transpose]
if fasta:
strains = list(chain_strains.values())[0]
if sub_align_transpose:
fasta_out = ''.join('>{}\n{}\n'.format(*t) for t in
zip(strains, sub_align_transpose))
else:
fasta_out = ''.join('>{}\n\n'.format(strain) for strain in strains)
return fasta_out
return sub_align_transpose
def _construct_protein_sub_align_from_chains(alignments, residues, fasta=False):
'''
Take a list of biostructmap multiple sequence alignment objects, and
return a subset of residues based on an input list in the form
[('A', 1), ('B', 4), ...].
Notes:
Residues should be 1-indexed, not 0-indexed.
Args:
alignment (dict): A dictionary of multiple sequence alignment objects
accessed by a tuple of chain ids for each alignment.
residues (list): a subset of codons in a list of the form [(1,2,3),...]
fasta (bool, optional): If True, will return multiple sequence
alignment as a string in FASTA format.
Returns:
MulitpleSequenceAlignment: A subset of the initial alignment as a
multiple sequence alignment object. If the fasta kwarg is set to
True, returns a string instead.
'''
chain_alignments = {}
chain_strains = {}
for key, alignment in alignments.items():
chain_alignments[key] = alignment.get_alignment_position_dict()
chain_strains[key] = alignment.get_isolate_ids()
sub_align = []
for residue in residues:
#List is zero indexed, hence the need to call codon-1
sub_align.append(list(chain_alignments[residue[0]][residue[1]-1]))
_sub_align_transpose = zip(*sub_align)
sub_align_transpose = [''.join(x) for x in _sub_align_transpose]
if fasta:
strains = list(chain_strains.values())[0]
if sub_align_transpose:
fasta_out = ''.join('>{}\n{}\n'.format(*t) for t in
zip(strains, sub_align_transpose))
else:
fasta_out = ''.join('>{}\n\n'.format(strain) for strain in strains)
return fasta_out
return sub_align_transpose
def _construct_sub_align(alignment, codons, fasta=False):
'''
Take a biostructmap multiple sequence alignment object, and return a
subset of codons based on an input list in the form [(1,2,3),(4,5,6),...].
Notes:
Codons should be 1-indexed, not 0-indexed.
Args:
alignment: A multiple sequence alignment object.
codons (list): a subset of codons in a list of the form [(1,2,3),...]
fasta (bool, optional): If True, will return multiple sequence
alignment as a string in FASTA format.
Returns:
MulitpleSequenceAlignment: A subset of the initial alignment as a
multiple sequence alignment object. If the fasta kwarg is set to
True, returns a string instead.
'''
alignments = alignment.get_alignment_position_dict()
strains = alignment.get_isolate_ids()
codons = [x for sublist in codons for x in sublist]
sub_align = []
for codon in codons:
#List is zero indexed, hence the need to call codon-1
sub_align.append(list(alignments[codon-1]))
_sub_align_transpose = zip(*sub_align)
sub_align_transpose = [''.join(x) for x in _sub_align_transpose]
if fasta:
fasta_out = ''.join('>{}\n{}\n'.format(*t) for t in
zip(strains, sub_align_transpose))
return fasta_out
return sub_align_transpose
def check_for_uncertain_bases(alignment):
    '''
    Check for uncertain or missing base pairs in a multiple sequence alignment.

    Args:
        alignment (list): A multiple sequence alignment as a list of sequence
            strings.

    Returns:
        bool: True if alignment contains bases other than A, C, G or T.
    '''
    standard_bases = set('ACGTacgt')
    for seq_string in alignment:
        if any(base not in standard_bases for base in seq_string):
            # Warn once and bail out on the first non-standard base found.
            warnings.warn("Multiple sequence alignment contains uncertain "\
                "or missing bases: using DendroPy implementation of population "\
                "statistics, which is slower. Also, DendroPy treatment of uncertain "\
                "bases is not guaranteed to be correct, and it suggested the user filter "\
                "out uncertain bases before running BioStructMap.")
            return True
    return False
def _align_prot_to_dna_exonerate(prot_seq, dna_seq):
    '''
    Aligns a protein sequence to a genomic sequence. Takes into consideration
    introns, frameshifts and reverse-sense translation.

    Note:
        This method uses the external program Exonerate:
        http://www.ebi.ac.uk/about/vertebrate-genomics/software/exonerate
        This needs to be installed in the users PATH.

    Args:
        prot_seq (str): A protein sequence.
        dna_seq (str): A genomic or coding DNA sequence

    Returns:
        dict: A dictionary mapping protein residue numbers to codon positions:
            e.g. {3:(6,7,8), 4:(9,10,11), ...}

    Raises:
        UserWarning: If no exonerate alignment is found, or if the 'vulgar'
            output line does not parse as expected.
    '''
    #TODO Use Biopython exonerate parser. Didn't realise that existed when I wrote this parser.
    # Write both sequences to temporary FASTA files for exonerate to read.
    with tempfile.NamedTemporaryFile(mode='w') as protein_seq_file, \
            tempfile.NamedTemporaryFile(mode='w') as dna_seq_file:
        protein_seq_file.write(">\n" + prot_seq + "\n")
        dna_seq_file.write(">\n" + dna_seq + "\n")
        dna_seq_file.flush()
        protein_seq_file.flush()
        #If protein sequence length is small, then exonerate score needs
        #to be adjusted in order to return alignment.
        #With a length n, a perfect match would score 5n.
        #Hence we make a threshold of 3n (60%).
        exonerate_call = ["exonerate",
                          "--model", "protein2genome",
                          "--showalignment", "False",
                          "--showvulgar", "True",
                          protein_seq_file.name,
                          dna_seq_file.name]
        if len(prot_seq) < 25:
            threshold = str(len(prot_seq) * 3)
            exonerate_call.append("--score")
            exonerate_call.append(threshold)
        alignment = subprocess.check_output(exonerate_call)
    # Extract the single 'vulgar: ...' summary line from exonerate's output.
    vulgar_re = re.search(r"(?<=vulgar:).*(?=\n)",
                          alignment.decode("utf-8"))
    if not vulgar_re:
        raise UserWarning("Did not find exonerate alignment.")
    vulgar_format = vulgar_re.group(0)
    # Vulgar line fields: query id, start, end, strand, target id, start,
    # end, strand, score, then (label, query_len, target_len) triples.
    protein_start = vulgar_format.split()[0]
    dna_start = vulgar_format.split()[3]
    matches = vulgar_format.split()[7:]
    direction = vulgar_format.split()[5]
    protein_count = int(protein_start)
    dna_count = int(dna_start)
    if direction == "+":
        step = operator.add
    elif direction == "-":
        # Reverse-sense alignment: walk backwards along the DNA.
        step = operator.sub
        dna_count += 1
    else:
        raise UserWarning("Exonerate direction doesn't match either '+' or '-'")
    if len(matches) % 3:
        raise UserWarning("The vulgar output from exonerate has failed \
to parse correctly")
    #Split output into [modifier, query_count, ref_count] triples
    matches = [matches[i*3:i*3+3] for i in range(len(matches)//3)]
    matched_bases = {}
    codon = []
    #Convert vulgar format to dictionary with residue: codon pairs
    for region in matches:
        modifier = region[0]
        count1 = int(region[1])
        count2 = int(region[2])
        if modifier == 'M':
            if count1 != count2 / 3:
                raise UserWarning("Match in vulgar output is possibly " +
                                  "incorrect - number of protein residues " +
                                  "should be the number of bases divided by 3")
            # Consume matched bases one at a time, emitting a residue entry
            # each time a full codon (3 bases) has been collected.
            for _ in range(count2):
                dna_count = step(dna_count, 1)
                codon.append(dna_count)
                if len(codon) == 3:
                    protein_count += 1
                    matched_bases[protein_count] = tuple(codon)
                    codon = []
        if modifier == 'C':
            if count1 != count2 / 3:
                raise UserWarning("Codon in vulgar output is possibly " +
                                  "incorrect - number of protein residues " +
                                  "should be the number of bases divided by 3")
            raise UserWarning("Unexpected output in vulgar format - not " +
                              "expected to need functionality for 'codon' " +
                              "modifier")
        if modifier == 'G' or modifier == 'N':
            # Gap ('G') / non-equivalenced region ('N'): advance both
            # counters without emitting any residue-to-codon mapping.
            if codon:
                raise UserWarning("Warning - split codon over gap in " +
                                  "exonerate output!")
            protein_count = protein_count + count1
            dna_count = step(dna_count, count2)
        if modifier == '5' or modifier == '3':
            # 5'/3' intron splice sites consume DNA only.
            if count1 != 0:
                raise UserWarning("Warning - protein count should be 0 in " +
                                  "exonerate output over intron splice sites.")
            dna_count = step(dna_count, count2)
        if modifier == 'I':
            # Intron body consumes DNA only.
            if count1 != 0:
                raise UserWarning("Warning - protein count should be 0 in " +
                                  "exonerate output over intron.")
            dna_count = step(dna_count, count2)
        if modifier == 'S':
            # Split codon: bases completing a codon interrupted by an intron.
            for _ in range(count2):
                dna_count = step(dna_count, 1)
                codon.append(dna_count)
                if len(codon) == 3:
                    protein_count += 1
                    matched_bases[protein_count] = tuple(codon)
                    codon = []
        if modifier == 'F':
            raise UserWarning("Unexpected frameshift in exonerate output - " +
                              "check alignment input.")
    return matched_bases
| {
"repo_name": "andrewguy/biostructmap",
"path": "biostructmap/seqtools.py",
"copies": "1",
"size": "23316",
"license": "mit",
"hash": -6866793555426456000,
"line_mean": 38.5186440678,
"line_max": 97,
"alpha_frac": 0.6145565277,
"autogenerated": false,
"ratio": 3.939844542075025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5054401069775025,
"avg_score": null,
"num_lines": null
} |
"""A collection of tools to handle tests over protein multiple sequence alignments.
Part of the biostructmap package.
"""
from __future__ import absolute_import, division, print_function
from io import StringIO
import warnings
from math import log
from Bio import AlignIO
from Bio.Data import IUPACData
from numpy import mean
import dendropy
from .seqtools import _sliding_window_var_sites
from .gentests import _calculate_shannon_entropy
def shannon_entropy_protein(alignment, protein_letters=IUPACData.protein_letters,
                            normalized=False, gap='-'):
    """Compute the mean per-column Shannon entropy of a protein alignment.

    Args:
        alignment (str/Bio.Align.MultipleSequenceAlignment): Multiple sequence
            alignment, supplied either as a FASTA-format string or as an
            already-parsed Bio.Align.MultipleSequenceAlignment object.
        protein_letters (str, optional): Alphabet of amino acid letters used
            for the entropy calculation. Defaults to the standard 20 residues.
            If the alignment contains non-standard residues, supply an
            extended alphabet; maximum entropy values change accordingly.
        normalized (bool): If True, entropy values are normalized to the
            range [0, 1].
        gap (str): Gap character. NOTE(review): currently unused — gap
            columns receive no special treatment; confirm intended behavior.

    Returns:
        float: Mean Shannon entropy over all alignment columns, or None when
        the alignment is empty.
    """
    if isinstance(alignment, str):
        alignment = AlignIO.read(StringIO(alignment), format='fasta')
    if not alignment or len(alignment[0]) == 0:
        return None
    # Transpose the alignment so each element is one column of residues.
    columns = list(zip(*(str(record.seq) for record in alignment)))
    observed = {residue for column in columns for residue in column}
    unknown = observed.difference(protein_letters)
    if unknown:
        warnings.warn("Multiple sequence alignment contains residues that aren't "
                      "in the provided alphabet. Entropy values will not be "
                      "accurate - consider supplying an extended amino acid "
                      "alphabet to the `protein_letters` keyword argument. "
                      "Offending residue(s) are: {res}".format(res=str(unknown)))
    per_column = [_calculate_shannon_entropy(column, protein_letters, normalized)
                  for column in columns]
    return mean(per_column)
| {
"repo_name": "andrewguy/biostructmap",
"path": "biostructmap/protein_tests.py",
"copies": "1",
"size": "2417",
"license": "mit",
"hash": -5550801350473491000,
"line_mean": 42.9454545455,
"line_max": 89,
"alpha_frac": 0.6628051303,
"autogenerated": false,
"ratio": 4.3707052441229655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5533510374422965,
"avg_score": null,
"num_lines": null
} |
# a collection of turtle generation functions
# not meant to be run directly
# only contains functions related to rdflib.Graph manipulation
# only nonspecific stuff: shouldn't contain any functions directly related
# to data structure(rdf triple organization) of the modules you're dev'ing
import config
from middleware.graphers.turtle_utils import generate_hash, generate_uri as gu, link_uris
from middleware.blazegraph.upload_graph import queue_upload
from middleware.graphers.turtle_utils import actual_filename
from rdflib import Namespace, Graph, Literal, plugin
from Bio import SeqIO
from os.path import basename
def generate_graph(transitive=True):
    '''
    Parses all the Namespaces defined in the config file and returns a graph
    with them bound.

    Args:
        transitive (bool): If True, declare :hasPart and :isFoundIn as
            owl:TransitiveProperty so chained part-of links are inferred.

    Return:
        (rdflib.Graph): a graph with all the defined Namespaces bound to it.
    '''
    graph = Graph()
    for key in config.namespaces.keys():
        # Use equality, not identity: `key is 'root'` only worked via
        # CPython string interning and raises SyntaxWarning on Python 3.8+.
        if key == 'root':
            # The root namespace is bound with an empty prefix.
            graph.bind('', config.namespaces['root'])
        else:
            graph.bind(key, config.namespaces[key])

    # add edge equivlaence properties
    if transitive:
        graph.add((gu(':hasPart'), gu('rdf:type'), gu('owl:TransitiveProperty')))
        graph.add((gu(':isFoundIn'), gu('rdf:type'), gu('owl:TransitiveProperty')))
    #graph.add((gu(':hasPart'), gu('rdf:type'), gu('owl:SymmetricProperty')))

    # make AntimicrobialResistanceGene & VirulenceFactor subclasses of :Marker
    graph.add((gu(':AntimicrobialResistanceGene'), gu('rdfs:subClassOf'), gu(':Marker')))
    graph.add((gu(':VirulenceFactor'), gu('rdfs:subClassOf'), gu(':Marker')))
    graph.add((gu(':PanGenomeRegion'), gu('rdfs:subClassOf'), gu(':Marker')))

    # human-readable dc:description for edge types
    graph.add((gu('ge:0001076'), gu('dc:description'), Literal('O-Type')))
    graph.add((gu('ge:0001077'), gu('dc:description'), Literal('H-Type')))
    graph.add((gu('ge:0000024'), gu('dc:description'), Literal('Upload_Date')))
    graph.add((gu(':Marker'), gu('dc:description'), Literal('Any_Marker')))
    graph.add((gu(':VirulenceFactor'), gu('dc:description'), Literal('Virulence_Factor')))
    graph.add((gu(':AntimicrobialResistanceGene'), gu('dc:description'), Literal('AMR_Gene')))

    # human-readable dc:description for object types
    graph.add((gu('so:0001462'), gu('dc:description'), Literal('Bag_of_Contigs')))
    graph.add((gu(':spfyId'), gu('dc:description'), Literal('SpfyId')))

    return graph
def generate_turtle_skeleton(query_file):
    '''
    Handles the main generation of a turtle object.

    NAMING CONVENTIONS:
    uriIsolate: this is the top-most entry, a uniq. id per file is allocated by checking our DB for the greatest most entry (not in this file)
        ex. :spfy234
    uriAssembly: aka. the genome ID, this is a sha1 hash of the file contents
        ex. :4eb02f5676bc808f86c0f014bbce15775adf06ba
    uriContig: indiv contig ids; from SeqIO.record.id - this should be uniq to a contig (at least within a given file)
        ex. :4eb02f5676bc808f86c0f014bbce15775adf06ba/contigs/FLOF01006689.1
        note: the record.id is what RGI uses as a prefix for ORF_ID (ORF_ID has additional _314 or w/e #s)

    Args:
        query_file(str): path to the .fasta file (this should already incl the directory)
    Returns:
        graph: the graph with all the triples generated from the .fasta file
    '''
    # Base graph generation
    graph = generate_graph()

    # uriGenome generation
    file_hash = generate_hash(query_file)
    uriGenome = gu(':' + file_hash)
    # set the object type for uriGenome
    graph.add((uriGenome, gu('rdf:type'), gu('g:Genome')))
    # this is used as the human readable display of Genome
    graph.add((uriGenome, gu('dc:description'), Literal(actual_filename(query_file))))
    # note that timestamps are not added in base graph generation, they are only added during the check for duplicate files in blazegraph

    # uri for bag of contigs
    # ex. :4eb02f5676bc808f86c0f014bbce15775adf06ba/contigs/
    uriContigs = gu(uriGenome, "/contigs")
    # set the object type for uriContigs
    graph.add((uriContigs, gu('rdf:type'), gu('so:0001462')))
    # link the bag of contigs to the genome
    graph = link_uris(graph, uriGenome, uriContigs)
    #graph.add((uriGenome, gu(':hasPart'), uriContigs))

    # Use a context manager so the fasta file handle is always closed;
    # the original `SeqIO.parse(open(query_file), ...)` leaked the handle.
    with open(query_file) as fasta_handle:
        for record in SeqIO.parse(fasta_handle, "fasta"):
            # ex. :4eb02f5676bc808f86c0f014bbce15775adf06ba/contigs/FLOF01006689.1
            uriContig = gu(uriContigs, '/' + record.id)
            # add the object type to uriContig
            graph.add((uriContig, gu('rdf:type'), gu('g:Contig')))
            # linking the spec contig and the bag of contigs
            graph = link_uris(graph, uriContigs, uriContig)
            #graph.add((uriContigs, gu(':hasPart'), uriContig))
            # uriContig attributes
            graph.add((uriContig, gu('g:DNASequence'), Literal(record.seq)))
            graph.add((uriContig, gu('g:Description'),
                       Literal(record.description)))
            graph.add((uriContig, gu('g:Identifier'),
                       Literal(record.id)))
            # human-readable ; the description here is different because
            # record.description tends to be rather long
            # instead, record.id is the accession eg: FLOF01006689.1
            graph.add((uriContig, gu('dc:description'),
                       Literal(record.description)))
    return graph
def turtle_grapher(query_file):
    '''
    Builds the base turtle graph for a fasta file and queues it for upload.

    Args:
        query_file(str): path to the .fasta file to graph.
    Returns:
        graph: the generated rdflib graph (also submitted to blazegraph
            via queue_upload as a side effect).
    '''
    graph = generate_turtle_skeleton(query_file)
    queue_upload(graph)
    return graph
| {
"repo_name": "superphy/backend",
"path": "app/middleware/graphers/turtle_grapher.py",
"copies": "1",
"size": "5520",
"license": "apache-2.0",
"hash": -4459132450892370000,
"line_mean": 45.3865546218,
"line_max": 142,
"alpha_frac": 0.6802536232,
"autogenerated": false,
"ratio": 3.495883470550982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46761370937509816,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.