import collections
import logging
import re
import time
import irc.client
from . import message
__all__ = ['Event3', 'ServerConnection3']
log = logging.getLogger(__name__)
class Event3(irc.client.Event):
"""An IRC event with tags
See `tag specification <http://ircv3.net/specs/core/message-tags-3.2.html>`_.
"""
def __init__(self, type, source, target, arguments=None, tags=None):
"""Initialize a new event
:param type: a string describing the event
:type type: :class:`str`
:param source: The originator of the event. NickMask or server
:type source: :class:`irc.client.NickMask` | :class:`str`
:param target: The target of the event
:type target: :class:`str`
:param arguments: Any specific event arguments
        :type arguments: :class:`list` | None
        :param tags: the tags of the event
        :type tags: :class:`list` of :class:`message.Tag` | None
        :raises: None
"""
super(Event3, self).__init__(type, source, target, arguments)
self.tags = tags
    def __repr__(self):  # pragma: no cover
"""Return a canonical representation of the object
:rtype: :class:`str`
:raises: None
"""
args = (self.__class__.__name__, self.type, self.source, self.target, self.arguments, self.tags)
return '<%s %s, %s to %s, %s, tags: %s>' % args
def __eq__(self, other):
"""Return True, if the events share equal attributes
:param other: the other event to compare
:type other: :class:`Event3`
:returns: True, if equal
:rtype: :class:`bool`
:raises: None
"""
return self.type == other.type and\
self.source == other.source and\
self.target == other.target and\
self.arguments == other.arguments and\
self.tags == other.tags
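# A quick sketch of the equality semantics above (values are illustrative):
#     >>> a = Event3('pubmsg', 'nick!u@h', '#chan', ['hi'], tags=[])
#     >>> a == Event3('pubmsg', 'nick!u@h', '#chan', ['hi'], tags=[])
#     True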
class ServerConnection3(irc.client.ServerConnection):
"""ServerConncetion that can handle irc v3 tags
Tags are only handled for privmsg, pubmsg, notice events.
All other events might be handled the old way.
"""
_cmd_pat = "^(@(?P<tags>[^ ]+) +)?(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?"
_rfc_1459_command_regexp = re.compile(_cmd_pat)
def __init__(self, reactor, msglimit=20, limitinterval=30):
"""Initialize a connection that has a limit to sending messages
:param reactor: the reactor of the connection
:type reactor: :class:`irc.client.Reactor`
:param msglimit: the maximum number of messages to send in limitinterval
:type msglimit: :class:`int`
:param limitinterval: the timeframe in seconds in which you can only send
as many messages as in msglimit
:type limitinterval: :class:`int`
:raises: None
"""
super(ServerConnection3, self).__init__(reactor)
self.sentmessages = collections.deque(maxlen=msglimit + 1)
"""A queue with timestamps form the last sent messages.
So we can track if we send to many messages."""
self.limitinterval = limitinterval
"""the timeframe in seconds in which you can only send
as many messages as in :data:`ServerConncetion3msglimit`"""
def get_waittime(self):
"""Return the appropriate time to wait, if we sent too many messages
:returns: the time to wait in seconds
:rtype: :class:`float`
:raises: None
"""
now = time.time()
self.sentmessages.appendleft(now)
if len(self.sentmessages) == self.sentmessages.maxlen:
            # check whether the oldest message is older than
            # the window given by self.limitinterval
oldest = self.sentmessages[-1]
waittime = self.limitinterval - (now - oldest)
if waittime > 0:
return waittime + 1 # add a little buffer
return 0
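    # Worked example of the arithmetic above, with msglimit=2 and
    # limitinterval=30: the deque keeps 3 timestamps, so once it is full its
    # last entry is the time of the send two messages ago; if that send was
    # less than 30s ago, we wait out the remainder plus the 1s buffer.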
def send_raw(self, string):
"""Send raw string to the server.
The string will be padded with appropriate CR LF.
If too many messages are sent, this will call
:func:`time.sleep` until it is allowed to send messages again.
:param string: the raw string to send
:type string: :class:`str`
:returns: None
:raises: :class:`irc.client.InvalidCharacters`,
:class:`irc.client.MessageTooLong`,
:class:`irc.client.ServerNotConnectedError`
"""
waittime = self.get_waittime()
if waittime:
log.debug('Sent too many messages. Waiting %s seconds',
waittime)
time.sleep(waittime)
return super(ServerConnection3, self).send_raw(string)
def _process_line(self, line):
"""Process the given line and handle the events
:param line: the raw message
:type line: :class:`str`
:returns: None
:rtype: None
:raises: None
"""
m = self._rfc_1459_command_regexp.match(line)
prefix = m.group('prefix')
tags = self._process_tags(m.group('tags'))
source = self._process_prefix(prefix)
command = self._process_command(m.group('command'))
arguments = self._process_arguments(m.group('argument'))
if not self.real_server_name:
self.real_server_name = prefix
# Translate numerics into more readable strings.
command = irc.events.numeric.get(command, command)
if command not in ["privmsg", "notice"]:
return super(ServerConnection3, self)._process_line(line)
event = Event3("all_raw_messages", self.get_server_name(),
None, [line], tags=tags)
self._handle_event(event)
target, msg = arguments[0], arguments[1]
messages = irc.ctcp.dequote(msg)
command = self._resolve_command(command, target)
for m in messages:
self._handle_message(tags, source, command, target, m)
def _resolve_command(self, command, target):
"""Get the correct event for the command
Only for 'privmsg' and 'notice' commands.
:param command: The command string
:type command: :class:`str`
:param target: either a user or a channel
:type target: :class:`str`
:returns: the correct event type
:rtype: :class:`str`
:raises: None
"""
if command == "privmsg":
if irc.client.is_channel(target):
command = "pubmsg"
else:
if irc.client.is_channel(target):
command = "pubnotice"
else:
command = "privnotice"
return command
def _handle_message(self, tags, source, command, target, msg):
"""Construct the correct events and handle them
:param tags: the tags of the message
:type tags: :class:`list` of :class:`message.Tag`
:param source: the sender of the message
:type source: :class:`str`
:param command: the event type
:type command: :class:`str`
:param target: the target of the message
:type target: :class:`str`
:param msg: the content
:type msg: :class:`str`
:returns: None
:rtype: None
:raises: None
"""
if isinstance(msg, tuple):
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
msg = list(msg)
log.debug("tags: %s, command: %s, source: %s, target: %s, "
"arguments: %s", tags, command, source, target, msg)
event = Event3(command, source, target, msg, tags=tags)
self._handle_event(event)
if command == "ctcp" and msg[0] == "ACTION":
event = Event3("action", source, target, msg[1:], tags=tags)
self._handle_event(event)
else:
log.debug("tags: %s, command: %s, source: %s, target: %s, "
"arguments: %s", tags, command, source, target, [msg])
event = Event3(command, source, target, [msg], tags=tags)
self._handle_event(event)
def _process_tags(self, tags):
"""Process the tags of the message
:param tags: the tags string of a message
:type tags: :class:`str` | None
:returns: list of tags
:rtype: :class:`list` of :class:`message.Tag`
:raises: None
"""
if not tags:
return []
return [message.Tag.from_str(x) for x in tags.split(';')]
def _process_prefix(self, prefix):
"""Process the prefix of the message and return the source
:param prefix: The prefix string of a message
:type prefix: :class:`str` | None
:returns: The prefix wrapped in :class:`irc.client.NickMask`
:rtype: :class:`irc.client.NickMask` | None
:raises: None
"""
if not prefix:
return None
return irc.client.NickMask(prefix)
def _process_command(self, command):
"""Return a lower string version of the command
:param command: the command of the message
:type command: :class:`str` | None
:returns: The lower case version
:rtype: :class:`str` | None
:raises: None
"""
if not command:
return None
return command.lower()
def _process_arguments(self, arguments):
"""Process the arguments
:param arguments: arguments string of a message
:type arguments: :class:`str` | None
:returns: A list of arguments
:rtype: :class:`list` of :class:`str` | None
:raises: None
"""
if not arguments:
return None
a = arguments.split(" :", 1)
arglist = a[0].split()
if len(a) == 2:
arglist.append(a[1])
return arglist
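# A standalone sketch of the tagged-message pattern used by _process_line
# (sample line and values are illustrative):
#     >>> m = ServerConnection3._rfc_1459_command_regexp.match(
#     ...     "@color=blue;turbo=1 :nick!u@h PRIVMSG #chan :hello")
#     >>> m.group('tags'), m.group('prefix'), m.group('command')
#     ('color=blue;turbo=1', 'nick!u@h', 'PRIVMSG')
#     >>> m.group('argument')
#     ' #chan :hello'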
|
from . import questions, users, wiki, forums # noqa
|
from django import forms
class SubscribeForm(forms.Form):
email = forms.EmailField()
class UnsubscribeForm(forms.Form):
email = forms.EmailField()
|
"""Auto-generated file, do not edit by hand. PM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_PM = PhoneMetadata(id='PM', country_code=508, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[45]\\d{5}', possible_number_pattern='\\d{6}', possible_length=(6,)),
fixed_line=PhoneNumberDesc(national_number_pattern='41\\d{4}', example_number='411234', possible_length=(6,)),
mobile=PhoneNumberDesc(national_number_pattern='55\\d{4}', example_number='551234', possible_length=(6,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='([45]\\d)(\\d{2})(\\d{2})', format='\\1 \\2 \\3', national_prefix_formatting_rule='0\\1')])
|
from __future__ import print_function
import math
from klampt import vectorops
from klampt import so3
def solve_2R_inverse_kinematics(x,y,L1=1,L2=1):
"""For a 2R arm centered at the origin, solves for the joint angles
(q1,q2) that places the end effector at (x,y).
The result is a list of up to 2 solutions, e.g. [(q1,q2),(q1',q2')].
"""
D = vectorops.norm((x,y))
thetades = math.atan2(y,x)
if D == 0:
raise ValueError("(x,y) at origin, infinite # of solutions")
c2 = (D**2-L1**2-L2**2)/(2.0*L1*L2)
q2s = []
    if c2 < -1:
        print("solve_2R_inverse_kinematics: (x,y) inside inner circle")
        return []
    elif c2 > 1:
        print("solve_2R_inverse_kinematics: (x,y) out of reach")
        return []
else:
if c2 == 1:
q2s = [math.acos(c2)]
else:
q2s = [math.acos(c2),-math.acos(c2)]
res = []
for q2 in q2s:
thetaactual = math.atan2(math.sin(q2),L1+L2*math.cos(q2))
q1 = thetades - thetaactual
res.append((q1,q2))
return res
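# Sanity check (sketch): with L1 = L2 = 1, the fully stretched pose
# (x, y) = (2, 0) gives c2 = (4 - 1 - 1)/2 = 1 and thus a single solution:
#     >>> solve_2R_inverse_kinematics(2, 0)
#     [(0.0, 0.0)]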
def solve_3R_forward_kinematics(q1,q2,q3,L1=1,L2=1,L3=1):
"""Returns a list of (x,y,theta) triples for each link
for a planar, 3R manipulator with link lengths L1, L2, L3.
It also returns the end effector transform."""
T1 = (0,0,q1)
dx1 = (L1*math.cos(T1[2]),L1*math.sin(T1[2]))
T2 = vectorops.add((T1[0],T1[1]),dx1)+[T1[2]+q2]
dx2 = (L2*math.cos(T2[2]),L2*math.sin(T2[2]))
T3 = vectorops.add((T2[0],T2[1]),dx2)+[T2[2]+q3]
    dx3 = (L3*math.cos(T3[2]), L3*math.sin(T3[2]))
T4 = vectorops.add((T3[0],T3[1]),dx3)+[T3[2]]
return [T1,T2,T3,T4]
def solve_3R_inverse_kinematics(x,y,theta,L1=1,L2=1,L3=1):
"""TODO: for a planar, 3R manipulator with link lengths L1, L2, L3,
solve for the joint angles (q1,q2,q3) such that the end effector
is placed at x,y and is oriented along the angle theta.
In general there will be up to two solutions. The result is a list of
solutions
The current implementation only tries to reach x,y with the second and
third joint angle with the routine presented in class"""
q1,q2,q3 = 0,0,0
(x1,y1) = (L1*math.cos(q1),L1*math.sin(q1))
q23s = solve_2R_inverse_kinematics(x-x1,y-y1,L2,L3)
res = []
for q2,q3 in q23s:
res.append((q1,q2,q3))
return res
def run_ex1():
    print(solve_3R_forward_kinematics(0, 0, 0))
    tests = [
        (3, 0, 0),                    # test 1
        (2.0, 0.5, 0),                # note the orientation error
        (2.0, 0.0, math.pi*0.5),      # note the orientation error
        (-3.0, 0.0, 0),               # feasible, but the current implementation fails
        (-2.0, 0.0, -math.pi*0.5),    # infeasible
    ]
    for xdes in tests:
        qs = solve_3R_inverse_kinematics(*xdes)
        print("xdes =", xdes, end=' ')
        if len(qs) == 0:
            print("failed")
        else:
            print(len(qs), "solutions:")
            for q in qs:
                print(" q =", q, "fk =", solve_3R_forward_kinematics(*q)[3])
if __name__ == "__main__":
run_ex1()
|
''' A categorical scatter plot based on GitHub commit history. This example
demonstrates using a ``jitter`` transform.
.. bokeh-example-metadata::
:sampledata: commits
:apis: bokeh.plotting.figure.scatter
:refs: :ref:`userguide_categorical` > :ref:`userguide_categorical_scatters` > :ref:`userguide_categorical_scatters_jitter`
:keywords: jitter, scatter
'''
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, show
from bokeh.sampledata.commits import data
from bokeh.transform import jitter
DAYS = ['Sun', 'Sat', 'Fri', 'Thu', 'Wed', 'Tue', 'Mon']
source = ColumnDataSource(data)
p = figure(width=800, height=300, y_range=DAYS, x_axis_type='datetime',
title="Commits by Time of Day (US/Central) 2012-2016")
p.scatter(x='time', y=jitter('day', width=0.6, range=p.y_range), source=source, alpha=0.3)
p.xaxis.formatter.days = ['%Hh']
p.x_range.range_padding = 0
p.ygrid.grid_line_color = None
show(p)
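# Note: jitter('day', width=0.6, range=p.y_range) is a client-side transform;
# each point keeps its categorical 'day' value and is drawn with a uniform
# random offset of up to +/-0.3 category units, spreading overlapping commits.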
|
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
extensions = ["sphinx.ext.viewcode"]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "django-functest"
copyright = "2016-2018, Luke Plant"
version = "1.2"
release = "1.2"
exclude_patterns = ["_build"]
pygments_style = "sphinx"
html_theme = "default"
html_static_path = ["_static"]
htmlhelp_basename = "django-functestdoc"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
latex_documents = [
(
"index",
"django-functest.tex",
"django-functest Documentation",
"Luke Plant",
"manual",
),
]
man_pages = [("index", "django-functest", "django-functest Documentation", ["Luke Plant"], 1)]
texinfo_documents = [
(
"index",
"django-functest",
"django-functest Documentation",
"Luke Plant",
"django-functest",
"One line description of project.",
"Miscellaneous",
),
]
|
""" This modules provides the translation tables from python to c++. """
import ast
import inspect
import logging
import sys
from pythran import cxxtypes
from pythran.conversion import to_ast, ToNotEval
from pythran.cxxtypes import NamedType
from pythran.intrinsic import Class
from pythran.intrinsic import ClassWithConstConstructor, ExceptionClass
from pythran.intrinsic import ClassWithReadOnceConstructor
from pythran.intrinsic import ConstFunctionIntr, FunctionIntr, UpdateEffect
from pythran.intrinsic import ConstMethodIntr, MethodIntr, AttributeIntr
from pythran.intrinsic import ReadEffect, ConstantIntr
from pythran.intrinsic import ReadOnceFunctionIntr, ConstExceptionIntr
from pythran.types.conversion import PYTYPE_TO_CTYPE_TABLE
from pythran import range as prange
logger = logging.getLogger("pythran")
pythran_ward = '__pythran_'
namespace = "pythonic"
cxx_keywords = {
'and', 'and_eq', 'asm', 'auto', 'bitand', 'bitor',
'bool', 'break', 'case', 'catch', 'char', 'class',
'compl', 'const', 'const_cast', 'continue', 'default', 'delete',
'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
'export', 'extern', 'false', 'float', 'for', 'friend',
'goto', 'if', 'inline', 'int', 'long', 'mutable', 'namespace', 'new',
'not', 'not_eq', 'operator', 'or', 'or_eq', 'private', 'protected',
'public', 'register', 'reinterpret_cast', 'return', 'short', 'signed',
'sizeof', 'static', 'static_cast',
'struct', 'switch', 'template', 'this', 'throw', 'true',
'try', 'typedef', 'typeid', 'typename', 'union', 'unsigned',
'using', 'virtual', 'void', 'volatile', 'wchar_t', 'while',
'xor', 'xor_eq',
# C++11 additions
'constexpr', 'decltype', 'noexcept', 'nullptr', 'static_assert',
# reserved namespaces
'std',
}
operator_to_lambda = {
# boolop
ast.And:
"(pythonic::__builtin__::functor::bool_{{}}({0})?({1}):({0}))".format,
ast.Or:
"(pythonic::__builtin__::functor::bool_{{}}({0})?({0}):({1}))".format,
# operator
ast.Add: "({0} + {1})".format,
ast.Sub: "({0} - {1})".format,
ast.Mult: "({0} * {1})".format,
ast.Div: "({0} / {1})".format,
ast.Mod: "(pythonic::operator_::mod({0}, {1}))".format,
ast.Pow: "(pythonic::__builtin__::pow({0}, {1}))".format,
ast.LShift: "({0} << {1})".format,
ast.RShift: "({0} >> {1})".format,
ast.BitOr: "({0} | {1})".format,
ast.BitXor: "({0} ^ {1})".format,
ast.BitAnd: "({0} & {1})".format,
# assume from __future__ import division
ast.FloorDiv: "(pythonic::operator_::floordiv({0}, {1}))".format,
# unaryop
ast.Invert: "(~{0})".format,
ast.Not: "(not {0})".format,
ast.UAdd: "(+{0})".format,
ast.USub: "(-{0})".format,
# cmpop
ast.Eq: "({0} == {1})".format,
ast.NotEq: "({0} != {1})".format,
ast.Lt: "({0} < {1})".format,
ast.LtE: "({0} <= {1})".format,
ast.Gt: "({0} > {1})".format,
ast.GtE: "({0} >= {1})".format,
ast.Is: ("(pythonic::__builtin__::id({0}) == "
"pythonic::__builtin__::id({1}))").format,
ast.IsNot: ("(pythonic::__builtin__::id({0}) != "
"pythonic::__builtin__::id({1}))").format,
ast.In: "(pythonic::in({1}, {0}))".format,
ast.NotIn: "(not pythonic::in({1}, {0}))".format,
}
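# Each entry is a bound str.format, so emitting C++ for a binary node is a
# plain call (sketch; the operand strings are illustrative):
#     >>> operator_to_lambda[ast.Add]("a", "b")
#     '(a + b)'
#     >>> operator_to_lambda[ast.In]("x", "xs")
#     '(pythonic::in(xs, x))'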
def update_effects(self, node):
"""
Combiner when we update the fisrst argument of a function.
It turn type of first parameter in combination of all others
parameters types.
"""
return [self.combine(node.args[0], node_args_k, register=True,
aliasing_type=True)
for node_args_k in node.args[1:]]
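# For a call like `a.extend(b)` routed through MethodIntr(update_effects),
# node.args[0] is `a` and node.args[1:] is (`b`,): the type of `a` is combined
# with the type of every remaining argument, modelling the in-place update.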
CLASSES = {
"list": {
"append": MethodIntr(
lambda self, node:
self.combine(
node.args[0],
node.args[1],
unary_op=cxxtypes.ListType,
register=True,
aliasing_type=True)
),
"extend": MethodIntr(update_effects),
"index": ConstMethodIntr(),
"pop": MethodIntr(),
"reverse": MethodIntr(),
"sort": MethodIntr(),
"count": ConstMethodIntr(),
"remove": MethodIntr(),
"insert": MethodIntr(
lambda self, node:
self.combine(
node.args[0],
node.args[2],
unary_op=cxxtypes.ListType,
register=True,
aliasing_type=True)
),
},
"str": {
"capitalize": ConstMethodIntr(),
"count": ConstMethodIntr(),
"endswith": ConstMethodIntr(return_range=prange.bool_values),
"startswith": ConstMethodIntr(return_range=prange.bool_values),
"find": ConstMethodIntr(),
"isalpha": ConstMethodIntr(return_range=prange.bool_values),
"isdigit": ConstMethodIntr(return_range=prange.bool_values),
"join": ConstMethodIntr(),
"lower": ConstMethodIntr(),
"replace": ConstMethodIntr(),
"split": ConstMethodIntr(),
"strip": ConstMethodIntr(),
"lstrip": ConstMethodIntr(),
"rstrip": ConstMethodIntr(),
"upper": ConstMethodIntr(),
},
"set": {
"add": MethodIntr(
lambda self, node:
self.combine(
node.args[0],
node.args[1],
unary_op=cxxtypes.SetType,
register=True,
aliasing_type=True)
),
"clear": MethodIntr(),
"copy": ConstMethodIntr(),
"discard": MethodIntr(),
"remove": MethodIntr(),
"isdisjoint": ConstMethodIntr(return_range=prange.bool_values),
"union_": ConstMethodIntr(),
"update": MethodIntr(update_effects),
"intersection": ConstMethodIntr(),
"intersection_update": MethodIntr(update_effects),
"difference": ConstMethodIntr(),
"difference_update": MethodIntr(update_effects),
"symmetric_difference": ConstMethodIntr(),
"symmetric_difference_update": MethodIntr(update_effects),
"issuperset": ConstMethodIntr(return_range=prange.bool_values),
"issubset": ConstMethodIntr(return_range=prange.bool_values),
},
"Exception": {
"args": AttributeIntr(return_type=NamedType("pythonic::types::str")),
"errno": AttributeIntr(return_type=NamedType("pythonic::types::str")),
"strerror": AttributeIntr(
return_type=NamedType("pythonic::types::str")),
"filename": AttributeIntr(
return_type=NamedType("pythonic::types::str")),
},
"float": {
"is_integer": ConstMethodIntr(return_range=prange.bool_values),
},
"complex": {
"conjugate": ConstMethodIntr(),
"real": AttributeIntr(return_type=NamedType("double")),
"imag": AttributeIntr(return_type=NamedType("double")),
},
"dict": {
"fromkeys": ConstFunctionIntr(),
"clear": MethodIntr(),
"copy": ConstMethodIntr(),
"get": ConstMethodIntr(),
"has_key": ConstMethodIntr(return_range=prange.bool_values),
"items": MethodIntr(),
"iteritems": MethodIntr(),
"iterkeys": MethodIntr(),
"itervalues": MethodIntr(),
"keys": MethodIntr(),
"pop": MethodIntr(),
"popitem": MethodIntr(),
"setdefault": MethodIntr(
lambda self, node:
len(node.args) == 3 and
self.combine(
node.args[0],
node.args[1],
unary_op=lambda x: cxxtypes.DictType(
x,
self.result[node.args[2]]),
register=True,
aliasing_type=True),
return_alias=lambda args: {
ast.Subscript(args[0],
ast.Index(args[1]),
ast.Load())
}
),
"update": MethodIntr(update_effects),
"values": MethodIntr(),
"viewitems": MethodIntr(),
"viewkeys": MethodIntr(),
"viewvalues": MethodIntr(),
},
"file": {
# Member variables
"closed": AttributeIntr(return_type=NamedType("bool")),
"mode": AttributeIntr(return_type=NamedType("pythonic::types::str")),
"name": AttributeIntr(return_type=NamedType("pythonic::types::str")),
"newlines": AttributeIntr(
return_type=NamedType("pythonic::types::str")),
# Member functions
"close": MethodIntr(global_effects=True),
"flush": MethodIntr(global_effects=True),
"fileno": MethodIntr(),
"isatty": MethodIntr(),
"next": MethodIntr(global_effects=True),
"read": MethodIntr(global_effects=True),
"readline": MethodIntr(global_effects=True),
"readlines": MethodIntr(global_effects=True),
"xreadlines": MethodIntr(global_effects=True),
"seek": MethodIntr(global_effects=True),
"tell": MethodIntr(),
"truncate": MethodIntr(global_effects=True),
"write": MethodIntr(global_effects=True),
"writelines": MethodIntr(global_effects=True),
},
"finfo": {
"eps": AttributeIntr(),
},
"ndarray": {
"astype": MethodIntr(),
"dtype": AttributeIntr(),
"fill": MethodIntr(),
"flat": AttributeIntr(),
"flatten": MethodIntr(),
"item": MethodIntr(),
"itemsize": AttributeIntr(return_type=NamedType("long"),
return_range=prange.positive_values),
"nbytes": AttributeIntr(return_type=NamedType("long"),
return_range=prange.positive_values),
"ndim": AttributeIntr(return_type=NamedType("long"),
return_range=prange.positive_values),
"shape": AttributeIntr(),
"size": AttributeIntr(return_type=NamedType("long"),
return_range=prange.positive_values),
"strides": AttributeIntr(),
"T": AttributeIntr(),
"tolist": ConstMethodIntr(),
"tostring": ConstMethodIntr(),
},
}
MODULES = {
"__builtin__": {
"pythran": {
"len_set": ConstFunctionIntr()
},
"abs": ConstFunctionIntr(),
"BaseException": ConstExceptionIntr(),
"SystemExit": ConstExceptionIntr(),
"KeyboardInterrupt": ConstExceptionIntr(),
"GeneratorExit": ConstExceptionIntr(),
"Exception": ExceptionClass(CLASSES["Exception"]),
"StopIteration": ConstExceptionIntr(),
"StandardError": ConstExceptionIntr(),
"Warning": ConstExceptionIntr(),
"BytesWarning": ConstExceptionIntr(),
"UnicodeWarning": ConstExceptionIntr(),
"ImportWarning": ConstExceptionIntr(),
"FutureWarning": ConstExceptionIntr(),
"UserWarning": ConstExceptionIntr(),
"SyntaxWarning": ConstExceptionIntr(),
"RuntimeWarning": ConstExceptionIntr(),
"PendingDeprecationWarning": ConstExceptionIntr(),
"DeprecationWarning": ConstExceptionIntr(),
"BufferError": ConstExceptionIntr(),
"ArithmeticError": ConstExceptionIntr(),
"AssertionError": ConstExceptionIntr(),
"AttributeError": ConstExceptionIntr(),
"EnvironmentError": ConstExceptionIntr(),
"EOFError": ConstExceptionIntr(),
"ImportError": ConstExceptionIntr(),
"LookupError": ConstExceptionIntr(),
"MemoryError": ConstExceptionIntr(),
"NameError": ConstExceptionIntr(),
"ReferenceError": ConstExceptionIntr(),
"RuntimeError": ConstExceptionIntr(),
"SyntaxError": ConstExceptionIntr(),
"SystemError": ConstExceptionIntr(),
"TypeError": ConstExceptionIntr(),
"ValueError": ConstExceptionIntr(),
"FloatingPointError": ConstExceptionIntr(),
"OverflowError": ConstExceptionIntr(),
"ZeroDivisionError": ConstExceptionIntr(),
"IOError": ConstExceptionIntr(),
"OSError": ConstExceptionIntr(),
"IndexError": ConstExceptionIntr(),
"KeyError": ConstExceptionIntr(),
"UnboundLocalError": ConstExceptionIntr(),
"NotImplementedError": ConstExceptionIntr(),
"IndentationError": ConstExceptionIntr(),
"TabError": ConstExceptionIntr(),
"UnicodeError": ConstExceptionIntr(),
# "UnicodeDecodeError": ConstExceptionIntr(),
# "UnicodeEncodeError": ConstExceptionIntr(),
# "UnicodeTranslateError": ConstExceptionIntr(),
"all": ReadOnceFunctionIntr(return_range=prange.bool_values),
"any": ReadOnceFunctionIntr(return_range=prange.bool_values),
"bin": ConstFunctionIntr(),
"bool_": ConstFunctionIntr(return_range=prange.bool_values),
"chr": ConstFunctionIntr(),
"cmp": ConstFunctionIntr(return_range=prange.cmp_values),
"complex": ClassWithConstConstructor(CLASSES['complex']),
"dict": ClassWithReadOnceConstructor(CLASSES['dict']),
"divmod": ConstFunctionIntr(),
"enumerate": ReadOnceFunctionIntr(),
"file": ClassWithConstConstructor(CLASSES['file'],
global_effects=True),
"filter": ReadOnceFunctionIntr(),
"float_": ClassWithConstConstructor(CLASSES['float']),
"getattr": ConstFunctionIntr(),
"hex": ConstFunctionIntr(),
"id": ConstFunctionIntr(),
"int_": ConstFunctionIntr(),
"iter": FunctionIntr(), # not const
"len": ConstFunctionIntr(return_range=prange.positive_values),
"list": ClassWithReadOnceConstructor(CLASSES['list']),
"long_": ConstFunctionIntr(),
"map": ReadOnceFunctionIntr(),
"max": ReadOnceFunctionIntr(return_range=prange.max_values),
"min": ReadOnceFunctionIntr(return_range=prange.min_values),
"next": FunctionIntr(), # not const
"oct": ConstFunctionIntr(),
"ord": ConstFunctionIntr(return_range=prange.ord_values),
"open": ConstFunctionIntr(global_effects=True),
"pow": ConstFunctionIntr(),
"range": ConstFunctionIntr(return_range_content=prange.range_values),
"reduce": ReadOnceFunctionIntr(),
"reversed": ReadOnceFunctionIntr(),
"round": ConstFunctionIntr(),
"set": ClassWithReadOnceConstructor(CLASSES['set']),
"sorted": ConstFunctionIntr(),
"str": ClassWithConstConstructor(CLASSES['str']),
"sum": ReadOnceFunctionIntr(),
"tuple": ReadOnceFunctionIntr(),
"xrange": ConstFunctionIntr(return_range_content=prange.range_values),
"zip": ReadOnceFunctionIntr(),
"False": ConstantIntr(return_range=lambda args: prange.Range(0, 0)),
"None": ConstantIntr(),
"True": ConstantIntr(return_range=lambda args: prange.Range(1, 1)),
},
"numpy": {
"abs": ConstFunctionIntr(),
"absolute": ConstFunctionIntr(),
"add": ConstFunctionIntr(),
"alen": ConstFunctionIntr(return_range=prange.positive_values),
"all": ConstMethodIntr(return_range=prange.bool_values),
"allclose": ConstFunctionIntr(return_range=prange.bool_values),
"alltrue": ConstFunctionIntr(return_range=prange.bool_values),
"amax": ConstFunctionIntr(),
"amin": ConstFunctionIntr(),
"angle": ConstFunctionIntr(),
"any": ConstMethodIntr(return_range=prange.bool_values),
"append": ConstFunctionIntr(),
"arange": ConstFunctionIntr(return_range_content=prange.range_values,
args=('start', 'stop', 'step', 'dtype'),
defaults=(1, None)),
"arccos": ConstFunctionIntr(),
"arccosh": ConstFunctionIntr(),
"arcsin": ConstFunctionIntr(),
"arcsinh": ConstFunctionIntr(),
"arctan": ConstFunctionIntr(),
"arctan2": ConstFunctionIntr(),
"arctanh": ConstFunctionIntr(),
"argmax": ConstMethodIntr(),
"argmin": ConstMethodIntr(),
"argsort": ConstFunctionIntr(),
"argwhere": ConstFunctionIntr(),
"around": ConstFunctionIntr(),
"array": ConstFunctionIntr(),
"array2string": ConstFunctionIntr(),
"array_equal": ConstFunctionIntr(return_range=prange.bool_values),
"array_equiv": ConstFunctionIntr(return_range=prange.bool_values),
"array_split": ConstFunctionIntr(),
"array_str": ConstFunctionIntr(),
"asarray": ConstFunctionIntr(),
"asarray_chkfinite": ConstFunctionIntr(
return_range=prange.bool_values),
"ascontiguousarray": ConstFunctionIntr(),
"asfarray": ConstFunctionIntr(),
"asscalar": ConstFunctionIntr(),
"atleast_1d": ConstFunctionIntr(),
"atleast_2d": ConstFunctionIntr(),
"atleast_3d": ConstFunctionIntr(),
"average": ConstFunctionIntr(),
"base_repr": ConstFunctionIntr(),
"binary_repr": ConstFunctionIntr(),
"bincount": ConstFunctionIntr(),
"bitwise_and": ConstFunctionIntr(),
"bitwise_not": ConstFunctionIntr(),
"bitwise_or": ConstFunctionIntr(),
"bitwise_xor": ConstFunctionIntr(),
"ceil": ConstFunctionIntr(),
"clip": ConstMethodIntr(),
"concatenate": ConstFunctionIntr(),
"complex": ConstFunctionIntr(),
"complex64": ConstFunctionIntr(),
"complex128": ConstFunctionIntr(),
"conj": ConstMethodIntr(),
"conjugate": ConstMethodIntr(),
"copy": ConstMethodIntr(),
"copyto": FunctionIntr(argument_effects=[UpdateEffect(), ReadEffect(),
ReadEffect(), ReadEffect()]),
"copysign": ConstFunctionIntr(),
"count_nonzero": ConstFunctionIntr(),
"cos": ConstFunctionIntr(),
"cosh": ConstFunctionIntr(),
"cumprod": ConstMethodIntr(),
"cumproduct": ConstFunctionIntr(),
"cumsum": ConstMethodIntr(),
"deg2rad": ConstFunctionIntr(),
"degrees": ConstFunctionIntr(),
"delete_": ConstFunctionIntr(),
"diag": ConstFunctionIntr(),
"diagflat": ConstFunctionIntr(),
"diagonal": ConstMethodIntr(),
"diff": ConstFunctionIntr(),
"digitize": ConstFunctionIntr(),
"divide": ConstFunctionIntr(),
"dot": ConstMethodIntr(),
"double_": ConstFunctionIntr(),
"e": ConstantIntr(),
"ediff1d": ConstFunctionIntr(),
"empty": ConstFunctionIntr(args=('shape', 'dtype'),
defaults=("numpy.float64",)),
"empty_like": ConstFunctionIntr(args=('a', 'dtype'),
defaults=("numpy.float64",)),
"equal": ConstFunctionIntr(),
"exp": ConstFunctionIntr(),
"expm1": ConstFunctionIntr(),
"eye": ConstFunctionIntr(),
"fabs": ConstFunctionIntr(),
"finfo": ClassWithConstConstructor(CLASSES['finfo']),
"fix": ConstFunctionIntr(),
"flatnonzero": ConstFunctionIntr(),
"fliplr": ConstFunctionIntr(),
"flipud": ConstFunctionIntr(),
"float32": ConstFunctionIntr(),
"float64": ConstFunctionIntr(),
"float_": ConstFunctionIntr(),
"floor": ConstFunctionIntr(),
"floor_divide": ConstFunctionIntr(),
"fmax": ConstFunctionIntr(),
"fmin": ConstFunctionIntr(),
"fmod": ConstFunctionIntr(),
"frexp": ConstFunctionIntr(),
"fromfunction": ConstFunctionIntr(),
"fromiter": ConstFunctionIntr(),
"fromstring": ConstFunctionIntr(),
"greater": ConstFunctionIntr(),
"greater_equal": ConstFunctionIntr(),
"hstack": ConstFunctionIntr(),
"hypot": ConstFunctionIntr(),
"identity": ConstFunctionIntr(),
"imag": FunctionIntr(),
"indices": ConstFunctionIntr(),
"inf": ConstantIntr(),
"inner": ConstFunctionIntr(),
"insert": ConstFunctionIntr(),
"intersect1d": ConstFunctionIntr(),
"int16": ConstFunctionIntr(),
"int32": ConstFunctionIntr(),
"int64": ConstFunctionIntr(),
"int8": ConstFunctionIntr(),
"invert": ConstFunctionIntr(),
"isclose": ConstFunctionIntr(),
"iscomplex": ConstFunctionIntr(),
"isfinite": ConstFunctionIntr(),
"isinf": ConstFunctionIntr(),
"isnan": ConstFunctionIntr(),
"isneginf": ConstFunctionIntr(),
"isposinf": ConstFunctionIntr(),
"isreal": ConstFunctionIntr(),
"isrealobj": ConstFunctionIntr(),
"isscalar": ConstFunctionIntr(),
"issctype": ConstFunctionIntr(),
"ldexp": ConstFunctionIntr(),
"left_shift": ConstFunctionIntr(),
"less": ConstFunctionIntr(),
"less_equal": ConstFunctionIntr(),
"lexsort": ConstFunctionIntr(),
"linalg": {
"norm": FunctionIntr(args=('x', 'ord', 'axis'),
defaults=(None, None)),
},
"linspace": ConstFunctionIntr(),
"log": ConstFunctionIntr(),
"log10": ConstFunctionIntr(),
"log1p": ConstFunctionIntr(),
"log2": ConstFunctionIntr(),
"logaddexp": ConstFunctionIntr(),
"logaddexp2": ConstFunctionIntr(),
"logspace": ConstFunctionIntr(),
"logical_and": ConstFunctionIntr(),
"logical_not": ConstFunctionIntr(),
"logical_or": ConstFunctionIntr(),
"logical_xor": ConstFunctionIntr(),
"max": ConstMethodIntr(),
"maximum": ConstFunctionIntr(),
"mean": ConstMethodIntr(),
"median": ConstFunctionIntr(),
"min": ConstMethodIntr(),
"minimum": ConstFunctionIntr(),
"mod": ConstFunctionIntr(),
"multiply": ConstFunctionIntr(),
"nan": ConstantIntr(),
"nan_to_num": ConstFunctionIntr(),
"nanargmax": ConstFunctionIntr(),
"nanargmin": ConstFunctionIntr(),
"nanmax": ConstFunctionIntr(),
"nanmin": ConstFunctionIntr(),
"nansum": ConstFunctionIntr(),
"ndenumerate": ConstFunctionIntr(),
"ndarray": ClassWithConstConstructor(CLASSES["ndarray"]),
"ndindex": ConstFunctionIntr(),
"ndim": ConstFunctionIntr(return_range=prange.positive_values),
"negative": ConstFunctionIntr(),
"newaxis": ConstantIntr(),
"nextafter": ConstFunctionIntr(),
"NINF": ConstantIntr(),
"nonzero": ConstMethodIntr(),
"not_equal": ConstFunctionIntr(),
"ones": ConstFunctionIntr(),
"ones_like": ConstFunctionIntr(),
"outer": ConstFunctionIntr(),
"pi": ConstantIntr(),
"place": FunctionIntr(),
"power": ConstFunctionIntr(),
"prod": ConstMethodIntr(),
"product": ConstFunctionIntr(),
"ptp": ConstMethodIntr(),
"put": MethodIntr(),
"putmask": FunctionIntr(),
"rad2deg": ConstFunctionIntr(),
"radians": ConstFunctionIntr(),
"random": {
"binomial": FunctionIntr(args=('n', 'p', 'size'),
global_effects=True),
"bytes": FunctionIntr(args=('length',),
global_effects=True),
"choice": FunctionIntr(args=('a', 'size', 'replace', 'p'),
global_effects=True),
"normal": FunctionIntr(args=('loc', 'scale', 'size',),
defaults=(0.0, 1.0, None,),
global_effects=True),
"rand": FunctionIntr(args=(),
global_effects=True),
"ranf": FunctionIntr(args=('size',),
global_effects=True),
"randint": FunctionIntr(args=("low", "high", "size"),
global_effects=True),
"randn": FunctionIntr(args=(),
global_effects=True),
"random": FunctionIntr(args=('size',),
global_effects=True),
"random_integers": FunctionIntr(args=("low", "high", "size"),
global_effects=True),
"random_sample": FunctionIntr(args=('size',),
global_effects=True),
"sample": FunctionIntr(args=('size',),
global_effects=True),
"standard_normal": FunctionIntr(args=('size',),
global_effects=True),
},
"rank": ConstFunctionIntr(),
"ravel": ConstMethodIntr(),
"real": FunctionIntr(),
"reciprocal": ConstFunctionIntr(),
"remainder": ConstFunctionIntr(),
"repeat": ConstMethodIntr(),
"reshape": ConstMethodIntr(),
"resize": ConstMethodIntr(),
"right_shift": ConstFunctionIntr(),
"rint": ConstFunctionIntr(),
"roll": ConstFunctionIntr(),
"rollaxis": ConstFunctionIntr(),
"rot90": ConstFunctionIntr(),
"round": ConstMethodIntr(),
"round_": ConstMethodIntr(),
"searchsorted": ConstFunctionIntr(),
"select": ConstFunctionIntr(),
"shape": ConstFunctionIntr(),
"sign": ConstFunctionIntr(),
"signbit": ConstFunctionIntr(),
"sin": ConstFunctionIntr(),
"sinh": ConstFunctionIntr(),
"size": ConstFunctionIntr(return_range=prange.positive_values),
"sometrue": ConstFunctionIntr(),
"sort": ConstFunctionIntr(),
"sort_complex": ConstFunctionIntr(),
"spacing": ConstFunctionIntr(),
"split": ConstFunctionIntr(),
"sqrt": ConstFunctionIntr(),
"square": ConstFunctionIntr(),
"std_": ConstMethodIntr(args=('a', 'axis', 'dtype'),
defaults=(None, None)),
"subtract": ConstFunctionIntr(),
"sum": ConstMethodIntr(),
"swapaxes": ConstMethodIntr(),
"take": ConstMethodIntr(),
"tan": ConstFunctionIntr(),
"tanh": ConstFunctionIntr(),
"tile": ConstFunctionIntr(),
"trace": ConstMethodIntr(),
"transpose": ConstMethodIntr(),
"tri": ConstMethodIntr(),
"tril": ConstMethodIntr(),
"trim_zeros": ConstMethodIntr(),
"triu": ConstMethodIntr(),
"true_divide": ConstFunctionIntr(),
"trunc": ConstFunctionIntr(),
"uint16": ConstFunctionIntr(),
"uint32": ConstFunctionIntr(),
"uint64": ConstFunctionIntr(),
"uint8": ConstFunctionIntr(),
"union1d": ConstFunctionIntr(),
"unique": ConstFunctionIntr(),
"unwrap": ConstFunctionIntr(),
"var": ConstMethodIntr(),
"vstack": ConstFunctionIntr(),
"where": ConstFunctionIntr(),
"zeros": ConstFunctionIntr(args=('shape', 'dtype'),
defaults=("numpy.float64",)),
"zeros_like": ConstFunctionIntr(),
},
"time": {
"sleep": FunctionIntr(global_effects=True),
"time": FunctionIntr(global_effects=True),
},
"math": {
"isinf": ConstFunctionIntr(),
"modf": ConstFunctionIntr(),
"frexp": ConstFunctionIntr(),
"factorial": ConstFunctionIntr(),
"gamma": ConstFunctionIntr(),
"lgamma": ConstFunctionIntr(),
"trunc": ConstFunctionIntr(),
"erf": ConstFunctionIntr(),
"erfc": ConstFunctionIntr(),
"asinh": ConstFunctionIntr(),
"atanh": ConstFunctionIntr(),
"acosh": ConstFunctionIntr(),
"radians": ConstFunctionIntr(),
"degrees": ConstFunctionIntr(),
"hypot": ConstFunctionIntr(),
"tanh": ConstFunctionIntr(),
"cosh": ConstFunctionIntr(),
"sinh": ConstFunctionIntr(),
"atan": ConstFunctionIntr(),
"atan2": ConstFunctionIntr(),
"asin": ConstFunctionIntr(),
"tan": ConstFunctionIntr(),
"log": ConstFunctionIntr(),
"log1p": ConstFunctionIntr(),
"expm1": ConstFunctionIntr(),
"ldexp": ConstFunctionIntr(),
"fmod": ConstFunctionIntr(),
"fabs": ConstFunctionIntr(),
"copysign": ConstFunctionIntr(),
"acos": ConstFunctionIntr(),
"cos": ConstFunctionIntr(),
"sin": ConstFunctionIntr(),
"exp": ConstFunctionIntr(),
"sqrt": ConstFunctionIntr(),
"log10": ConstFunctionIntr(),
"isnan": ConstFunctionIntr(),
"ceil": ConstFunctionIntr(),
"floor": ConstFunctionIntr(),
"pow": ConstFunctionIntr(),
"pi": ConstantIntr(),
"e": ConstantIntr(),
},
"functools": {
"partial": FunctionIntr(),
},
"bisect": {
"bisect_left": ConstFunctionIntr(return_range=prange.positive_values),
"bisect_right": ConstFunctionIntr(return_range=prange.positive_values),
"bisect": ConstFunctionIntr(return_range=prange.positive_values),
},
"cmath": {
"cos": FunctionIntr(),
"sin": FunctionIntr(),
"exp": FunctionIntr(),
"sqrt": FunctionIntr(),
"log10": FunctionIntr(),
"isnan": FunctionIntr(),
"pi": ConstantIntr(),
"e": ConstantIntr(),
},
"itertools": {
"count": ReadOnceFunctionIntr(),
"imap": ReadOnceFunctionIntr(),
"ifilter": ReadOnceFunctionIntr(),
"islice": ReadOnceFunctionIntr(),
"product": ConstFunctionIntr(),
"izip": ReadOnceFunctionIntr(),
"combinations": ConstFunctionIntr(),
"permutations": ConstFunctionIntr(),
},
"random": {
"seed": FunctionIntr(global_effects=True),
"random": FunctionIntr(global_effects=True),
"randint": FunctionIntr(global_effects=True),
"randrange": FunctionIntr(global_effects=True),
"gauss": FunctionIntr(global_effects=True),
"uniform": FunctionIntr(global_effects=True),
"expovariate": FunctionIntr(global_effects=True),
"sample": FunctionIntr(global_effects=True),
"choice": FunctionIntr(global_effects=True),
"shuffle": FunctionIntr(global_effects=True),
},
"omp": {
"set_num_threads": FunctionIntr(global_effects=True),
"get_num_threads": FunctionIntr(global_effects=True),
"get_max_threads": FunctionIntr(global_effects=True),
"get_thread_num": FunctionIntr(global_effects=True),
"get_num_procs": FunctionIntr(global_effects=True),
"in_parallel": FunctionIntr(global_effects=True),
"set_dynamic": FunctionIntr(global_effects=True),
"get_dynamic": FunctionIntr(global_effects=True),
"set_nested": FunctionIntr(global_effects=True),
"get_nested": FunctionIntr(global_effects=True),
"init_lock": FunctionIntr(global_effects=True),
"destroy_lock": FunctionIntr(global_effects=True),
"set_lock": FunctionIntr(global_effects=True),
"unset_lock": FunctionIntr(global_effects=True),
"test_lock": FunctionIntr(global_effects=True),
"init_nest_lock": FunctionIntr(global_effects=True),
"destroy_nest_lock": FunctionIntr(global_effects=True),
"set_nest_lock": FunctionIntr(global_effects=True),
"unset_nest_lock": FunctionIntr(global_effects=True),
"test_nest_lock": FunctionIntr(global_effects=True),
"get_wtime": FunctionIntr(global_effects=True),
"get_wtick": FunctionIntr(global_effects=True),
"set_schedule": FunctionIntr(global_effects=True),
"get_schedule": FunctionIntr(global_effects=True),
"get_thread_limit": FunctionIntr(global_effects=True),
"set_max_active_levels": FunctionIntr(global_effects=True),
"get_max_active_levels": FunctionIntr(global_effects=True),
"get_level": FunctionIntr(global_effects=True),
"get_ancestor_thread_num": FunctionIntr(global_effects=True),
"get_team_size": FunctionIntr(global_effects=True),
"get_active_level": FunctionIntr(global_effects=True),
"in_final": FunctionIntr(global_effects=True),
},
"operator_": {
"lt": ConstFunctionIntr(),
"le": ConstFunctionIntr(),
"eq": ConstFunctionIntr(),
"ne": ConstFunctionIntr(),
"ge": ConstFunctionIntr(),
"gt": ConstFunctionIntr(),
"__lt__": ConstFunctionIntr(),
"__le__": ConstFunctionIntr(),
"__eq__": ConstFunctionIntr(),
"__ne__": ConstFunctionIntr(),
"__ge__": ConstFunctionIntr(),
"__gt__": ConstFunctionIntr(),
"not_": ConstFunctionIntr(),
"__not__": ConstFunctionIntr(),
"truth": ConstFunctionIntr(),
"is_": ConstFunctionIntr(),
"is_not": ConstFunctionIntr(),
"abs": ConstFunctionIntr(),
"__abs__": ConstFunctionIntr(),
"add": ConstFunctionIntr(),
"__add__": ConstFunctionIntr(),
"and_": ConstFunctionIntr(),
"__and__": ConstFunctionIntr(),
"div": ConstFunctionIntr(),
"__div__": ConstFunctionIntr(),
"floordiv": ConstFunctionIntr(),
"__floordiv__": ConstFunctionIntr(),
"inv": ConstFunctionIntr(),
"invert": ConstFunctionIntr(),
"__inv__": ConstFunctionIntr(),
"__invert__": ConstFunctionIntr(),
"lshift": ConstFunctionIntr(),
"__lshift__": ConstFunctionIntr(),
"mod": ConstFunctionIntr(),
"__mod__": ConstFunctionIntr(),
"mul": ConstFunctionIntr(),
"__mul__": ConstFunctionIntr(),
"neg": ConstFunctionIntr(),
"__neg__": ConstFunctionIntr(),
"or_": ConstFunctionIntr(),
"__or__": ConstFunctionIntr(),
"pos": ConstFunctionIntr(),
"__pos__": ConstFunctionIntr(),
"rshift": ConstFunctionIntr(),
"__rshift__": ConstFunctionIntr(),
"sub": ConstFunctionIntr(),
"__sub__": ConstFunctionIntr(),
"truediv": ConstFunctionIntr(),
"__truediv__": ConstFunctionIntr(),
"xor_": ConstFunctionIntr(),
"__xor__": ConstFunctionIntr(),
"concat": ConstFunctionIntr(),
"__concat__": ConstFunctionIntr(),
"iadd": MethodIntr(update_effects),
"__iadd__": MethodIntr(update_effects),
"iand": MethodIntr(update_effects),
"__iand__": MethodIntr(update_effects),
"iconcat": MethodIntr(update_effects),
"__iconcat__": MethodIntr(update_effects),
"idiv": MethodIntr(update_effects),
"__idiv__": MethodIntr(update_effects),
"ifloordiv": MethodIntr(update_effects),
"__ifloordiv__": MethodIntr(update_effects),
"ilshift": MethodIntr(update_effects),
"__ilshift__": MethodIntr(update_effects),
"imod": MethodIntr(update_effects),
"__imod__": MethodIntr(update_effects),
"imul": MethodIntr(update_effects),
"__imul__": MethodIntr(update_effects),
"ior": MethodIntr(update_effects),
"__ior__": MethodIntr(update_effects),
"ipow": MethodIntr(update_effects),
"__ipow__": MethodIntr(update_effects),
"irshift": MethodIntr(update_effects),
"__irshift__": MethodIntr(update_effects),
"isub": MethodIntr(update_effects),
"__isub__": MethodIntr(update_effects),
"itruediv": MethodIntr(update_effects),
"__itruediv__": MethodIntr(update_effects),
"ixor": MethodIntr(update_effects),
"__ixor__": MethodIntr(update_effects),
"contains": MethodIntr(update_effects),
"__contains__": ConstFunctionIntr(),
"countOf": ConstFunctionIntr(),
"delitem": FunctionIntr(
argument_effects=[UpdateEffect(), ReadEffect()]),
"__delitem__": FunctionIntr(
argument_effects=[UpdateEffect(), ReadEffect()]),
"getitem": ConstFunctionIntr(),
"__getitem__": ConstFunctionIntr(),
"indexOf": ConstFunctionIntr(),
"__theitemgetter__": ConstFunctionIntr(),
"itemgetter": MethodIntr(
return_alias=lambda _: {
MODULES['operator_']['__theitemgetter__']}
),
},
"string": {
"ascii_lowercase": ConstantIntr(),
"ascii_uppercase": ConstantIntr(),
"ascii_letters": ConstantIntr(),
"digits": ConstantIntr(),
"find": ConstFunctionIntr(),
"hexdigits": ConstantIntr(),
"octdigits": ConstantIntr(),
},
"os": {
"path": {
"join": ConstFunctionIntr(),
}
},
# conflicting method names must be listed here
"__dispatch__": {
"clear": MethodIntr(),
"conjugate": ConstMethodIntr(),
"copy": ConstMethodIntr(),
"count": ConstMethodIntr(return_range=prange.positive_values),
"next": MethodIntr(global_effects=True), # because of file.next
"pop": MethodIntr(),
"remove": MethodIntr(),
"update": MethodIntr(update_effects),
},
}
if sys.version_info[0] > 2:
sys.modules['__builtin__'] = sys.modules['builtins']
if 'VMSError' in sys.modules['__builtin__'].__dict__:
MODULES['__builtin__']['VMSError'] = ConstExceptionIntr()
if 'WindowsError' in sys.modules['__builtin__'].__dict__:
MODULES['__builtin__']['WindowsError'] = ConstExceptionIntr()
try:
__import__("omp")
except ImportError:
    logger.warning("Pythran support disabled for module: omp")
del MODULES["omp"]
# take a snapshot of the keys: entries are deleted while filtering (Python 3)
for method in list(MODULES['numpy'].keys()):
if (method not in sys.modules['numpy'].__dict__ and not
(method[-1:] == '_' and method[:-1] in cxx_keywords and
method[:-1] in sys.modules['numpy'].__dict__)):
del MODULES['numpy'][method]
def save_arguments(module_name, elements):
""" Recursively save arguments name and default value. """
for elem, signature in elements.items():
if isinstance(signature, dict): # Submodule case
save_arguments(module_name + (elem,), signature)
else:
# use introspection to get the Python obj
try:
themodule = __import__(".".join(module_name))
obj = getattr(themodule, elem)
spec = inspect.getargspec(obj)
assert not signature.args.args
signature.args.args = [ast.Name(arg, ast.Param())
for arg in spec.args]
if spec.defaults:
                    signature.args.defaults = list(map(to_ast, spec.defaults))
except (AttributeError, ImportError, TypeError, ToNotEval):
pass
save_arguments((), MODULES)
def fill_constants_types(module_name, elements):
""" Recursively save arguments name and default value. """
for elem, intrinsic in elements.items():
if isinstance(intrinsic, dict): # Submodule case
fill_constants_types(module_name + (elem,), intrinsic)
elif isinstance(intrinsic, ConstantIntr):
# use introspection to get the Python constants types
cst = getattr(__import__(".".join(module_name)), elem)
intrinsic.return_type = NamedType(PYTYPE_TO_CTYPE_TABLE[type(cst)])
fill_constants_types((), MODULES)
methods = {}
def save_method(elements, module_path):
""" Recursively save methods with module name and signature. """
for elem, signature in elements.items():
if isinstance(signature, dict): # Submodule case
save_method(signature, module_path + (elem,))
elif isinstance(signature, Class):
save_method(signature.fields, module_path + (elem,))
elif signature.ismethod():
# in case of duplicates, there must be a __dispatch__ record
# and it is the only recorded one
if elem in methods and module_path[0] != '__dispatch__':
assert elem in MODULES['__dispatch__']
path = ('__dispatch__',)
methods[elem] = (path, MODULES['__dispatch__'][elem])
else:
methods[elem] = (module_path, signature)
save_method(MODULES, ())
functions = {}
def save_function(elements, module_path):
""" Recursively save functions with module name and signature. """
for elem, signature in elements.items():
if isinstance(signature, dict): # Submodule case
save_function(signature, module_path + (elem,))
elif signature.isstaticfunction():
functions.setdefault(elem, []).append((module_path, signature,))
elif isinstance(signature, Class):
save_function(signature.fields, module_path + (elem,))
save_function(MODULES, ())
attributes = {}
def save_attribute(elements, module_path):
""" Recursively save attributes with module name and signature. """
for elem, signature in elements.items():
if isinstance(signature, dict): # Submodule case
save_attribute(signature, module_path + (elem,))
elif signature.isattribute():
assert elem not in attributes # we need unicity
attributes[elem] = (module_path, signature,)
elif isinstance(signature, Class):
save_attribute(signature.fields, module_path + (elem,))
save_attribute(MODULES, ())
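# Sketch of the lookup tables built above (paths come from MODULES; exact
# contents depend on the registered intrinsics):
#     >>> methods['append'][0]          # methods map to their class path
#     ('__builtin__', 'list')
#     >>> attributes['shape'][0]        # attributes are unique by name
#     ('numpy', 'ndarray')
#     >>> sorted(path for path, _ in functions['cos'])   # functions may be overloaded
#     [('cmath',), ('math',), ('numpy',)]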
|
import datetime
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django import forms
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from django.forms import extras
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from education.water_polls_view_helper import get_location_for_water_view, get_all_responses
from poll.models import Poll
from education.models import EmisReporter, ScriptScheduleTime
from rapidsms.contrib.locations.models import Location
from script.models import ScriptProgress, Script
from unregister.models import Blacklist
@login_required
def schedule_water_polls(request):
schedule_form = ScheduleWaterPollForm()
context_dict = {}
if request.method == 'POST':
if request.POST['form_name'] == "water_poll_form":
schedule_form = ScheduleWaterPollForm(data=request.POST)
if schedule_form.is_valid():
schedule_date = schedule_form.cleaned_data['on_date']
head_teachers_group = Group.objects.get(name='Head Teachers')
water_script = Script.objects.get(slug='edtrac_script_water_source')
scheduled_for = schedule_script(schedule_date, get_valid_reporters(head_teachers_group), water_script)
ScriptScheduleTime.objects.create(script=water_script, scheduled_on=schedule_date)
context_dict['message'] = "Scheduled %s script for %s reporters" % (water_script.name, scheduled_for)
elif request.POST['form_name'] == "functional_water_poll_form":
today = datetime.date.today()
water_script = Script.objects.get(slug='edtrac_script_water_source')
functional_water_script = Script.objects.get(slug='edtrac_script_functional_water_source')
poll = Poll.objects.get(name="edtrac_water_source")
dt = _get_last_scheduled_date(water_script)
reporters = EmisReporter.objects.filter(
contact_ptr__in=poll.responses.filter(categories__category__name='yes', date__gte=dt).values_list(
'contact', flat=True).distinct())
scheduled_for = schedule_script(today, reporters, functional_water_script)
ScriptScheduleTime.objects.create(script=functional_water_script, scheduled_on=today)
context_dict['message'] = "Scheduled %s script for %s reporters" % (
functional_water_script.name, scheduled_for)
context_dict['form'] = schedule_form
return render_to_response('education/admin/schedule_water_polls.html', context_dict,
RequestContext(request))
def _get_last_scheduled_date(water_script):
scheduled_dates = ScriptScheduleTime.objects.filter(script=water_script).order_by('-scheduled_on').values_list(
'scheduled_on', flat=True)
return scheduled_dates[0] if len(scheduled_dates) > 0 else datetime.date.today()
class ScheduleWaterPollForm(forms.Form):
on_date = forms.DateField(label="Schedule Date: ", widget=extras.widgets.SelectDateWidget())
def clean_on_date(self):
on_date = self.cleaned_data['on_date']
if on_date < datetime.date.today():
            raise forms.ValidationError('Cannot schedule on a date in the past')
return on_date
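# Django invokes clean_<fieldname> hooks such as clean_on_date() after the
# field's own validation, so the ValidationError above is attached to the
# on_date field rather than reported as a non-field error.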
def schedule_script(date, reporters, script):
ScriptProgress.objects.filter(script=script).delete()
count = 0
for reporter in reporters:
if reporter.default_connection is not None:
            ScriptProgress.objects.create(time=date, connection=reporter.default_connection, script=script)
count += 1
return count
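# Note: schedule_script() deletes any pending ScriptProgress rows for the
# script first, so re-scheduling replaces queued runs instead of duplicating
# them; only reporters with a default_connection are counted.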
def get_valid_reporters(group):
return EmisReporter.objects.filter(groups=group, reporting_location__type__name='district').exclude(
connection__in=Blacklist.objects.values_list('connection', flat=True), schools=None)
def get_categories_and_data(responses):
responses.reverse()
categories = [response[0] for response in responses]
    return categories, [response[1].get('yes', 0) for response in responses]
def get_label_for_poll(poll):
    d = {
        'edtrac_water_source': 'water source',
        'edtrac_functional_water_source': 'functional water source',
        'water_and_soap': 'water and soap',
    }
return d.get(poll.name)
def _get_poll_data_dict(location, poll, time_range):
poll_data_dict = {}
response, monthly_response, percent = get_all_responses(poll, location, time_range)
categories, data = get_categories_and_data(monthly_response)
poll_data_dict['response'] = response
poll_data_dict['categories'] = categories
poll_data_dict['data'] = data
poll_data_dict['label'] = get_label_for_poll(poll)
poll_data_dict['school_percent'] = percent
return poll_data_dict
@login_required()
def detail_water_view(request, district=None):
    district_water_form = None
    partial_to_render = ''
    location, user_location = get_location_for_water_view(district, request)
profile = request.user.get_profile()
locations, user_location = [profile.location], profile.location.name
if profile.is_member_of('Ministry Officials') \
or profile.is_member_of('Admins') \
or profile.is_member_of('UNICEF Officials'):
district_water_form = DistrictWaterForm()
partial_to_render = 'education/admin/_district_water_form.html'
water_poll = Poll.objects.get(name='edtrac_water_source')
functional_water_poll = Poll.objects.get(
name='edtrac_functional_water_source')
water_and_soap_poll = Poll.objects.get(name='water_and_soap')
polls = [water_poll, functional_water_poll, water_and_soap_poll]
time_range = [
getattr(settings, 'SCHOOL_TERM_START'),
getattr(settings, 'SCHOOL_TERM_END')]
water_source_form = WaterForm()
if request.method == 'POST':
water_source_form = WaterForm(data=request.POST)
if water_source_form.is_valid():
to_date = water_source_form.cleaned_data['to_date']
from_date = water_source_form.cleaned_data['from_date']
time_range = [from_date, to_date]
data_list = []
for poll in polls:
data_list.append(_get_poll_data_dict(location, poll, time_range))
time_period = "Data shown for time: %s to %s" % (
time_range[0].strftime("%d %B %Y"),
time_range[1].strftime("%d %B %Y"))
variables = {
'data_list': data_list,
'form': water_source_form,
'partial_to_render': partial_to_render,
'user_location': user_location,
'location': location,
'time_period': time_period,
'district_form': district_water_form}
return render_to_response(
'education/admin/detail_water.html',
variables,
RequestContext(request))
@login_required()
def district_water_view(request):
if request.method == 'POST':
district = Location.objects.filter(id=request.POST['district_choices'], type='district')
if district:
return HttpResponseRedirect(reverse('detail-water-view', kwargs={'district':district[0].pk}))
return HttpResponseRedirect(reverse('detail-water-view'))
class WaterForm(forms.Form):
from_date = forms.DateTimeField()
to_date = forms.DateTimeField()
def clean(self):
data = self.cleaned_data
        if data.get('from_date') and data.get('to_date') \
                and data.get('from_date') > data.get('to_date'):
            raise forms.ValidationError("'From' date must not be later than 'to' date")
return data
class DistrictWaterForm(forms.Form):
district_list = list(Location.objects.filter(type='district'))
district_choices = forms.ChoiceField(choices=[(district.id, district.name) for district in district_list])
|
import sys
import traceback
from urllib import urlencode, unquote
from urlparse import parse_qs
import re
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from twisted.internet.error import ConnectionRefusedError
from twisted.web import http
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from vumi.transports.base import Transport
from vumi.utils import http_request_full, normalize_msisdn
from vumi import log
class YoHttpTransport(Transport):
    transport_type = 'sms'
def mkres(self, cls, publish_func, path_key):
resource = cls(self.config, publish_func)
self._resources.append(resource)
        return (resource, path_key)
@inlineCallbacks
def setup_transport(self):
log.msg("Setup yo transport %s" % self.config)
super(YoHttpTransport, self).setup_transport()
self._resources = []
resources = [self.mkres(YoReceiveSMSResource,
self.publish_message,
self.config['receive_path'])]
self.web_resources = yield self.start_web_resources(
resources, self.config['receive_port'])
def teardown_transport(self):
log.msg("STOP YO Transport")
if hasattr(self, 'web_resources'):
return self.web_resources.stopListening()
@inlineCallbacks
def handle_outbound_message(self, message):
log.msg("Outbound message %s" % repr(message))
try:
            origin = message['transport_metadata'].get(
                'customized_id', self.config['default_origin'])
params = {
'ybsacctno': self.config['ybsacctno'],
'password': self.config['password'],
'origin': origin,
'sms_content': message['content'].encode('utf-8'),
'destinations': message['to_addr'],
}
log.msg('Hitting %s with %s' % (self.config['url'], urlencode(params)))
response = yield http_request_full(
"%s?%s" % (self.config['url'], urlencode(params)),
"",
{'User-Agent': ['Vumi Yo Transport'],
'Content-Type': ['application/json;charset=UTF-8'], },
'GET')
if response.code != 200:
reason = "HTTP ERROR %s - %s" % (response.code, response.delivered_body)
log.error(reason)
yield self.publish_nack(message['message_id'], reason)
return
response_attr = parse_qs(unquote(response.delivered_body))
[ybs_status] = response_attr['ybs_autocreate_status']
ybs_msg = response_attr['ybs_autocreate_message'][0] if 'ybs_autocreate_message' in response_attr else None
if (ybs_status == 'ERROR'):
reason = "SERVICE ERROR %s - %s" % (ybs_status, ybs_msg)
log.error(reason)
yield self.publish_nack(message['message_id'], reason)
return
yield self.publish_ack(
user_message_id=message['message_id'],
sent_message_id=message['message_id'])
except Exception as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error(
"TRANSPORT ERROR: %r" %
traceback.format_exception(exc_type, exc_value, exc_traceback))
reason = "TRANSPORT ERROR %s" % (ex.message)
yield self.publish_nack(message['message_id'], reason)
class YoReceiveSMSResource(Resource):
isLeaf = True
def __init__(self, config, publish_func):
log.msg("Init ReceiveSMSResource %s" % (config))
self.config = config
self.publish_func = publish_func
self.transport_name = self.config['transport_name']
@inlineCallbacks
def do_render(self, request):
log.msg('got hit with %s' % request.args)
request.setResponseCode(http.OK)
request.setHeader('Content-Type', 'text/plain')
try:
yield self.publish_func(
transport_name=self.transport_name,
transport_type='sms',
                to_addr=(request.args['code'][0] or
                         self.config['default_origin']),
from_addr=request.args['sender'][0],
content=request.args['message'][0],
transport_metadata={}
)
        except Exception as e:
            request.setResponseCode(http.INTERNAL_SERVER_ERROR)
            log.msg("Error processing the request %s: %s" % (request, e))
request.finish()
def render(self, request):
self.do_render(request)
return NOT_DONE_YET
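# A minimal sketch of the gateway response handling above (the payload below is
# an assumed example, not a captured response): the Yo API answers with a
# URL-encoded key/value body that is unquoted and then parsed.
if __name__ == '__main__':
    sample = ('ybs_autocreate_status=ERROR'
              '&ybs_autocreate_message=YBS+AutoCreate+Subsystem%3A+Access+denied')
    attrs = parse_qs(unquote(sample))
    print(attrs['ybs_autocreate_status'])   # ['ERROR']
    print(attrs['ybs_autocreate_message'])  # ['YBS AutoCreate Subsystem: Access denied']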
|
__author__ = 'Juan'
from SurveyDataViewer.settings.base import *
DEBUG = True
TEMPLATE_DEBUG = True
STATIC_URL = '/static/'
SITE_URL = ''
MEDIA_ROOT = data["media_files_dir"]
MEDIA_URL = '/surveydata/'
|
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.core.indexes.period as period
from pandas.compat import lrange, PY3, text_type, lmap
from pandas import (Period, PeriodIndex, period_range, offsets, date_range,
Series, Index)
class TestPeriodIndex(object):
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='D')]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.Index(np.array(arr), dtype=object))
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
pytest.raises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_nano(self):
idx = period_range(start=Period(ordinal=1, freq='N'),
end=Period(ordinal=4, freq='N'), freq='N')
exp = PeriodIndex([Period(ordinal=1, freq='N'),
Period(ordinal=2, freq='N'),
Period(ordinal=3, freq='N'),
Period(ordinal=4, freq='N')], freq='N')
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
pytest.raises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
pytest.raises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
pytest.raises(ValueError, PeriodIndex, start=start, end=end)
pytest.raises(ValueError, PeriodIndex, start=start)
pytest.raises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
pytest.raises(ValueError, PeriodIndex, idx._ndarray_values)
pytest.raises(ValueError, PeriodIndex, list(idx._ndarray_values))
pytest.raises(TypeError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq='M')
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == 'M'
result = PeriodIndex(idx, freq='2M')
tm.assert_index_equal(result, idx.asfreq('2M'))
assert result.freq == '2M'
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq('2M'))
assert result.freq == '2M'
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
pytest.raises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-03'], freq='M')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[M]'
idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]')
exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[3D]'
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')
res = PeriodIndex(idx, dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-01'], freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
res = PeriodIndex(idx, freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
msg = 'specified freq and dtype are different'
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(['2011-01'], freq='M', dtype='period[D]')
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq='M')
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == 'M'
with tm.assert_raises_regex(ValueError, 'freq not specified'):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([pd.NaT, pd.NaT,
Period('2011-01', freq='M'),
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex([pd.NaT, pd.NaT])
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(['NaT', 'NaT'])
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(np.array(['NaT', 'NaT']))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')])
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')]))
# first element is pd.NaT
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')])
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')]))
def test_constructor_mixed(self):
idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,
'2012-01-01'])
exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=2, freq='M')
result = idx._simple_new(idx, name='p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype('i8'), name='p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new([pd.Period('2007-01', freq='M'),
pd.Period('2007-02', freq='M')],
name='p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(np.array([pd.Period('2007-01', freq='M'),
pd.Period('2007-02', freq='M')]),
name='p', freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq='M', name='p')
result = idx._simple_new(idx, name='p', freq='M')
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize('floats', [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
# GH#13079
with pytest.raises(TypeError):
pd.PeriodIndex._simple_new(floats, freq='M')
with pytest.raises(TypeError):
pd.PeriodIndex(floats, freq='M')
def test_constructor_nat(self):
pytest.raises(ValueError, period_range, start='NaT',
end='2011-01-01', freq='M')
pytest.raises(ValueError, period_range, start='2011-01-01',
end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
for func in [PeriodIndex, period_range]:
            # PeriodIndex and period_range must produce the same result
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03',
'2014-05', '2014-07'], freq='2M')
tm.assert_index_equal(pidx, expected)
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05',
'2014-01-08', '2014-01-11',
'2014-01-14'], freq='3D')
tm.assert_index_equal(pidx, expected)
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(['2011-01'], freq='-1M')
        msg = 'Frequency must be positive, because it represents span: 0M'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(['2011-01'], freq='0M')
        msg = 'Frequency must be positive, because it represents span: 0M'
with tm.assert_raises_regex(ValueError, msg):
period_range('2011-01', periods=3, freq='0M')
@pytest.mark.parametrize('freq', ['A', 'M', 'D', 'T', 'S'])
@pytest.mark.parametrize('mult', [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr,
periods=10).to_period(freqstr)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ['1D1H', '1H1D']:
pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)
            expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],
                                   freq='25H')
            tm.assert_index_equal(pidx, expected)
for freq, func in zip(['1D1H', '1H1D'], [PeriodIndex, period_range]):
pidx = func(start='2016-01-01', periods=2, freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],
freq='25H')
tm.assert_index_equal(pidx, expected)
def test_constructor(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert len(pi) == 365 * 9 + 2
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert len(pi) == 261 * 9
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert len(pi) == 365 * 24
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert len(pi) == 24 * 60
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert len(pi) == 24 * 60 * 60
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_constructor_error(self):
start = Period('02-Apr-2005', 'B')
end_intv = Period('2006-12-31', ('w', 1))
msg = 'start and end must have same freq'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start, end=end_intv)
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start)
@pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B',
'T', 'S', 'L', 'U', 'N', 'H'])
def test_recreate_from_data(self, freq):
org = PeriodIndex(start='2001/04/01', freq=freq, periods=1)
idx = PeriodIndex(org.values, freq=freq)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if PY3:
# unicode
types += text_type,
for t in types:
expected = Index(lmap(t, raw))
res = index.map(t)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, t) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
class TestSeriesPeriod(object):
def setup_method(self, method):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
def test_constructor_cant_cast_period(self):
with pytest.raises(TypeError):
Series(period_range('2000-01-01', periods=10, freq='D'),
dtype=float)
def test_constructor_cast_object(self):
s = Series(period_range('1/1/2000', periods=10), dtype=object)
exp = Series(period_range('1/1/2000', periods=10))
tm.assert_series_equal(s, exp)
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=32, FREQ='D', seed=0, trendtype="Lag1Trend",
                    cycle_length=7, transform="Quantization", sigma=0.0,
                    exog_count=0, ar_order=0)
|
"""
Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled
correctly.
"""
import TestGyp
import re
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'installname'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
def GetInstallname(p):
p = test.built_file_path(p, chdir=CHDIR)
r = re.compile(r'cmd LC_ID_DYLIB.*?name (.*?) \(offset \d+\)', re.DOTALL)
proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
assert not proc.returncode
m = r.search(o)
assert m
return m.group(1)
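  # otool -l reports each dylib's install name in a load command shaped like
  # (illustrative excerpt, not verbatim output):
  #     cmd LC_ID_DYLIB
  #     ...
  #     name /usr/local/lib/libdefault_installname.dylib (offset 24)
  # GetInstallname() extracts the 'name' field via the regex above.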
if (GetInstallname('libdefault_installname.dylib') !=
'/usr/local/lib/libdefault_installname.dylib'):
test.fail_test()
if (GetInstallname('My Framework.framework/My Framework') !=
'/Library/Frameworks/My Framework.framework/'
'Versions/A/My Framework'):
test.fail_test()
if (GetInstallname('libexplicit_installname.dylib') !=
'Trapped in a dynamiclib factory'):
test.fail_test()
if (GetInstallname('libexplicit_installname_base.dylib') !=
'@executable_path/../../../libexplicit_installname_base.dylib'):
test.fail_test()
if (GetInstallname('My Other Framework.framework/My Other Framework') !=
'@executable_path/../../../My Other Framework.framework/'
'Versions/A/My Other Framework'):
test.fail_test()
if (GetInstallname('libexplicit_installname_with_base.dylib') !=
'/usr/local/lib/libexplicit_installname_with_base.dylib'):
test.fail_test()
if (GetInstallname('libexplicit_installname_with_explicit_base.dylib') !=
'@executable_path/../libexplicit_installname_with_explicit_base.dylib'):
test.fail_test()
if (GetInstallname('libboth_base_and_installname.dylib') !=
'Still trapped in a dynamiclib factory'):
test.fail_test()
if (GetInstallname('install_name_with_info_plist.framework/'
'install_name_with_info_plist') !=
'/Library/Frameworks/install_name_with_info_plist.framework/'
'Versions/A/install_name_with_info_plist'):
test.fail_test()
if ('DYLIB_INSTALL_NAME_BASE:standardizepath: command not found' in
test.stdout()):
test.fail_test()
test.pass_test()
|
class StorageProperties(object):
def __init__(self):
self.compression = None
self.chunked = None
def getCompression(self):
return self.compression
def setCompression(self, compression):
self.compression = compression
def getChunked(self):
return self.chunked
def setChunked(self, chunked):
self.chunked = chunked
|
from sequana import snpeff
from easydev import TempFile
import os
from . import test_dir
sharedir=f"{test_dir}/data/vcf"
def test_snpeff():
    # a custom reference
fh_log = TempFile()
mydata = snpeff.SnpEff(annotation=f"{sharedir}/JB409847.gbk", log=fh_log.name)
with TempFile() as fh:
mydata.launch_snpeff(f"{sharedir}/JB409847.vcf", fh.name)
fh_log.delete()
    # cleanup
    for filename in ("snpEff.config", "snpEff_genes.txt", "snpEff_summary.html"):
        try:
            os.remove(filename)
        except OSError:
            pass
    try:
        snpeff.SnpEff(annotation="dummy")
        assert False, "SnpEff should raise SystemExit for an invalid annotation"
    except SystemExit:
        pass
def test_snpeff_download():
with TempFile() as fh:
snpeff.download_fasta_and_genbank("K01711", fh.name)
with TempFile() as fh:
        try:
            snpeff.download_fasta_and_genbank("dummyK01711", fh.name)
            assert False, "expected ValueError for an unknown accession"
        except ValueError:
            pass
def test_add_locus_no_modification():
mydata = snpeff.SnpEff(annotation=f"{sharedir}/JB409847.gbk")
with TempFile() as fh:
fastafile = f"{sharedir}/JB409847.fasta"
mydata.add_locus_in_fasta(fastafile, fh.name)
# cleanup
    try:
        os.remove("snpEff.config")
    except OSError:
        pass
def test_add_locus_with_modification():
# Alter the original GBK to alter the locus name
data = open(f"{sharedir}/JB409847.gbk", "r").read()
newdata = data.replace("JB409847", "DUMMY_JB409847")
fh = TempFile(suffix=".gbk")
with open(fh.name, 'w') as fout:
fout.write(newdata)
# Now we read this new GBK file that has a different locus name as
# compared to the fasta
mydata = snpeff.SnpEff(annotation=fh.name)
# Here is the corresponding FASTA
fasta = f"{sharedir}/JB409847.fasta"
with TempFile(suffix="fasta") as fh2:
mydata.add_locus_in_fasta(fasta, fh2.name)
# In theory, in the newly created fasta file, we should find back the
# DUMMY tag
# cleanup
        try:
            os.remove("snpEff.config")
        except OSError:
            pass
        with open(fh2.name, "r") as fin:
            data = fin.read()
assert "DUMMY" in data
fh.delete()
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("orgs", "0024_populate_org_backend")]
operations = [
migrations.AlterField(
model_name="orgbackend",
name="api_token",
field=models.CharField(default="", help_text="The API token for this backend", max_length=128),
preserve_default=False,
),
migrations.AlterField(
model_name="orgbackend",
name="backend_type",
field=models.CharField(default="", max_length=256),
preserve_default=False,
),
migrations.AlterField(
model_name="orgbackend",
name="host",
field=models.CharField(default="", max_length=128),
preserve_default=False,
),
migrations.AlterUniqueTogether(name="orgbackend", unique_together=set([("org", "slug")])),
]
|
from __future__ import absolute_import, unicode_literals
from django.http import Http404
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.template.loader import get_template
def learnings(request, slug):
    file_name = (slug.replace('-', '_') + '.html').lower()
    try:
        get_template('learnings/' + file_name)
    except TemplateDoesNotExist:
        raise Http404("Sorry, no such page exists")
    return render(request, 'learnings/' + file_name)
def projects(request, slug):
    file_name = (slug.replace('-', '_') + '.html').lower()
    try:
        get_template('projects/' + file_name)
    except TemplateDoesNotExist:
        raise Http404("Sorry, no such page exists")
    return render(request, 'projects/' + file_name)
def writings(request, slug):
    file_name = (slug.replace('-', '_') + '.html').lower()
    try:
        get_template('writings/' + file_name)
    except TemplateDoesNotExist:
        raise Http404("Sorry, no such page exists")
    return render(request, 'writings/' + file_name)
|
from collections import deque
import psutil
from .compatibility import WINDOWS
from .metrics import time
class SystemMonitor:
def __init__(self, n=10000):
self.proc = psutil.Process()
self.time = deque(maxlen=n)
self.cpu = deque(maxlen=n)
self.memory = deque(maxlen=n)
self.count = 0
self.quantities = {"cpu": self.cpu, "memory": self.memory, "time": self.time}
try:
ioc = psutil.net_io_counters()
except Exception:
self._collect_net_io_counters = False
else:
self.last_time = time()
self.read_bytes = deque(maxlen=n)
self.write_bytes = deque(maxlen=n)
self.quantities["read_bytes"] = self.read_bytes
self.quantities["write_bytes"] = self.write_bytes
self._last_io_counters = ioc
self._collect_net_io_counters = True
if not WINDOWS:
self.num_fds = deque(maxlen=n)
self.quantities["num_fds"] = self.num_fds
self.update()
def recent(self):
try:
return {k: v[-1] for k, v in self.quantities.items()}
except IndexError:
return {k: None for k, v in self.quantities.items()}
def update(self):
with self.proc.oneshot():
cpu = self.proc.cpu_percent()
memory = self.proc.memory_info().rss
now = time()
self.cpu.append(cpu)
self.memory.append(memory)
self.time.append(now)
self.count += 1
result = {"cpu": cpu, "memory": memory, "time": now, "count": self.count}
if self._collect_net_io_counters:
try:
ioc = psutil.net_io_counters()
except Exception:
pass
else:
last = self._last_io_counters
duration = now - self.last_time
read_bytes = (ioc.bytes_recv - last.bytes_recv) / (duration or 0.5)
write_bytes = (ioc.bytes_sent - last.bytes_sent) / (duration or 0.5)
self.last_time = now
self._last_io_counters = ioc
self.read_bytes.append(read_bytes)
self.write_bytes.append(write_bytes)
result["read_bytes"] = read_bytes
result["write_bytes"] = write_bytes
if not WINDOWS:
num_fds = self.proc.num_fds()
self.num_fds.append(num_fds)
result["num_fds"] = num_fds
return result
def __repr__(self):
return "<SystemMonitor: cpu: %d memory: %d MB fds: %d>" % (
self.cpu[-1],
self.memory[-1] / 1e6,
-1 if WINDOWS else self.num_fds[-1],
)
def range_query(self, start):
if start == self.count:
return {k: [] for k in self.quantities}
istart = start - (self.count - len(self.cpu))
istart = max(0, istart)
        seq = range(istart, len(self.cpu))
        return {k: [v[i] for i in seq] for k, v in self.quantities.items()}
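def _demo_system_monitor():
    # Hypothetical usage sketch (not part of the original module): poll the
    # monitor twice and inspect the newest sample of every tracked quantity.
    sm = SystemMonitor(n=100)
    sm.update()
    latest = sm.recent()         # e.g. {'cpu': 0.0, 'memory': ..., 'time': ...}
    history = sm.range_query(0)  # every sample still held in the deques
    return latest, history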
|
"""
python generate_sparsetools.py
Generate manual wrappers for C++ sparsetools code.
Type codes used:
'i': integer scalar
'I': integer array
'T': data array
'B': boolean array
'V': std::vector<integer>*
'W': std::vector<data>*
'*': indicates that the next argument is an output argument
'v': void
'l': 64-bit integer scalar
See sparsetools.cxx for more details.
"""
import optparse
import os
from distutils.dep_util import newer
BSR_ROUTINES = """
bsr_diagonal v iiiiiIIT*T
bsr_tocsr v iiiiIIT*I*I*T
bsr_scale_rows v iiiiII*TT
bsr_scale_columns v iiiiII*TT
bsr_sort_indices v iiii*I*I*T
bsr_transpose v iiiiIIT*I*I*T
bsr_matmat_pass2 v iiiiiIITIIT*I*I*T
bsr_matvec v iiiiIITT*T
bsr_matvecs v iiiiiIITT*T
bsr_elmul_bsr v iiiiIITIIT*I*I*T
bsr_eldiv_bsr v iiiiIITIIT*I*I*T
bsr_plus_bsr v iiiiIITIIT*I*I*T
bsr_minus_bsr v iiiiIITIIT*I*I*T
bsr_maximum_bsr v iiiiIITIIT*I*I*T
bsr_minimum_bsr v iiiiIITIIT*I*I*T
bsr_ne_bsr v iiiiIITIIT*I*I*B
bsr_lt_bsr v iiiiIITIIT*I*I*B
bsr_gt_bsr v iiiiIITIIT*I*I*B
bsr_le_bsr v iiiiIITIIT*I*I*B
bsr_ge_bsr v iiiiIITIIT*I*I*B
"""
CSC_ROUTINES = """
csc_diagonal v iiiIIT*T
csc_tocsr v iiIIT*I*I*T
csc_matmat_pass1 v iiIIII*I
csc_matmat_pass2 v iiIITIIT*I*I*T
csc_matvec v iiIITT*T
csc_matvecs v iiiIITT*T
csc_elmul_csc v iiIITIIT*I*I*T
csc_eldiv_csc v iiIITIIT*I*I*T
csc_plus_csc v iiIITIIT*I*I*T
csc_minus_csc v iiIITIIT*I*I*T
csc_maximum_csc v iiIITIIT*I*I*T
csc_minimum_csc v iiIITIIT*I*I*T
csc_ne_csc v iiIITIIT*I*I*B
csc_lt_csc v iiIITIIT*I*I*B
csc_gt_csc v iiIITIIT*I*I*B
csc_le_csc v iiIITIIT*I*I*B
csc_ge_csc v iiIITIIT*I*I*B
"""
CSR_ROUTINES = """
csr_matmat_pass1 v iiIIII*I
csr_matmat_pass2 v iiIITIIT*I*I*T
csr_diagonal v iiiIIT*T
csr_tocsc v iiIIT*I*I*T
csr_tobsr v iiiiIIT*I*I*T
csr_todense v iiIIT*T
csr_matvec v iiIITT*T
csr_matvecs v iiiIITT*T
csr_elmul_csr v iiIITIIT*I*I*T
csr_eldiv_csr v iiIITIIT*I*I*T
csr_plus_csr v iiIITIIT*I*I*T
csr_minus_csr v iiIITIIT*I*I*T
csr_maximum_csr v iiIITIIT*I*I*T
csr_minimum_csr v iiIITIIT*I*I*T
csr_ne_csr v iiIITIIT*I*I*B
csr_lt_csr v iiIITIIT*I*I*B
csr_gt_csr v iiIITIIT*I*I*B
csr_le_csr v iiIITIIT*I*I*B
csr_ge_csr v iiIITIIT*I*I*B
csr_scale_rows v iiII*TT
csr_scale_columns v iiII*TT
csr_sort_indices v iI*I*T
csr_eliminate_zeros v ii*I*I*T
csr_sum_duplicates v ii*I*I*T
get_csr_submatrix v iiIITiiii*V*V*W
csr_sample_values v iiIITiII*T
csr_count_blocks i iiiiII
csr_sample_offsets i iiIIiII*I
expandptr v iI*I
test_throw_error i
csr_has_sorted_indices i iII
csr_has_canonical_format i iII
"""
OTHER_ROUTINES = """
coo_tocsr v iiiIIT*I*I*T
coo_todense v iilIIT*Ti
coo_matvec v lIITT*T
dia_matvec v iiiiITT*T
cs_graph_components i iII*I
"""
COMPILATION_UNITS = [
('bsr', BSR_ROUTINES),
('csr', CSR_ROUTINES),
('csc', CSC_ROUTINES),
('other', OTHER_ROUTINES),
]
I_TYPES = [
('NPY_INT32', 'npy_int32'),
('NPY_INT64', 'npy_int64'),
]
T_TYPES = [
('NPY_BOOL', 'npy_bool_wrapper'),
('NPY_BYTE', 'npy_byte'),
('NPY_UBYTE', 'npy_ubyte'),
('NPY_SHORT', 'npy_short'),
('NPY_USHORT', 'npy_ushort'),
('NPY_INT', 'npy_int'),
('NPY_UINT', 'npy_uint'),
('NPY_LONG', 'npy_long'),
('NPY_ULONG', 'npy_ulong'),
('NPY_LONGLONG', 'npy_longlong'),
('NPY_ULONGLONG', 'npy_ulonglong'),
('NPY_FLOAT', 'npy_float'),
('NPY_DOUBLE', 'npy_double'),
('NPY_LONGDOUBLE', 'npy_longdouble'),
('NPY_CFLOAT', 'npy_cfloat_wrapper'),
('NPY_CDOUBLE', 'npy_cdouble_wrapper'),
('NPY_CLONGDOUBLE', 'npy_clongdouble_wrapper'),
]
THUNK_TEMPLATE = """
static PY_LONG_LONG %(name)s_thunk(int I_typenum, int T_typenum, void **a)
{
%(thunk_content)s
}
"""
METHOD_TEMPLATE = """
NPY_VISIBILITY_HIDDEN PyObject *
%(name)s_method(PyObject *self, PyObject *args)
{
return call_thunk('%(ret_spec)s', "%(arg_spec)s", %(name)s_thunk, args);
}
"""
GET_THUNK_CASE_TEMPLATE = """
static int get_thunk_case(int I_typenum, int T_typenum)
{
%(content)s;
return -1;
}
"""
def get_thunk_type_set():
"""
Get a list containing cartesian product of data types, plus a getter routine.
Returns
-------
i_types : list [(j, I_typenum, None, I_type, None), ...]
Pairing of index type numbers and the corresponding C++ types,
        and a unique index `j`. This is for routines that are parameterized
only by I but not by T.
it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...]
Same as `i_types`, but for routines parameterized both by T and I.
getter_code : str
C++ code for a function that takes I_typenum, T_typenum and returns
the unique index corresponding to the lists, or -1 if no match was
found.
"""
it_types = []
i_types = []
j = 0
getter_code = " if (0) {}"
for I_typenum, I_type in I_TYPES:
piece = """
else if (I_typenum == %(I_typenum)s) {
if (T_typenum == -1) { return %(j)s; }"""
getter_code += piece % dict(I_typenum=I_typenum, j=j)
i_types.append((j, I_typenum, None, I_type, None))
j += 1
for T_typenum, T_type in T_TYPES:
piece = """
else if (T_typenum == %(T_typenum)s) { return %(j)s; }"""
getter_code += piece % dict(T_typenum=T_typenum, j=j)
it_types.append((j, I_typenum, T_typenum, I_type, T_type))
j += 1
getter_code += """
}"""
return i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code)
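# The generated dispatcher has this overall shape (an illustrative excerpt of
# the emitted C++, not verbatim output):
#
#     static int get_thunk_case(int I_typenum, int T_typenum)
#     {
#         if (0) {}
#         else if (I_typenum == NPY_INT32) {
#             if (T_typenum == -1) { return 0; }
#             else if (T_typenum == NPY_BOOL) { return 1; }
#             /* ... */
#         }
#         return -1;
#     }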
def parse_routine(name, args, types):
"""
Generate thunk and method code for a given routine.
Parameters
----------
name : str
Name of the C++ routine
args : str
Argument list specification (in format explained above)
types : list
        List of types to instantiate, as returned by `get_thunk_type_set`
"""
ret_spec = args[0]
arg_spec = args[1:]
def get_arglist(I_type, T_type):
"""
Generate argument list for calling the C++ function
"""
args = []
next_is_writeable = False
j = 0
for t in arg_spec:
const = '' if next_is_writeable else 'const '
next_is_writeable = False
if t == '*':
next_is_writeable = True
continue
elif t == 'i':
args.append("*(%s*)a[%d]" % (const + I_type, j))
elif t == 'I':
args.append("(%s*)a[%d]" % (const + I_type, j))
elif t == 'T':
args.append("(%s*)a[%d]" % (const + T_type, j))
elif t == 'B':
args.append("(npy_bool_wrapper*)a[%d]" % (j,))
elif t == 'V':
if const:
raise ValueError("'V' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (I_type, j,))
elif t == 'W':
if const:
raise ValueError("'W' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (T_type, j,))
elif t == 'l':
args.append("*(%snpy_int64*)a[%d]" % (const, j))
else:
raise ValueError("Invalid spec character %r" % (t,))
j += 1
return ", ".join(args)
# Generate thunk code: a giant switch statement with different
# type combinations inside.
thunk_content = """int j = get_thunk_case(I_typenum, T_typenum);
switch (j) {"""
for j, I_typenum, T_typenum, I_type, T_type in types:
arglist = get_arglist(I_type, T_type)
if T_type is None:
dispatch = "%s" % (I_type,)
else:
dispatch = "%s,%s" % (I_type, T_type)
if 'B' in arg_spec:
dispatch += ",npy_bool_wrapper"
piece = """
case %(j)s:"""
if ret_spec == 'v':
piece += """
(void)%(name)s<%(dispatch)s>(%(arglist)s);
return 0;"""
else:
piece += """
return %(name)s<%(dispatch)s>(%(arglist)s);"""
thunk_content += piece % dict(j=j, I_type=I_type, T_type=T_type,
I_typenum=I_typenum, T_typenum=T_typenum,
arglist=arglist, name=name,
dispatch=dispatch)
thunk_content += """
default:
throw std::runtime_error("internal error: invalid argument typenums");
}"""
thunk_code = THUNK_TEMPLATE % dict(name=name,
thunk_content=thunk_content)
# Generate method code
method_code = METHOD_TEMPLATE % dict(name=name,
ret_spec=ret_spec,
arg_spec=arg_spec)
return thunk_code, method_code
def main():
p = optparse.OptionParser(usage=__doc__.strip())
p.add_option("--no-force", action="store_false",
dest="force", default=True)
options, args = p.parse_args()
names = []
i_types, it_types, getter_code = get_thunk_type_set()
# Generate *_impl.h for each compilation unit
for unit_name, routines in COMPILATION_UNITS:
thunks = []
methods = []
# Generate thunks and methods for all routines
for line in routines.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
try:
name, args = line.split(None, 1)
except ValueError:
raise ValueError("Malformed line: %r" % (line,))
args = "".join(args.split())
if 't' in args or 'T' in args:
thunk, method = parse_routine(name, args, it_types)
else:
thunk, method = parse_routine(name, args, i_types)
if name in names:
raise ValueError("Duplicate routine %r" % (name,))
names.append(name)
thunks.append(thunk)
methods.append(method)
# Produce output
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
unit_name + '_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(getter_code)
for thunk in thunks:
f.write(thunk)
for method in methods:
f.write(method)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
# Generate code for method struct
method_defs = ""
for name in names:
method_defs += "NPY_VISIBILITY_HIDDEN PyObject *%s_method(PyObject *, PyObject *);\n" % (name,)
method_struct = """\nstatic struct PyMethodDef sparsetools_methods[] = {"""
for name in names:
method_struct += """
{"%(name)s", (PyCFunction)%(name)s_method, METH_VARARGS, NULL},""" % dict(name=name)
method_struct += """
{NULL, NULL, 0, NULL}
};"""
# Produce sparsetools_impl.h
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
'sparsetools_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(method_defs)
f.write(method_struct)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
def write_autogen_blurb(stream):
stream.write("""\
/* This file is autogenerated by generate_sparsetools.py
* Do not edit manually or check into VCS.
*/
""")
if __name__ == "__main__":
main()
|
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('snh.views',
#ROOT
(r'^$', 'index'),
#TWITTER
(r'^tw/(?P<harvester_id>\d+)$', 'tw'),
(r'^tw_user_detail/(?P<harvester_id>\d+)/(?P<screen_name>\w+)/$', 'tw_user_detail'),
(r'^tw_search_detail/(?P<harvester_id>\d+)/(?P<search_id>\d+)/$', 'tw_search_detail'),
(r'^tw_status_detail/(?P<harvester_id>\d+)/(?P<status_id>\d+)/$', 'tw_status_detail'),
#TWITTER AJAX
(r'^get_tw_list/(?P<call_type>[\w\.]+)/(?P<harvester_id>\d+)/$', 'get_tw_list'),
(r'^get_twsearch_list/(?P<call_type>[\w\.]+)/(?P<harvester_id>\d+)/$', 'get_twsearch_list'),
(r'^get_tw_status_list/(?P<call_type>[\w\.]+)/(?P<screen_name>\w+)/$', 'get_tw_status_list'),
(r'^get_tw_statussearch_list/(?P<call_type>[\w\.]+)/(?P<screen_name>\w+)/$', 'get_tw_statussearch_list'),
(r'^get_tw_searchdetail_list/(?P<call_type>[\w\.]+)/(?P<search_id>\d+)/$', 'get_tw_searchdetail_list'),
(r'^get_status_chart/(?P<harvester_id>\d+)/(?P<screen_name>\w+)/$', 'get_status_chart'),
(r'^get_at_chart/(?P<harvester_id>\d+)/(?P<screen_name>\w+)/$', 'get_at_chart'),
#FACEBOOK
(r'^request_fb_token$', 'request_fb_token'),
(r'^test_fb_token$', 'test_fb_token'),
(r'^fb/(?P<harvester_id>\d+)$', 'fb'),
(r'^fb_user_detail/(?P<harvester_id>\d+)/(?P<username>[\w\.]+)/$', 'fb_user_detail'),
(r'^fb_user_detail/(?P<harvester_id>\d+)/fid/(?P<userfid>[\w\.]+)/$', 'fb_userfid_detail'),
(r'^fb_post_detail/(?P<harvester_id>\d+)/(?P<post_id>[\w\.]+)/$', 'fb_post_detail'),
#FACEBOOK AJAX
(r'^get_fb_list/(?P<call_type>[\w\.]+)/(?P<harvester_id>\d+)/$', 'get_fb_list'),
(r'^get_fb_post_list/(?P<call_type>[\w\.]+)/(?P<userfid>[\w\.]+)/$', 'get_fb_post_list'),
(r'^get_fb_otherpost_list/(?P<call_type>[\w\.]+)/(?P<userfid>[\w\.]+)/$', 'get_fb_otherpost_list'),
(r'^get_fb_comment_list/(?P<call_type>[\w\.]+)/(?P<userfid>[\w\.]+)/$', 'get_fb_comment_list'),
(r'^get_fb_postcomment_list/(?P<call_type>[\w\.]+)/(?P<postfid>[\w\.]+)/$', 'get_fb_postcomment_list'),
(r'^get_fb_likes_list/(?P<call_type>[\w\.]+)/(?P<postfid>[\w\.]+)/$', 'get_fb_likes_list'),
(r'^get_wall_chart/(?P<harvester_id>\d+)/(?P<userfid>[\w\.]+)/$', 'get_wall_chart'),
(r'^get_otherwall_chart/(?P<harvester_id>\d+)/(?P<userfid>[\w\.]+)/$', 'get_otherwall_chart'),
(r'^get_comment_chart/(?P<harvester_id>\d+)/(?P<userfid>[\w\.]+)/$', 'get_comment_chart'),
(r'^get_commentpost_chart/(?P<harvester_id>\d+)/(?P<postfid>[\w\.]+)/$', 'get_commentpost_chart'),
#DAILYMOTION
(r'^dm/(?P<harvester_id>\d+)$', 'dm'),
(r'^dm_user_detail/(?P<harvester_id>\d+)/fid/(?P<userfid>[\w\.]+)/$', 'dm_user_detail'),
(r'^dm_video_detail/(?P<harvester_id>\d+)/(?P<videoid>[\w\.]+)/$', 'dm_video_detail'),
#DAILYMOTION AJAX
(r'^get_dm_list/(?P<call_type>[\w\.]+)/(?P<harvester_id>\d+)/$', 'get_dm_list'),
(r'^get_dm_video_list/(?P<call_type>[\w\.]+)/(?P<userfid>[\w\.]+)/$', 'get_dm_video_list'),
(r'^get_dm_fans_list/(?P<call_type>[\w\.]+)/(?P<userfid>[\w\.]+)/$', 'get_dm_fans_list'),
(r'^get_dm_friends_list/(?P<call_type>[\w\.]+)/(?P<userfid>[\w\.]+)/$', 'get_dm_friends_list'),
(r'^get_dm_following_list/(?P<call_type>[\w\.]+)/(?P<userfid>[\w\.]+)/$', 'get_dm_following_list'),
(r'^get_dm_comment_list/(?P<call_type>[\w\.]+)/(?P<userfid>[\w\.]+)/$', 'get_dm_comment_list'),
(r'^get_dm_videocomment_list/(?P<call_type>[\w\.]+)/(?P<videofid>[\w\.\*]+)/$', 'get_dm_videocomment_list'),
(r'^get_dmvideo_chart/(?P<harvester_id>\d+)/(?P<userfid>[\w\.]+)/$', 'get_dmvideo_chart'),
(r'^get_dmcomment_chart/(?P<harvester_id>\d+)/(?P<userfid>[\w\.]+)/$', 'get_dmcomment_chart'),
(r'^get_dmvideocomment_chart/(?P<harvester_id>\d+)/(?P<videofid>[\w\.]+)/$', 'get_dmvideocomment_chart'),
#YOUTUBE
(r'^yt/(?P<harvester_id>\d+)$', 'yt'),
(r'^yt_user_detail/(?P<harvester_id>\d+)/fid/(?P<userfid>.*)/$', 'yt_user_detail'),
(r'^yt_video_detail/(?P<harvester_id>\d+)/(?P<videoid>.*)/$', 'yt_video_detail'),
#YOUTUBE AJAX
(r'^get_yt_list/(?P<call_type>[\w\.]+)/(?P<harvester_id>\d+)/$', 'get_yt_list'),
(r'^get_yt_video_list/(?P<call_type>[\w\.]+)/(?P<userfid>.*)/$', 'get_yt_video_list'),
(r'^get_yt_comment_list/(?P<call_type>[\w\.]+)/(?P<userfid>.*)/$', 'get_yt_comment_list'),
(r'^get_yt_videocomment_list/(?P<call_type>[\w\.]+)/(?P<videofid>.*)/$', 'get_yt_videocomment_list'),
(r'^get_ytvideo_chart/(?P<harvester_id>\d+)/fid/(?P<userfid>.*)/$', 'get_ytvideo_chart'),
(r'^get_ytcomment_chart/(?P<harvester_id>\d+)/fid/(?P<userfid>.*)/$', 'get_ytcomment_chart'),
(r'^get_ytvideocomment_chart/(?P<harvester_id>\d+)/fid/(?P<videofid>.*)/$', 'get_ytvideocomment_chart'),
)
|
"""
Copyright (C) 2018 Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
This file is distributed under the terms of the 3-clause BSD License.
A copy of the license can be found in the root directory or at
https://opensource.org/licenses/BSD-3-Clause.
Author: Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
Date: 01/11/2018
This module implements infrastructure to store statements
"""
from intrepyd.visitable import Visitable
from intrepyd.expression import TRUE
class Assignment(Visitable):
"""
Stores an assignment
"""
def __init__(self, lhs, rhs):
self._lhs = lhs
self._rhs = rhs
@property
def lhs(self):
"""
Getter
"""
return self._lhs
@property
def rhs(self):
"""
Getter
"""
return self._rhs
class IfThenElse(Visitable):
"""
Stores an if-then-else
"""
def __init__(self, conditions, stmt_blocks):
self._conditions = conditions
self._stmt_blocks = stmt_blocks
if len(self._conditions) == len(self._stmt_blocks):
return
if len(self._conditions) == len(self._stmt_blocks) - 1:
# chain with final else
self._conditions.append(TRUE)
return
raise RuntimeError('Wrong number of conditions in if')
@property
def conditions(self):
"""
Getter
"""
return self._conditions
@property
def stmt_blocks(self):
"""
Getter
"""
return self._stmt_blocks
class Case(Visitable):
"""
Stores a case statement
"""
def __init__(self, expression, selections, stmt_blocks):
self._expression = expression
self._selections = selections
self._stmt_blocks = stmt_blocks
if len(self._selections) == len(self._stmt_blocks):
return
if len(self._selections) == len(self._stmt_blocks) - 1:
# chain with final else
self._selections.append(expression)
return
        raise RuntimeError('Wrong number of selections in case')
@property
def expression(self):
"""
Getter
"""
return self._expression
@property
def selections(self):
"""
Getter
"""
return self._selections
@property
def stmt_blocks(self):
"""
Getter
"""
return self._stmt_blocks
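def _demo_if_then_else():
    # Hypothetical sketch (placeholder strings stand in for real expression and
    # statement objects): two conditions with three statement blocks model
    # "if c1 ... elif c2 ... else ...", and the constructor pads the trailing
    # else branch with the TRUE condition.
    ite = IfThenElse(['c1', 'c2'], [['stmt1'], ['stmt2'], ['stmt3']])
    assert ite.conditions[-1] is TRUE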
|
from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse
from presentations.models import *
class PresentationSitemap(Sitemap):
changefreq = "monthly"
priority = 0.1
    def items(self):
return Presentation.objects.all()
def lastmod(self, obj):
return obj.presentation_date
|
from __future__ import print_function
from builtins import str
import sys
import pmagpy.pmag as pmag
def main():
"""
NAME
sites_locations.py
DESCRIPTION
reads in er_sites.txt file and finds all locations and bounds of locations
outputs er_locations.txt file
SYNTAX
sites_locations.py [command line options]
OPTIONS
-h prints help message and quits
-f: specimen input er_sites format file, default is "er_sites.txt"
-F: locations table: default is "er_locations.txt"
"""
site_file="er_sites.txt"
loc_file="er_locations.txt"
Names,user=[],"unknown"
Done=[]
version_num=pmag.get_version()
args=sys.argv
dir_path='.'
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-f' in args:
ind=args.index("-f")
site_file=args[ind+1]
if '-F' in args:
ind=args.index("-F")
loc_file=args[ind+1]
#
site_file=dir_path+'/'+site_file
loc_file=dir_path+'/'+loc_file
Sites,file_type=pmag.magic_read(site_file)
if file_type != 'er_sites':
        print(file_type, "is not a valid er_sites file")
sys.exit()
# read in site data
#
LocNames,Locations=[],[]
for site in Sites:
if site['er_location_name'] not in LocNames: # new location name
LocNames.append(site['er_location_name'])
sites_locs=pmag.get_dictitem(Sites,'er_location_name',site['er_location_name'],'T') # get all sites for this loc
lats=pmag.get_dictkey(sites_locs,'site_lat','f') # get all the latitudes as floats
lons=pmag.get_dictkey(sites_locs,'site_lon','f') # get all the longitudes as floats
LocRec={'er_citation_names':'This study','er_location_name':site['er_location_name'],'location_type':''}
LocRec['location_begin_lat']=str(min(lats))
LocRec['location_end_lat']=str(max(lats))
LocRec['location_begin_lon']=str(min(lons))
LocRec['location_end_lon']=str(max(lons))
Locations.append(LocRec)
if len(Locations)>0:
pmag.magic_write(loc_file,Locations,"er_locations")
print("Locations written to: ",loc_file)
if __name__ == "__main__":
main()
|
"""
zbx.config.hosts
~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
__all__ = ['Host', 'Interface', 'Template']
from .bases import Model
from .fields import Field, SetField
class Template(Model):
"""
Template model
"""
xml_tag = 'template'
name = Field()
template = Field()
groups = SetField(model='Group')
applications = SetField(model='Application')
items = SetField(model='Item')
discovery_rules = SetField(model='DiscoveryRule')
macros = SetField(model='Macro', allow_empty=True)
screens = SetField(model='Screen')
graphs = SetField(model='Graph')
triggers = SetField(model='Trigger')
def __init__(self, name, **fields):
self.name = name
fields.setdefault('template', name)
self.update(fields)
class Host(Model):
"""
Host model
"""
xml_tag = 'host'
host = Field(description='Host name')
name = Field(description='Visible host name')
proxy = Field('', description='Proxy name')
status = Field(0, choices=(
(0, 'monitored'),
(1, 'unmonitored'),
), description='Host Status')
ipmi_authtype = Field(-1, description='IPMI authentication type')
ipmi_privilege = Field(2, description='IPMI privilege')
ipmi_username = Field('', description='IPMI username')
ipmi_password = Field('', description='IPMI password')
templates = SetField(model='Template', allow_empty=True)
groups = SetField(model='Group')
interfaces = SetField(model='Interface')
applications = SetField(model='Application')
items = SetField(model='Item', allow_empty=True)
discovery_rules = SetField(model='DiscoveryRule', allow_empty=True)
description = Field()
graphs = SetField(model='Graph')
macros = SetField(model='Macro', allow_empty=True)
inventory = Field('')
def __init__(self, name, **fields):
self.name = name
self.host = fields.pop('host', self.name)
self.update(fields)
class Interface(Model):
"""
Interface model
"""
xml_tag = 'interface'
ip = Field(description='IP address, can be either IPv4 or IPv6')
dns = Field('', description='DNS name')
port = Field(description='Port number')
type = Field(1, choices=(
(1, 'agent'),
(2, 'SNMP'),
(3, 'IPMI'),
(4, 'JMX')
), description='Interface type')
useip = Field(0, choices=(
(0, 'connect to the host using DNS name'),
(1, 'connect to the host using IP address')
), description='How to connect to the host')
default = Field(0, choices=(
(0, 'Not default interface'),
(1, 'Default interface')
), description='Interface status')
interface_ref = Field('if1', description='Interface reference name '
'to be used in items.')
def __init__(self, ident, **fields):
        ip, port, dns = '', '', ''
        if ':' in ident:
            # split off a trailing ':port'; note that a bare IPv6 ident would
            # be mis-split here, so pass ip=... explicitly for IPv6 hosts
            ident, _, port_str = ident.rpartition(':')
            port = int(port_str)
if ident:
if ident.split('.')[-1].isdigit():
ip = ident
else:
dns = ident
fields.setdefault('ip', ip)
fields.setdefault('dns', dns)
fields.setdefault('port', port)
fields.setdefault('useip', False if fields['dns'] else True)
self.update(fields)
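def _demo_interface():
    # Hypothetical sketch of the ident parsing above: a trailing ':port' is
    # split off, and the remainder counts as an IP address when its last
    # dot-separated token is numeric, as a DNS name otherwise.
    by_ip = Interface('192.168.0.10:10050')        # ip set, useip -> True
    by_dns = Interface('agent.example.com:10050')  # dns set, useip -> False
    return by_ip, by_dns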
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('convos', '0003_auto_20150511_0440'),
]
operations = [
migrations.AlterField(
model_name='convomessage',
name='body',
field=models.CharField(max_length=64000),
),
]
|
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import threading
import itertools
import numpy as np
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from pytest import raises as assert_raises
import pytest
from numpy import dot, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, diags, rand
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import (eigs, eigsh, arpack,
ArpackNoConvergence)
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
    Return tolerance values suitable for a given test.
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex_=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N, N))
if complex_:
M = M + 1j * np.random.random((N, N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i, j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.nonzero(i == j)
j[ind] = (j[ind] + 1) % N
M[i, j] = 0
M[j, i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i, j] = 0
return M
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
M = np.random.random((N, N))
M = 0.5 * (M + M.T) # Make M symmetric
if pos_definite:
Id = N * np.eye(N)
if sparse:
M = csr_matrix(M)
M += Id
else:
if sparse:
M = csr_matrix(M)
return M
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
except AssertionError:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eigenvalues, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eigenvalues, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eigenvalues - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eigenvalues - sigma)
+ 1. / (eigenvalues - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eigenvalues - sigma)
- 1. / (eigenvalues - np.conj(sigma)))
elif mode == 'cayley':
reval = (eigenvalues + sigma) / (eigenvalues - sigma)
elif mode == 'buckling':
reval = eigenvalues / (eigenvalues - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
        # for LI, SI on real-valued problems ARPACK returns the
        # largest/smallest abs(imaginary), hence the abs() for lowercase types
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ)
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eigenvalues, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eigenvalues, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eigenvalues, typ, k, which,
sigma, OPpart, mode)
eigenvalues = eigenvalues[ind]
evec = evec[:, ind]
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eigenvalues * np.dot(b, evec)
else:
RHS = eigenvalues * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
try:
# check eigenvalues
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
err_msg=err)
break
except AssertionError:
ntries += 1
# check eigenvalues
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<%s>" % self.name
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True).astype('F').astype('D')
Mc = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
# general hermitian problem with hermitian M
GHc = DictWithRepr("gen-hermitian-Mc")
GHc['mat'] = Ac
GHc['bmat'] = Mc
GHc['v0'] = v0
GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH, GHc]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
eval_evec(symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
def test_standard_nonsymmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, complex_=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_matrix(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_matrix(np.zeros((8, 8)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# This test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
# Check that the linear operators used by the Arpack wrappers are
# deallocatable by reference counting -- they are big objects, so
# Python's cyclic GC may not collect them fast enough before
# running out of memory if eigs/eigsh are called in a tight loop.
M_d = np.eye(10)
M_s = csc_matrix(M_d)
M_o = aslinearoperator(M_d)
with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
pass
with assert_deallocated(lambda: arpack.LuInv(M_d)):
pass
with assert_deallocated(lambda: arpack.IterInv(M_s)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
pass
def test_parallel_threads():
results = []
v0 = np.random.rand(50)
def worker():
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=3, v0=v0)
results.append(w)
w, v = eigsh(x, k=3, v0=v0)
results.append(w)
threads = [threading.Thread(target=worker) for k in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
worker()
for r in results:
assert_allclose(r, results[-1])
def test_reentering():
# Just some linear operator that calls eigs recursively
def A_matvec(x):
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=1)
return v / w[0]
A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
# The Fortran code is not reentrant, so this fails (gracefully, not crashing)
assert_raises(RuntimeError, eigs, A, k=1)
assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
# Check that issue arpack-ng/#1315 is not present.
# Adapted from arpack-ng/TESTS/bug_1315_single.c
# If this fails, then the installed ARPACK library is faulty.
for dtype in [np.float32, np.float64]:
np.random.seed(1234)
w0 = np.arange(1, 1000+1).astype(dtype)
A = diags([w0], [0], shape=(1000, 1000))
v0 = np.random.rand(1000).astype(dtype)
w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
assert_allclose(np.sort(w), np.sort(w0[-9:]),
rtol=1e-4)
def test_eigs_for_k_greater():
# Test eigs() for k beyond limits.
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False)
M_dense = np.random.random((4, 4))
M_sparse = generate_matrix(4, sparse=True)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eig(A, b=M_dense)
eig_tuple2 = eig(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigs, A, M=M_linop, k=3)
# Test 'A' for different types
assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
# Test eigsh() for k beyond limits.
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False)
M_dense = generate_matrix_symmetric(4, pos_definite=True)
M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eigh(A, b=M_dense)
eig_tuple2 = eigh(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigsh, A, M=M_linop, k=4)
# Test 'A' for different types
assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
def test_real_eigs_real_k_subset():
np.random.seed(1)
n = 10
A = rand(n, n, density=0.5)
A.data *= 2
A.data -= 1
v0 = np.ones(n)
whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
dtypes = [np.float32, np.float64]
for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
prev_w = np.array([], dtype=dtype)
eps = np.finfo(dtype).eps
for k in range(1, 9):
w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
v0=v0.astype(dtype), tol=0)
assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))
# Check that the set of eigenvalues for `k` is a subset of that for `k+1`
dist = abs(prev_w[:,None] - w).min(axis=1)
assert_allclose(dist, 0, atol=np.sqrt(eps))
prev_w = w
# Check sort order
if sigma is None:
d = w
else:
d = 1 / (w - sigma)
if which == 'LM':
# ARPACK is systematic for 'LM', but sort order
# appears not well defined for other modes
assert np.all(np.diff(abs(d)) <= 1e-6)
|
"""Django settings for hawk project."""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
SECRET_KEY = '*ip7#(8)u&+*rx%30qywt*9z&oq3w1=u#n!#u6^2u*paobxlv^'
DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0', '127.0.0.1']
INSTALLED_APPS = [
# Local application
'prox',
'shop',
# All auth requirements
'allauth',
'allauth.account',
'allauth.socialaccount',
# Custom admin panel Jet
'jet',
# Third-party widget tweak
'widget_tweaks',
# Django default
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hawk.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hawk.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'dev.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'deployment/static')
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'deployment/media')
EMAIL_HOST = 'smtp.mail.ru'
EMAIL_PORT = 2525
EMAIL_HOST_USER = "example@example.com"
EMAIL_HOST_PASSWORD = "password"
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
LOGIN_REDIRECT_URL = 'shop:index'
SITE_ID = 1
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('sampleapp1/', include('sampleapp1.urls')),
path('admin/', admin.site.urls),
]
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=32, FREQ='D', seed=0, trendtype="MovingAverage", cycle_length=12, transform="Fisher", sigma=0.0, exog_count=20, ar_order=0)
|
"""
-----------------------------------------------------------------------
This module implements gamma- and zeta-related functions:
* Bernoulli numbers
* Factorials
* The gamma function
* Polygamma functions
* Harmonic numbers
* The Riemann zeta function
* Constants related to these functions
-----------------------------------------------------------------------
"""
import math
from backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_THREE, gmpy
from libintmath import list_primes, ifac, moebius
from libmpf import (\
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast,
lshift, sqrt_fixed,
fzero, fone, fnone, fhalf, ftwo, finf, fninf, fnan,
from_int, to_int, to_fixed, from_man_exp, from_rational,
mpf_pos, mpf_neg, mpf_abs, mpf_add, mpf_sub,
mpf_mul, mpf_mul_int, mpf_div, mpf_sqrt, mpf_pow_int,
mpf_rdiv_int,
mpf_perturb, mpf_le, mpf_lt, mpf_gt, mpf_shift,
negative_rnd, reciprocal_rnd,
)
from libelefun import (\
constant_memo,
def_mpf_constant,
mpf_pi, pi_fixed, ln2_fixed, log_int_fixed, mpf_ln2,
mpf_exp, mpf_log, mpf_pow, mpf_cosh,
mpf_cos_sin, mpf_cosh_sinh, mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi,
)
from libmpc import (\
mpc_zero, mpc_one, mpc_half, mpc_two,
mpc_abs, mpc_shift, mpc_pos, mpc_neg,
mpc_add, mpc_sub, mpc_mul, mpc_div,
mpc_add_mpf, mpc_mul_mpf, mpc_div_mpf, mpc_mpf_div,
mpc_mul_int, mpc_pow_int,
mpc_log, mpc_exp, mpc_pow,
mpc_cos_pi, mpc_sin_pi,
mpc_reciprocal, mpc_square
)
@constant_memo
def catalan_fixed(prec):
prec = prec + 20
a = one = MPZ_ONE << prec
s, t, n = 0, 1, 1
while t:
a *= 32 * n**3 * (2*n-1)
a //= (3-16*n+16*n**2)**2
t = a * (-1)**(n-1) * (40*n**2-24*n+3) // (n**3 * (2*n-1))
s += t
n += 1
return s >> (20 + 6)
@constant_memo
def khinchin_fixed(prec):
wp = int(prec + prec**0.5 + 15)
s = MPZ_ZERO
fac = from_int(4)
t = ONE = MPZ_ONE << wp
pi = mpf_pi(wp)
pipow = twopi2 = mpf_shift(mpf_mul(pi, pi, wp), 2)
n = 1
while 1:
zeta2n = mpf_abs(mpf_bernoulli(2*n, wp))
zeta2n = mpf_mul(zeta2n, pipow, wp)
zeta2n = mpf_div(zeta2n, fac, wp)
zeta2n = to_fixed(zeta2n, wp)
term = (((zeta2n - ONE) * t) // n) >> wp
if term < 100:
break
#if not n % 10:
# print n, math.log(int(abs(term)))
s += term
t += ONE//(2*n+1) - ONE//(2*n)
n += 1
fac = mpf_mul_int(fac, (2*n)*(2*n-1), wp)
pipow = mpf_mul(pipow, twopi2, wp)
s = (s << wp) // ln2_fixed(wp)
K = mpf_exp(from_man_exp(s, -wp), wp)
K = to_fixed(K, prec)
return K
@constant_memo
def glaisher_fixed(prec):
wp = prec + 30
# Number of direct terms to sum before applying the Euler-Maclaurin
# formula to the tail. TODO: choose more intelligently
N = int(0.33*prec + 5)
ONE = MPZ_ONE << wp
# Euler-Maclaurin, step 1: sum log(k)/k**2 for k from 2 to N-1
s = MPZ_ZERO
for k in range(2, N):
#print k, N
s += log_int_fixed(k, wp) // k**2
logN = log_int_fixed(N, wp)
#logN = to_fixed(mpf_log(from_int(N), wp+20), wp)
# E-M step 2: integral of log(x)/x**2 from N to inf
s += (ONE + logN) // N
# E-M step 3: endpoint correction term f(N)/2
s += logN // (N**2 * 2)
# E-M step 4: the series of derivatives
pN = N**3
a = 1
b = -2
j = 3
fac = from_int(2)
k = 1
while 1:
# D(2*k-1) * B(2*k) / fac(2*k) [D(n) = nth derivative]
D = ((a << wp) + b*logN) // pN
D = from_man_exp(D, -wp)
B = mpf_bernoulli(2*k, wp)
term = mpf_mul(B, D, wp)
term = mpf_div(term, fac, wp)
term = to_fixed(term, wp)
if abs(term) < 100:
break
#if not k % 10:
# print k, math.log(int(abs(term)), 10)
s -= term
# Advance derivative twice
a, b, pN, j = b-a*j, -j*b, pN*N, j+1
a, b, pN, j = b-a*j, -j*b, pN*N, j+1
k += 1
fac = mpf_mul_int(fac, (2*k)*(2*k-1), wp)
# A = exp((6*s/pi**2 + log(2*pi) + euler)/12)
pi = pi_fixed(wp)
s *= 6
s = (s << wp) // (pi**2 >> wp)
s += euler_fixed(wp)
s += to_fixed(mpf_log(from_man_exp(2*pi, -wp), wp), wp)
s //= 12
A = mpf_exp(from_man_exp(s, -wp), wp)
return to_fixed(A, prec)
@constant_memo
def apery_fixed(prec):
prec += 20
d = MPZ_ONE << prec
term = MPZ(77) << prec
n = 1
s = MPZ_ZERO
while term:
s += term
d *= (n**10)
d //= (((2*n+1)**5) * (2*n)**5)
term = (-1)**n * (205*(n**2) + 250*n + 77) * d
n += 1
return s >> (20 + 6)
"""
Euler's constant (gamma) is computed using the Brent-McMillan formula,
gamma ~= I(n)/J(n) - log(n), where
I(n) = sum_{k=0,1,2,...} (n**k / k!)**2 * H(k)
J(n) = sum_{k=0,1,2,...} (n**k / k!)**2
H(k) = 1 + 1/2 + 1/3 + ... + 1/k
The error is bounded by O(exp(-4n)). Choosing n to be a power
of two, 2**p, the logarithm becomes particularly easy to calculate.[1]
We use the formulation of Algorithm 3.9 in [2] to make the summation
more efficient.
Reference:
[1] Xavier Gourdon & Pascal Sebah, The Euler constant: gamma
http://numbers.computation.free.fr/Constants/Gamma/gamma.pdf
[2] Jonathan Borwein & David Bailey, Mathematics by Experiment,
A K Peters, 2003
"""
@constant_memo
def euler_fixed(prec):
extra = 30
prec += extra
# choose p such that exp(-4*(2**p)) < 2**-n
p = int(math.log((prec/4) * math.log(2), 2)) + 1
n = 2**p
A = U = -p*ln2_fixed(prec)
B = V = MPZ_ONE << prec
k = 1
while 1:
B = B*n**2//k**2
A = (A*n**2//k + B)//k
U += A
V += B
if max(abs(A), abs(B)) < 100:
break
k += 1
return (U<<(prec-extra))//V
@constant_memo
def mertens_fixed(prec):
wp = prec + 20
m = 2
s = mpf_euler(wp)
while 1:
t = mpf_zeta_int(m, wp)
if t == fone:
break
t = mpf_log(t, wp)
t = mpf_mul_int(t, moebius(m), wp)
t = mpf_div(t, from_int(m), wp)
s = mpf_add(s, t)
m += 1
return to_fixed(s, prec)
@constant_memo
def twinprime_fixed(prec):
def I(n):
return sum(moebius(d)<<(n//d) for d in xrange(1,n+1) if not n%d)//n
wp = 2*prec + 30
res = fone
primes = [from_rational(1,p,wp) for p in [2,3,5,7]]
ppowers = [mpf_mul(p,p,wp) for p in primes]
n = 2
while 1:
a = mpf_zeta_int(n, wp)
for i in range(4):
a = mpf_mul(a, mpf_sub(fone, ppowers[i]), wp)
ppowers[i] = mpf_mul(ppowers[i], primes[i], wp)
a = mpf_pow_int(a, -I(n), wp)
if mpf_pos(a, prec+10, 'n') == fone:
break
#from libmpf import to_str
#print n, to_str(mpf_sub(fone, a), 6)
res = mpf_mul(res, a, wp)
n += 1
res = mpf_mul(res, from_int(3*15*35), wp)
res = mpf_div(res, from_int(4*16*36), wp)
return to_fixed(res, prec)
mpf_euler = def_mpf_constant(euler_fixed)
mpf_apery = def_mpf_constant(apery_fixed)
mpf_khinchin = def_mpf_constant(khinchin_fixed)
mpf_glaisher = def_mpf_constant(glaisher_fixed)
mpf_catalan = def_mpf_constant(catalan_fixed)
mpf_mertens = def_mpf_constant(mertens_fixed)
mpf_twinprime = def_mpf_constant(twinprime_fixed)
MAX_BERNOULLI_CACHE = 3000
"""
Small Bernoulli numbers and factorials are used in numerous summations,
so it is critical for speed that sequential computation is fast and that
values are cached up to a fairly high threshold.
On the other hand, we also want to support fast computation of isolated
large numbers. Currently, no such acceleration is provided for integer
factorials (though it is for large floating-point factorials, which are
computed via gamma if the precision is low enough).
For sequential computation of Bernoulli numbers, we use Ramanujan's formula
/ n + 3 \
B = (A(n) - S(n)) / | |
n \ n /
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
when n = 4 (mod 6), and
[n/6]
___
\ / n + 3 \
S(n) = ) | | * B
/___ \ n - 6*k / n-6*k
k = 1
For isolated large Bernoulli numbers, we use the Riemann zeta function
to calculate a numerical value for B_n. The von Staudt-Clausen theorem
can then be used to optionally find the exact value of the
numerator and denominator.
"""
bernoulli_cache = {}
f3 = from_int(3)
f6 = from_int(6)
def bernoulli_size(n):
"""Accurately estimate the size of B_n (even n > 2 only)"""
lgn = math.log(n,2)
return int(2.326 + 0.5*lgn + n*(lgn - 4.094))
BERNOULLI_PREC_CUTOFF = bernoulli_size(MAX_BERNOULLI_CACHE)
def mpf_bernoulli(n, prec, rnd=None):
"""Computation of Bernoulli numbers (numerically)"""
if n < 2:
if n < 0:
raise ValueError("Bernoulli numbers only defined for n >= 0")
if n == 0:
return fone
if n == 1:
return mpf_neg(fhalf)
# For odd n > 1, the Bernoulli numbers are zero
if n & 1:
return fzero
# If precision is extremely high, we can save time by computing
# the Bernoulli number at a lower precision that is sufficient to
# obtain the exact fraction, round to the exact fraction, and
# convert the fraction back to an mpf value at the original precision
if prec > BERNOULLI_PREC_CUTOFF and prec > bernoulli_size(n)*1.1 + 1000:
p, q = bernfrac(n)
return from_rational(p, q, prec, rnd or round_floor)
if n > MAX_BERNOULLI_CACHE:
return mpf_bernoulli_huge(n, prec, rnd)
wp = prec + 30
# Reuse nearby precisions
wp += 32 - (prec & 31)
cached = bernoulli_cache.get(wp)
if cached:
numbers, state = cached
if n in numbers:
if not rnd:
return numbers[n]
return mpf_pos(numbers[n], prec, rnd)
m, bin, bin1 = state
if n - m > 10:
return mpf_bernoulli_huge(n, prec, rnd)
else:
if n > 10:
return mpf_bernoulli_huge(n, prec, rnd)
numbers = {0:fone}
m, bin, bin1 = state = [2, MPZ(10), MPZ_ONE]
bernoulli_cache[wp] = (numbers, state)
while m <= n:
#print m
case = m % 6
# Accurately estimate size of B_m so we can use
# fixed point math without using too much precision
szbm = bernoulli_size(m)
s = 0
sexp = max(0, szbm) - wp
if m < 6:
a = MPZ_ZERO
else:
a = bin1
for j in xrange(1, m//6+1):
usign, uman, uexp, ubc = u = numbers[m-6*j]
if usign:
uman = -uman
s += lshift(a*uman, uexp-sexp)
# Update inner binomial coefficient
j6 = 6*j
a *= ((m-5-j6)*(m-4-j6)*(m-3-j6)*(m-2-j6)*(m-1-j6)*(m-j6))
a //= ((4+j6)*(5+j6)*(6+j6)*(7+j6)*(8+j6)*(9+j6))
if case == 0: b = mpf_rdiv_int(m+3, f3, wp)
if case == 2: b = mpf_rdiv_int(m+3, f3, wp)
if case == 4: b = mpf_rdiv_int(-m-3, f6, wp)
s = from_man_exp(s, sexp, wp)
b = mpf_div(mpf_sub(b, s, wp), from_int(bin), wp)
numbers[m] = b
m += 2
# Update outer binomial coefficient
bin = bin * ((m+2)*(m+3)) // (m*(m-1))
if m > 6:
bin1 = bin1 * ((2+m)*(3+m)) // ((m-7)*(m-6))
state[:] = [m, bin, bin1]
return numbers[n]
def mpf_bernoulli_huge(n, prec, rnd=None):
wp = prec + 10
piprec = wp + int(math.log(n,2))
v = mpf_gamma_int(n+1, wp)
v = mpf_mul(v, mpf_zeta_int(n, wp), wp)
v = mpf_mul(v, mpf_pow_int(mpf_pi(piprec), -n, wp))
v = mpf_shift(v, 1-n)
if not n & 3:
v = mpf_neg(v)
return mpf_pos(v, prec, rnd or round_fast)
def bernfrac(n):
r"""
Returns a tuple of integers `(p, q)` such that `p/q = B_n` exactly,
where `B_n` denotes the `n`-th Bernoulli number. The fraction is
always reduced to lowest terms. Note that for `n > 1` and `n` odd,
`B_n = 0`, and `(0, 1)` is returned.
**Examples**
The first few Bernoulli numbers are exactly::
>>> from mpmath import *
>>> for n in range(15):
... p, q = bernfrac(n)
... print n, "%s/%s" % (p, q)
...
0 1/1
1 -1/2
2 1/6
3 0/1
4 -1/30
5 0/1
6 1/42
7 0/1
8 -1/30
9 0/1
10 5/66
11 0/1
12 -691/2730
13 0/1
14 7/6
This function works for arbitrarily large `n`::
>>> p, q = bernfrac(10**4)
>>> print q
2338224387510
>>> print len(str(p))
27692
>>> mp.dps = 15
>>> print mpf(p) / q
-9.04942396360948e+27677
>>> print bernoulli(10**4)
-9.04942396360948e+27677
Note: :func:`bernoulli` computes a floating-point approximation
directly, without computing the exact fraction first.
This is much faster for large `n`.
**Algorithm**
:func:`bernfrac` works by computing the value of `B_n` numerically
and then using the von Staudt-Clausen theorem [1] to reconstruct
the exact fraction. For large `n`, this is significantly faster than
    computing `B_1, B_2, \ldots, B_n` recursively with exact arithmetic.
The implementation has been tested for `n = 10^m` up to `m = 6`.
In practice, :func:`bernfrac` appears to be about three times
slower than the specialized program calcbn.exe [2]
**References**
1. MathWorld, von Staudt-Clausen Theorem:
http://mathworld.wolfram.com/vonStaudt-ClausenTheorem.html
2. The Bernoulli Number Page:
http://www.bernoulli.org/
"""
n = int(n)
if n < 3:
return [(1, 1), (-1, 2), (1, 6)][n]
if n & 1:
return (0, 1)
q = 1
for k in list_primes(n+1):
if not (n % (k-1)):
q *= k
prec = bernoulli_size(n) + int(math.log(q,2)) + 20
b = mpf_bernoulli(n, prec)
p = mpf_mul(b, from_int(q))
pint = to_int(p, round_nearest)
return (pint, q)
"""
We compute the real factorial / gamma function using Spouge's approximation
x! = (x+a)**(x+1/2) * exp(-x-a) * [c_0 + S(x) + eps]
where S(x) is the sum of c_k/(x+k) from k = 1 to a-1 and the coefficients
are given by
c_0 = sqrt(2*pi)
(-1)**(k-1)
c_k = ----------- (a-k)**(k-1/2) exp(-k+a), k = 1,2,...,a-1
(k - 1)!
As proved by Spouge, if we choose a = log(2)/log(2*pi)*n = 0.38*n, the
relative error eps is less than 2^(-n) for any x in the right complex
half-plane (assuming a > 2). In practice, it seems that a can be chosen
quite a bit lower still (30-50%); this possibility should be investigated.
For negative x, we use the reflection formula.
References:
-----------
John L. Spouge, "Computation of the gamma, digamma, and trigamma
functions", SIAM Journal on Numerical Analysis 31 (1994), no. 3, 931-944.
"""
spouge_cache = {}
def calc_spouge_coefficients(a, prec):
wp = prec + int(a*1.4)
c = [0] * a
# b = exp(a-1)
b = mpf_exp(from_int(a-1), wp)
# e = exp(1)
e = mpf_exp(fone, wp)
# sqrt(2*pi)
sq2pi = mpf_sqrt(mpf_shift(mpf_pi(wp), 1), wp)
c[0] = to_fixed(sq2pi, prec)
for k in xrange(1, a):
# c[k] = ((-1)**(k-1) * (a-k)**k) * b / sqrt(a-k)
term = mpf_mul_int(b, ((-1)**(k-1) * (a-k)**k), wp)
term = mpf_div(term, mpf_sqrt(from_int(a-k), wp), wp)
c[k] = to_fixed(term, prec)
# b = b / (e * k)
b = mpf_div(b, mpf_mul(e, from_int(k), wp), wp)
return c
def get_spouge_coefficients(prec):
# This exact precision has been used before
if prec in spouge_cache:
return spouge_cache[prec]
for p in spouge_cache:
if 0.8 <= prec/float(p) < 1:
return spouge_cache[p]
# Here we estimate the value of a based on Spouge's inequality for
# the relative error
    a = max(3, int(0.38*prec)) # 0.38 = log(2)/log(2*pi) per bit, ~= 1.26 per decimal digit
coefs = calc_spouge_coefficients(a, prec)
spouge_cache[prec] = (prec, a, coefs)
return spouge_cache[prec]
def spouge_sum_real(x, prec, a, c):
x = to_fixed(x, prec)
s = c[0]
for k in xrange(1, a):
s += (c[k] << prec) // (x + (k << prec))
return from_man_exp(s, -prec, prec, round_floor)
def spouge_sum_rational(p, q, prec, a, c):
s = c[0]
for k in xrange(1, a):
s += c[k] * q // (p+q*k)
return from_man_exp(s, -prec, prec, round_floor)
def spouge_sum_complex(re, im, prec, a, c):
re = to_fixed(re, prec)
im = to_fixed(im, prec)
sre, sim = c[0], 0
mag = ((re**2)>>prec) + ((im**2)>>prec)
for k in xrange(1, a):
M = mag + re*(2*k) + ((k**2) << prec)
sre += (c[k] * (re + (k << prec))) // M
sim -= (c[k] * im) // M
re = from_man_exp(sre, -prec, prec, round_floor)
im = from_man_exp(sim, -prec, prec, round_floor)
return re, im
def mpf_gamma_int(n, prec, rounding=round_fast):
if n < 1000:
return from_int(ifac(n-1), prec, rounding)
# XXX: choose the cutoff less arbitrarily
size = int(n*math.log(n,2))
if prec > size/20.0:
return from_int(ifac(n-1), prec, rounding)
return mpf_gamma(from_int(n), prec, rounding)
def mpf_factorial(x, prec, rounding=round_fast):
return mpf_gamma(x, prec, rounding, p1=0)
def mpc_factorial(x, prec, rounding=round_fast):
return mpc_gamma(x, prec, rounding, p1=0)
def mpf_gamma(x, prec, rounding=round_fast, p1=1):
"""
Computes the gamma function of a real floating-point argument.
With p1=0, computes a factorial instead.
"""
sign, man, exp, bc = x
if not man:
if x == finf:
return finf
if x == fninf or x == fnan:
return fnan
# More precision is needed for enormous x. TODO:
# use Stirling's formula + Euler-Maclaurin summation
size = exp + bc
if size > 5:
size = int(size * math.log(size,2))
wp = prec + max(0, size) + 15
if exp >= 0:
if sign or (p1 and not man):
raise ValueError("gamma function pole")
# A direct factorial is fastest
if exp + bc <= 10:
return from_int(ifac((man<<exp)-p1), prec, rounding)
reflect = sign or exp+bc < -1
if p1:
# Should be done exactly!
x = mpf_sub(x, fone)
# x < 0.25
if reflect:
# gamma = pi / (sin(pi*x) * gamma(1-x))
wp += 15
pix = mpf_mul(x, mpf_pi(wp), wp)
t = mpf_sin_pi(x, wp)
g = mpf_gamma(mpf_sub(fone, x), wp)
return mpf_div(pix, mpf_mul(t, g, wp), prec, rounding)
sprec, a, c = get_spouge_coefficients(wp)
s = spouge_sum_real(x, sprec, a, c)
# gamma = exp(log(x+a)*(x+0.5) - xpa) * s
xpa = mpf_add(x, from_int(a), wp)
logxpa = mpf_log(xpa, wp)
xph = mpf_add(x, fhalf, wp)
t = mpf_sub(mpf_mul(logxpa, xph, wp), xpa, wp)
t = mpf_mul(mpf_exp(t, wp), s, prec, rounding)
return t
def mpc_gamma(x, prec, rounding=round_fast, p1=1):
re, im = x
if im == fzero:
return mpf_gamma(re, prec, rounding, p1), fzero
# More precision is needed for enormous x.
sign, man, exp, bc = re
isign, iman, iexp, ibc = im
if re == fzero:
size = iexp+ibc
else:
size = max(exp+bc, iexp+ibc)
if size > 5:
size = int(size * math.log(size,2))
reflect = sign or (exp+bc < -1)
wp = prec + max(0, size) + 25
# Near x = 0 pole (TODO: other poles)
if p1:
if size < -prec-5:
return mpc_add_mpf(mpc_div(mpc_one, x, 2*prec+10), \
mpf_neg(mpf_euler(2*prec+10)), prec, rounding)
elif size < -5:
wp += (-2*size)
if p1:
# Should be done exactly!
re_orig = re
re = mpf_sub(re, fone, bc+abs(exp)+2)
x = re, im
if reflect:
# Reflection formula
wp += 15
pi = mpf_pi(wp), fzero
pix = mpc_mul(x, pi, wp)
t = mpc_sin_pi(x, wp)
u = mpc_sub(mpc_one, x, wp)
g = mpc_gamma(u, wp)
w = mpc_mul(t, g, wp)
return mpc_div(pix, w, wp)
# Extremely close to the real line?
# XXX: reflection formula
if iexp+ibc < -wp:
a = mpf_gamma(re_orig, wp)
b = mpf_psi0(re_orig, wp)
        gamma_diff = mpf_mul(a, b, wp)  # gamma'(x) = gamma(x)*psi(x)
return mpf_pos(a, prec, rounding), mpf_mul(gamma_diff, im, prec, rounding)
sprec, a, c = get_spouge_coefficients(wp)
s = spouge_sum_complex(re, im, sprec, a, c)
# gamma = exp(log(x+a)*(x+0.5) - xpa) * s
repa = mpf_add(re, from_int(a), wp)
logxpa = mpc_log((repa, im), wp)
reph = mpf_add(re, fhalf, wp)
t = mpc_sub(mpc_mul(logxpa, (reph, im), wp), (repa, im), wp)
t = mpc_mul(mpc_exp(t, wp), s, prec, rounding)
return t
"""
For all polygamma (psi) functions, we use the Euler-Maclaurin summation
formula. It looks slightly different in the m = 0 and m > 0 cases.
For m = 0, we have
oo
___ B
(0) 1 \ 2 k -2 k
psi (z) ~ log z + --- - ) ------ z
2 z /___ (2 k)!
k = 1
Experiment shows that the minimum term of the asymptotic series
reaches 2^(-p) when Re(z) > 0.11*p. So we simply use the recurrence
for psi (equivalent, in fact, to summing to the first few terms
directly before applying E-M) to obtain z large enough.
Since, very crudely, log z ~= 1 for Re(z) > 1, we can use
fixed-point arithmetic (if z is extremely large, log(z) itself
is a sufficient approximation, so we can stop there already).
For Re(z) << 0, we could use recurrence, but this is of course
inefficient for large negative z, so there we use the
reflection formula instead.
For m > 0, we have
N - 1
___
~~~(m) [ \ 1 ] 1 1
psi (z) ~ [ ) -------- ] + ---------- + -------- +
[ /___ m+1 ] m+1 m
k = 1 (z+k) ] 2 (z+N) m (z+N)
oo
___ B
\ 2 k (m+1) (m+2) ... (m+2k-1)
+ ) ------ ------------------------
/___ (2 k)! m + 2 k
k = 1 (z+N)
where ~~~ denotes the function rescaled by 1/((-1)^(m+1) m!).
Here again N is chosen to make z+N large enough for the minimum
term in the last series to become smaller than eps.
TODO: the current estimation of N for m > 0 is *very suboptimal*.
TODO: implement the reflection formula for m > 0, Re(z) << 0.
It is generally a combination of multiple cotangents. Need to
figure out a reasonably simple way to generate these formulas
on the fly.
TODO: maybe use exact algorithms to compute psi for integral
and certain rational arguments, as this can be much more
efficient. (On the other hand, the availability of these
special values provides a convenient way to test the general
algorithm.)
"""
def mpf_harmonic(x, prec, rnd):
if x in (fzero, fnan, finf):
return x
a = mpf_psi0(mpf_add(fone, x, prec+5), prec)
return mpf_add(a, mpf_euler(prec+5, rnd), prec, rnd)
def mpc_harmonic(z, prec, rnd):
if z[1] == fzero:
return (mpf_harmonic(z[0], prec, rnd), fzero)
a = mpc_psi0(mpc_add_mpf(z, fone, prec+5), prec)
return mpc_add_mpf(a, mpf_euler(prec+5, rnd), prec, rnd)
def mpf_psi0(x, prec, rnd=round_fast):
"""
Computation of the digamma function (psi function of order 0)
of a real argument.
"""
sign, man, exp, bc = x
wp = prec + 10
if not man:
if x == finf: return x
if x == fninf or x == fnan: return fnan
if x == fzero or (exp >= 0 and sign):
raise ValueError("polygamma pole")
# Reflection formula
if sign and exp+bc > 3:
c, s = mpf_cos_sin_pi(x, wp)
q = mpf_mul(mpf_div(c, s, wp), mpf_pi(wp), wp)
p = mpf_psi0(mpf_sub(fone, x, wp), wp)
return mpf_sub(p, q, prec, rnd)
# The logarithmic term is accurate enough
if (not sign) and bc + exp > wp:
return mpf_log(mpf_sub(x, fone, wp), prec, rnd)
# Initial recurrence to obtain a large enough x
m = to_int(x)
n = int(0.11*wp) + 2
s = MPZ_ZERO
x = to_fixed(x, wp)
one = MPZ_ONE << wp
if m < n:
for k in xrange(m, n):
s -= (one << wp) // x
x += one
x -= one
# Logarithmic term
s += to_fixed(mpf_log(from_man_exp(x, -wp, wp), wp), wp)
# Endpoint term in Euler-Maclaurin expansion
s += (one << wp) // (2*x)
# Euler-Maclaurin remainder sum
x2 = (x*x) >> wp
t = one
prev = 0
k = 1
while 1:
t = (t*x2) >> wp
bsign, bman, bexp, bbc = mpf_bernoulli(2*k, wp)
offset = (bexp + 2*wp)
if offset >= 0: term = (bman << offset) // (t*(2*k))
else: term = (bman >> (-offset)) // (t*(2*k))
if k & 1: s -= term
else: s += term
if k > 2 and term >= prev:
break
prev = term
k += 1
return from_man_exp(s, -wp, wp, rnd)
def mpc_psi0(z, prec, rnd=round_fast):
"""
Computation of the digamma function (psi function of order 0)
of a complex argument.
"""
re, im = z
# Fall back to the real case
if im == fzero:
return (mpf_psi0(re, prec, rnd), fzero)
wp = prec + 20
sign, man, exp, bc = re
# Reflection formula
if sign and exp+bc > 3:
c = mpc_cos_pi(z, wp)
s = mpc_sin_pi(z, wp)
q = mpc_mul_mpf(mpc_div(c, s, wp), mpf_pi(wp), wp)
p = mpc_psi0(mpc_sub(mpc_one, z, wp), wp)
return mpc_sub(p, q, prec, rnd)
# Just the logarithmic term
if (not sign) and bc + exp > wp:
return mpc_log(mpc_sub(z, mpc_one, wp), prec, rnd)
# Initial recurrence to obtain a large enough z
w = to_int(re)
n = int(0.11*wp) + 2
s = mpc_zero
if w < n:
for k in xrange(w, n):
s = mpc_sub(s, mpc_reciprocal(z, wp), wp)
z = mpc_add_mpf(z, fone, wp)
z = mpc_sub(z, mpc_one, wp)
# Logarithmic and endpoint term
s = mpc_add(s, mpc_log(z, wp), wp)
s = mpc_add(s, mpc_div(mpc_half, z, wp), wp)
# Euler-Maclaurin remainder sum
z2 = mpc_square(z, wp)
t = mpc_one
prev = mpc_zero
k = 1
eps = mpf_shift(fone, -wp+2)
while 1:
t = mpc_mul(t, z2, wp)
bern = mpf_bernoulli(2*k, wp)
term = mpc_mpf_div(bern, mpc_mul_int(t, 2*k, wp), wp)
s = mpc_sub(s, term, wp)
szterm = mpc_abs(term, 10)
if k > 2 and mpf_le(szterm, eps):
break
prev = term
k += 1
return s
def mpf_psi(m, x, prec, rnd=round_fast):
"""
Computation of the polygamma function of arbitrary integer order
m >= 0, for a real argument x.
"""
if m == 0:
        return mpf_psi0(x, prec, rnd)
return mpc_psi(m, (x, fzero), prec, rnd)[0]
def mpc_psi(m, z, prec, rnd=round_fast):
"""
Computation of the polygamma function of arbitrary integer order
m >= 0, for a complex argument z.
"""
if m == 0:
return mpc_psi0(z, prec, rnd)
re, im = z
wp = prec + 20
sign, man, exp, bc = re
if not man:
if re == finf and im == fzero:
return (fzero, fzero)
if re == fnan:
return fnan
# Recurrence
w = to_int(re)
n = int(0.4*wp + 4*m)
s = mpc_zero
if w < n:
for k in xrange(w, n):
t = mpc_pow_int(z, -m-1, wp)
s = mpc_add(s, t, wp)
z = mpc_add_mpf(z, fone, wp)
zm = mpc_pow_int(z, -m, wp)
z2 = mpc_pow_int(z, -2, wp)
# 1/m*(z+N)^m
integral_term = mpc_div_mpf(zm, from_int(m), wp)
s = mpc_add(s, integral_term, wp)
# 1/2*(z+N)^(-(m+1))
s = mpc_add(s, mpc_mul_mpf(mpc_div(zm, z, wp), fhalf, wp), wp)
a = m + 1
b = 2
k = 1
# Important: we want to sum up to the *relative* error,
# not the absolute error, because psi^(m)(z) might be tiny
magn = mpc_abs(s, 10)
magn = magn[2]+magn[3]
eps = mpf_shift(fone, magn-wp+2)
while 1:
zm = mpc_mul(zm, z2, wp)
bern = mpf_bernoulli(2*k, wp)
scal = mpf_mul_int(bern, a, wp)
scal = mpf_div(scal, from_int(b), wp)
term = mpc_mul_mpf(zm, scal, wp)
s = mpc_add(s, term, wp)
szterm = mpc_abs(term, 10)
if k > 2 and mpf_le(szterm, eps):
break
#print k, to_str(szterm, 10), to_str(eps, 10)
a *= (m+2*k)*(m+2*k+1)
b *= (2*k+1)*(2*k+2)
k += 1
# Scale and sign factor
v = mpc_mul_mpf(s, mpf_gamma(from_int(m+1), wp), prec, rnd)
if not (m & 1):
v = mpf_neg(v[0]), mpf_neg(v[1])
return v
"""
We use zeta(s) = eta(s) / (1 - 2**(1-s)) and Borwein's approximation
n-1
___ k
-1 \ (-1) (d_k - d_n)
eta(s) ~= ---- ) ------------------
d_n /___ s
k = 0 (k + 1)
where
k
___ i
\ (n + i - 1)! 4
d_k = n ) ---------------.
/___ (n - i)! (2i)!
i = 0
If s = a + b*I, the absolute error for eta(s) is bounded by
3 (1 + 2|b|)
------------ * exp(|b| pi/2)
n
(3+sqrt(8))
Disregarding the linear term, we have approximately,
log(err) ~= log(exp(1.58*|b|)) - log(5.8**n)
log(err) ~= 1.58*|b| - log(5.8)*n
log(err) ~= 1.58*|b| - 1.76*n
log2(err) ~= 2.28*|b| - 2.54*n
So for p bits, we should choose n > (p + 2.28*|b|) / 2.54.
References:
-----------
Peter Borwein, "An Efficient Algorithm for the Riemann Zeta Function"
http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P117.ps
http://en.wikipedia.org/wiki/Dirichlet_eta_function
"""
borwein_cache = {}
def borwein_coefficients(n):
if n in borwein_cache:
return borwein_cache[n]
ds = [MPZ_ZERO] * (n+1)
d = MPZ_ONE
s = ds[0] = MPZ_ONE
for i in range(1, n+1):
d = d * 4 * (n+i-1) * (n-i+1)
d //= ((2*i) * ((2*i)-1))
s += d
ds[i] = s
borwein_cache[n] = ds
return ds
ZETA_INT_CACHE_MAX_PREC = 1000
zeta_int_cache = {}
def mpf_zeta_int(s, prec, rnd=round_fast):
"""
Optimized computation of zeta(s) for an integer s.
"""
wp = prec + 20
s = int(s)
if s in zeta_int_cache and zeta_int_cache[s][0] >= wp:
return mpf_pos(zeta_int_cache[s][1], prec, rnd)
if s < 2:
if s == 1:
raise ValueError("zeta(1) pole")
if not s:
return mpf_neg(fhalf)
return mpf_div(mpf_bernoulli(-s+1, wp), from_int(s-1), prec, rnd)
# 2^-s term vanishes?
if s >= wp:
return mpf_perturb(fone, 0, prec, rnd)
# 5^-s term vanishes?
elif s >= wp*0.431:
t = one = 1 << wp
t += 1 << (wp - s)
t += one // (MPZ_THREE ** s)
t += 1 << max(0, wp - s*2)
return from_man_exp(t, -wp, prec, rnd)
else:
# Fast enough to sum directly?
# Even better, we use the Euler product (idea stolen from pari)
m = (float(wp)/(s-1) + 1)
if m < 30:
needed_terms = int(2.0**m + 1)
if needed_terms < int(wp/2.54 + 5) / 10:
t = fone
for k in list_primes(needed_terms):
#print k, needed_terms
powprec = int(wp - s*math.log(k,2))
if powprec < 2:
break
a = mpf_sub(fone, mpf_pow_int(from_int(k), -s, powprec), wp)
t = mpf_mul(t, a, wp)
return mpf_div(fone, t, wp)
# Use Borwein's algorithm
n = int(wp/2.54 + 5)
d = borwein_coefficients(n)
t = MPZ_ZERO
s = MPZ(s)
for k in xrange(n):
t += (((-1)**k * (d[k] - d[n])) << wp) // (k+1)**s
t = (t << wp) // (-d[n])
t = (t << wp) // ((1 << wp) - (1 << (wp+1-s)))
if (s in zeta_int_cache and zeta_int_cache[s][0] < wp) or (s not in zeta_int_cache):
zeta_int_cache[s] = (wp, from_man_exp(t, -wp-wp))
return from_man_exp(t, -wp-wp, prec, rnd)
def mpf_zeta(s, prec, rnd=round_fast, alt=0):
sign, man, exp, bc = s
if not man:
if s == fzero:
if alt:
return fhalf
else:
return mpf_neg(fhalf)
if s == finf:
return fone
return fnan
wp = prec + 20
# First term vanishes?
if (not sign) and (exp + bc > (math.log(wp,2) + 2)):
return mpf_perturb(fone, alt, prec, rnd)
# Optimize for integer arguments
elif exp >= 0:
if alt:
if s == fone:
return mpf_ln2(prec, rnd)
z = mpf_zeta_int(to_int(s), wp, negative_rnd[rnd])
q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
return mpf_mul(z, q, prec, rnd)
else:
return mpf_zeta_int(to_int(s), prec, rnd)
# Negative: use the reflection formula
# Borwein only proves the accuracy bound for x >= 1/2. However, based on
# tests, the accuracy without reflection is quite good even some distance
# to the left of 1/2. XXX: verify this.
if sign:
# XXX: could use the separate refl. formula for Dirichlet eta
if alt:
q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
return mpf_mul(mpf_zeta(s, wp), q, prec, rnd)
# XXX: -1 should be done exactly
y = mpf_sub(fone, s, 10*wp)
a = mpf_gamma(y, wp)
b = mpf_zeta(y, wp)
c = mpf_sin_pi(mpf_shift(s, -1), wp)
wp2 = wp + (exp+bc)
pi = mpf_pi(wp+wp2)
d = mpf_div(mpf_pow(mpf_shift(pi, 1), s, wp2), pi, wp2)
return mpf_mul(a,mpf_mul(b,mpf_mul(c,d,wp),wp),prec,rnd)
# Near pole
r = mpf_sub(fone, s, wp)
asign, aman, aexp, abc = mpf_abs(r)
pole_dist = -2*(aexp+abc)
if pole_dist > wp:
if alt:
return mpf_ln2(prec, rnd)
else:
q = mpf_neg(mpf_div(fone, r, wp))
return mpf_add(q, mpf_euler(wp), prec, rnd)
else:
wp += max(0, pole_dist)
t = MPZ_ZERO
#wp += 16 - (prec & 15)
# Use Borwein's algorithm
n = int(wp/2.54 + 5)
d = borwein_coefficients(n)
t = MPZ_ZERO
sf = to_fixed(s, wp)
for k in xrange(n):
u = from_man_exp(-sf*log_int_fixed(k+1, wp), -2*wp, wp)
esign, eman, eexp, ebc = mpf_exp(u, wp)
offset = eexp + wp
if offset >= 0:
w = ((d[k] - d[n]) * eman) << offset
else:
w = ((d[k] - d[n]) * eman) >> (-offset)
if k & 1:
t -= w
else:
t += w
t = t // (-d[n])
t = from_man_exp(t, -wp, wp)
if alt:
return mpf_pos(t, prec, rnd)
else:
q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
return mpf_div(t, q, prec, rnd)
def mpc_zeta(s, prec, rnd=round_fast, alt=0, force=False):
re, im = s
if im == fzero:
return mpf_zeta(re, prec, rnd, alt), fzero
# slow for large s
if (not force) and mpf_gt(mpc_abs(s, 10), from_int(prec)):
raise NotImplementedError
wp = prec + 20
# Near pole
r = mpc_sub(mpc_one, s, wp)
asign, aman, aexp, abc = mpc_abs(r, 10)
pole_dist = -2*(aexp+abc)
if pole_dist > wp:
if alt:
q = mpf_ln2(wp)
y = mpf_mul(q, mpf_euler(wp), wp)
g = mpf_shift(mpf_mul(q, q, wp), -1)
g = mpf_sub(y, g)
z = mpc_mul_mpf(r, mpf_neg(g), wp)
z = mpc_add_mpf(z, q, wp)
return mpc_pos(z, prec, rnd)
else:
q = mpc_neg(mpc_div(mpc_one, r, wp))
q = mpc_add_mpf(q, mpf_euler(wp), wp)
return mpc_pos(q, prec, rnd)
else:
wp += max(0, pole_dist)
# Reflection formula. To be rigorous, we should reflect to the left of
# re = 1/2 (see comments for mpf_zeta), but this leads to unnecessary
# slowdown for interesting values of s
if mpf_lt(re, fzero):
# XXX: could use the separate refl. formula for Dirichlet eta
if alt:
q = mpc_sub(mpc_one, mpc_pow(mpc_two, mpc_sub(mpc_one, s, wp),
wp), wp)
return mpc_mul(mpc_zeta(s, wp), q, prec, rnd)
# XXX: -1 should be done exactly
y = mpc_sub(mpc_one, s, 10*wp)
a = mpc_gamma(y, wp)
b = mpc_zeta(y, wp)
c = mpc_sin_pi(mpc_shift(s, -1), wp)
rsign, rman, rexp, rbc = re
isign, iman, iexp, ibc = im
mag = max(rexp+rbc, iexp+ibc)
wp2 = wp + mag
pi = mpf_pi(wp+wp2)
pi2 = (mpf_shift(pi, 1), fzero)
d = mpc_div_mpf(mpc_pow(pi2, s, wp2), pi, wp2)
return mpc_mul(a,mpc_mul(b,mpc_mul(c,d,wp),wp),prec,rnd)
n = int(wp/2.54 + 5)
n += int(0.9*abs(to_int(im)))
d = borwein_coefficients(n)
ref = to_fixed(re, wp)
imf = to_fixed(im, wp)
tre = MPZ_ZERO
tim = MPZ_ZERO
one = MPZ_ONE << wp
one_2wp = MPZ_ONE << (2*wp)
critical_line = re == fhalf
for k in xrange(n):
log = log_int_fixed(k+1, wp)
# A square root is much cheaper than an exp
if critical_line:
w = one_2wp // sqrt_fixed((k+1) << wp, wp)
else:
w = to_fixed(mpf_exp(from_man_exp(-ref*log, -2*wp), wp), wp)
if k & 1:
w *= (d[n] - d[k])
else:
w *= (d[k] - d[n])
wre, wim = mpf_cos_sin(from_man_exp(-imf * log, -2*wp), wp)
tre += (w * to_fixed(wre, wp)) >> wp
tim += (w * to_fixed(wim, wp)) >> wp
tre //= (-d[n])
tim //= (-d[n])
tre = from_man_exp(tre, -wp, wp)
tim = from_man_exp(tim, -wp, wp)
if alt:
return mpc_pos((tre, tim), prec, rnd)
else:
q = mpc_sub(mpc_one, mpc_pow(mpc_two, r, wp), wp)
return mpc_div((tre, tim), q, prec, rnd)
def mpf_altzeta(s, prec, rnd=round_fast):
return mpf_zeta(s, prec, rnd, 1)
def mpc_altzeta(s, prec, rnd=round_fast):
return mpc_zeta(s, prec, rnd, 1)
mpf_zetasum = None
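# Fixed-point helpers for mpc_zetasum below: the argument x carries
# 2*wp fraction bits, and the results are returned as integers with
# wp fraction bits.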
def exp_fixed_prod(x, wp):
u = from_man_exp(x, -2*wp, wp)
esign, eman, eexp, ebc = mpf_exp(u, wp)
offset = eexp + wp
if offset >= 0:
return eman << offset
else:
return eman >> (-offset)
def cos_sin_fixed_prod(x, wp):
cos, sin = mpf_cos_sin(from_man_exp(x, -2*wp), wp)
sign, man, exp, bc = cos
if sign:
man = -man
offset = exp + wp
if offset >= 0:
cos = man << offset
else:
cos = man >> (-offset)
sign, man, exp, bc = sin
if sign:
man = -man
offset = exp + wp
if offset >= 0:
sin = man << offset
else:
sin = man >> (-offset)
return cos, sin
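# Fixed-point binary exponentiation: x and the returned value both
# carry wp fraction bits.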
def pow_fixed(x, n, wp):
if n == 1:
return x
y = MPZ_ONE << wp
while n:
if n & 1:
y = (y*x) >> wp
n -= 1
x = (x*x) >> wp
n //= 2
return y
def mpc_zetasum(s, a, n, derivatives, reflect, prec):
"""
Fast version of mp._zetasum, assuming s = complex, a = integer.
"""
wp = prec + 10
have_derivatives = derivatives != [0]
have_one_derivative = len(derivatives) == 1
# parse s
sre, sim = s
critical_line = (sre == fhalf)
sre = to_fixed(sre, wp)
sim = to_fixed(sim, wp)
maxd = max(derivatives)
if not have_one_derivative:
derivatives = range(maxd+1)
# x_d = 0, y_d = 0
xre = [MPZ_ZERO for d in derivatives]
xim = [MPZ_ZERO for d in derivatives]
if reflect:
yre = [MPZ_ZERO for d in derivatives]
yim = [MPZ_ZERO for d in derivatives]
else:
yre = yim = []
one = MPZ_ONE << wp
one_2wp = MPZ_ONE << (2*wp)
for w in xrange(a, a+n+1):
log = log_int_fixed(w, wp)
cos, sin = cos_sin_fixed_prod(-sim*log, wp)
if critical_line:
u = one_2wp // sqrt_fixed(w << wp, wp)
else:
u = exp_fixed_prod(-sre*log, wp)
xterm_re = (u * cos) >> wp
xterm_im = (u * sin) >> wp
if reflect:
reciprocal = (one_2wp // (u*w))
yterm_re = (reciprocal * cos) >> wp
yterm_im = (reciprocal * sin) >> wp
if have_derivatives:
if have_one_derivative:
log = pow_fixed(log, maxd, wp)
xre[0] += (xterm_re * log) >> wp
xim[0] += (xterm_im * log) >> wp
if reflect:
yre[0] += (yterm_re * log) >> wp
yim[0] += (yterm_im * log) >> wp
else:
t = MPZ_ONE << wp
for d in derivatives:
xre[d] += (xterm_re * t) >> wp
xim[d] += (xterm_im * t) >> wp
if reflect:
yre[d] += (yterm_re * t) >> wp
yim[d] += (yterm_im * t) >> wp
t = (t * log) >> wp
else:
xre[0] += xterm_re
xim[0] += xterm_im
if reflect:
yre[0] += yterm_re
yim[0] += yterm_im
if have_derivatives:
if have_one_derivative:
if maxd % 2:
xre[0] = -xre[0]
xim[0] = -xim[0]
if reflect:
yre[0] = -yre[0]
yim[0] = -yim[0]
else:
xre = [(-1)**d * xre[d] for d in derivatives]
xim = [(-1)**d * xim[d] for d in derivatives]
if reflect:
yre = [(-1)**d * yre[d] for d in derivatives]
yim = [(-1)**d * yim[d] for d in derivatives]
xs = [(from_man_exp(xa, -wp, prec, 'n'), from_man_exp(xb, -wp, prec, 'n'))
for (xa, xb) in zip(xre, xim)]
ys = [(from_man_exp(ya, -wp, prec, 'n'), from_man_exp(yb, -wp, prec, 'n'))
for (ya, yb) in zip(yre, yim)]
return xs, ys
|
import numpy as np
from numpy.testing import *
import unittest
from conformalmapping import *
class TestZline(unittest.TestCase):
def test_create(self):
line = Zline(np.array([0.0, 1.0j]))
def test_position(self):
line = Zline(np.array([0.0, 1.0j]))
pt = line.position(0)
        assert_allclose(pt, 0.0)
def test_tangent(self):
line = Zline(np.array([0.0, 1.0j]))
t = line.tangent(0)
assert_allclose(t, 1.0j)
|
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse_lazy
OSCAR_SHOP_NAME = 'e-ticaret'
OSCAR_SHOP_TAGLINE = ''
OSCAR_HOMEPAGE = reverse_lazy('promotions:home')
OSCAR_BASKET_COOKIE_LIFETIME = 7 * 24 * 60 * 60
OSCAR_BASKET_COOKIE_OPEN = 'oscar_open_basket'
OSCAR_MAX_BASKET_QUANTITY_THRESHOLD = 10000
OSCAR_RECENTLY_VIEWED_COOKIE_LIFETIME = 7 * 24 * 60 * 60
OSCAR_RECENTLY_VIEWED_COOKIE_NAME = 'oscar_history'
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
OSCAR_DEFAULT_CURRENCY = 'TL'
OSCAR_IMAGE_FOLDER = 'images/products/%Y/%m/'
OSCAR_PROMOTION_FOLDER = 'images/promotions/'
OSCAR_DELETE_IMAGE_FILES = True
OSCAR_MISSING_IMAGE_URL = 'image_not_found.jpg'
OSCAR_UPLOAD_ROOT = '/tmp'
OSCAR_REQUIRED_ADDRESS_FIELDS = ('first_name', 'last_name', 'line1',
'line4', 'postcode', 'country')
OSCAR_PRODUCTS_PER_PAGE = 20
OSCAR_ALLOW_ANON_CHECKOUT = False
COUNTDOWN, LIST, SINGLE_PRODUCT, TABBED_BLOCK = (
'Countdown', 'List', 'SingleProduct', 'TabbedBlock')
OSCAR_PROMOTION_MERCHANDISING_BLOCK_TYPES = (
(COUNTDOWN, "Vertical list"),
(LIST, "Horizontal list"),
(TABBED_BLOCK, "Tabbed block"),
(SINGLE_PRODUCT, "Single product"),
)
OSCAR_PROMOTION_POSITIONS = (('page', 'Page'),
('right', 'Right-hand sidebar'),
('left', 'Left-hand sidebar'))
OSCAR_ALLOW_ANON_REVIEWS = True
OSCAR_MODERATE_REVIEWS = False
OSCAR_ACCOUNTS_REDIRECT_URL = 'customer:profile-view'
OSCAR_EAGER_ALERTS = True
OSCAR_SEND_REGISTRATION_EMAIL = True
OSCAR_FROM_EMAIL = 'oscar@example.com'
OSCAR_SLUG_FUNCTION = 'oscar.core.utils.default_slugifier'
OSCAR_SLUG_MAP = {}
OSCAR_SLUG_BLACKLIST = []
OSCAR_COOKIES_DELETE_ON_LOGOUT = ['oscar_recently_viewed_products', ]
OSCAR_HIDDEN_FEATURES = []
OSCAR_DASHBOARD_NAVIGATION = [
{
'label': _('Dashboard'),
'icon': 'icon-th-list',
'url_name': 'dashboard:index',
},
{
'label': _('Catalogue'),
'icon': 'icon-sitemap',
'children': [
{
'label': _('Products'),
'url_name': 'dashboard:catalogue-product-list',
},
{
'label': _('Product Types'),
'url_name': 'dashboard:catalogue-class-list',
},
{
'label': _('Categories'),
'url_name': 'dashboard:catalogue-category-list',
},
{
'label': _('Ranges'),
'url_name': 'dashboard:range-list',
},
{
'label': _('Low stock alerts'),
'url_name': 'dashboard:stock-alert-list',
},
]
},
{
'label': _('Fulfilment'),
'icon': 'icon-shopping-cart',
'children': [
{
'label': _('Orders'),
'url_name': 'dashboard:order-list',
},
{
'label': _('Partners'),
'url_name': 'dashboard:partner-list',
},
# The shipping method dashboard is disabled by default as it might
# be confusing. Weight-based shipping methods aren't hooked into
# the shipping repository by default (as it would make
# customising the repository slightly more difficult).
# {
# 'label': _('Shipping charges'),
# 'url_name': 'dashboard:shipping-method-list',
# },
]
},
{
'label': _('Customers'),
'icon': 'icon-group',
'url_name': 'dashboard:users-index',
},
{
'label': _('Offers'),
'icon': 'icon-bullhorn',
'url_name': 'dashboard:offer-list',
},
{
        'label': _('Reviews'),
        'icon': 'icon-comment',
'url_name': 'dashboard:reviews-list',
# 'children': [
# {
# 'label': _('Content blocks'),
# 'url_name': 'dashboard:promotion-list',
# },
# {
# 'label': _('Content blocks by page'),
# 'url_name': 'dashboard:promotion-list-by-page',
# },
# {
# 'label': _('Pages'),
# 'url_name': 'dashboard:page-list',
# },
# {
# 'label': _('Email templates'),
# 'url_name': 'dashboard:comms-list',
# },
#]
},
# {
# 'label': _('Reports'),
# 'icon': 'icon-bar-chart',
# 'url_name': 'dashboard:reports-index',
# },
]
OSCAR_DASHBOARD_DEFAULT_ACCESS_FUNCTION = 'oscar.apps.dashboard.nav.default_access_fn' # noqa
OSCAR_SEARCH_FACETS = {
'fields': {
# The key for these dicts will be used when passing facet data
# to the template. Same for the 'queries' dict below.
'product_class': {
'name': _('Type'),
'field': 'product_class'
},
'rating': {
'name': _('Rating'),
'field': 'rating',
# You can specify an 'options' element that will be passed to the
# SearchQuerySet.facet() call. It's hard to get 'missing' to work
            # correctly though, because of Solr's hilarious syntax for selecting
# items without a specific facet:
# http://wiki.apache.org/solr/SimpleFacetParameters#facet.method
# 'options': {'missing': 'true'}
}
},
'queries': {
'price_range': {
'name': _('Price range'),
'field': 'price',
'queries': [
# This is a list of (name, query) tuples where the name will
# be displayed on the front-end.
(_('0 to 20'), u'[0 TO 20]'),
(_('20 to 40'), u'[20 TO 40]'),
(_('40 to 60'), u'[40 TO 60]'),
(_('60+'), u'[60 TO *]'),
]
},
}
}
OSCAR_SETTINGS = dict(
[(k, v) for k, v in locals().items() if k.startswith('OSCAR_')])
|
def extractZazaTranslations(item):
"""
Parser for 'ZAZA Translations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
|
from flask import Flask
from webservices.models import Provider
from webservices.sync import provider_for_flask
import os
app = Flask(__name__)
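# keys.txt is assumed to contain one "public_key:private_key" pair
# per line; blank lines are ignored.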
with open(os.path.join(os.path.dirname(__file__), 'keys.txt')) as fobj:
data = fobj.read()
app.keys = dict([
line.split(':')
for line in data.split('\n')
if line.strip()
])
class HelloProvider(Provider):
def get_private_key(self, public_key):
private_key = app.keys.get(public_key)
return private_key
def provide(self, data):
name = data.get('name', 'world')
return {'greeting': u'hello %s' % name}
provider_for_flask(app, '/', HelloProvider())
if __name__ == '__main__':
app.run(port=8000, debug=True)
|
from paste import httpexceptions
from paste.cascade import Cascade
from paste.urlparser import StaticURLParser
from paste.registry import RegistryManager
from paste.deploy.config import ConfigMiddleware, CONFIG
from paste.deploy.converters import asbool
from pylons.error import error_template
from pylons import config
from pylons.middleware import ErrorHandler, ErrorDocuments, StaticJavascripts, error_mapper
import pylons.wsgiapp
import projectname.lib.helpers
import projectname.lib.app_globals as app_globals
from projectname.config.environment import load_environment
def make_app(global_conf, full_stack=True, **app_conf):
"""Create a WSGI application and return it
    global_conf is a dict representing the Paste configuration options; the
    paste.deploy.converters functions should be used when parsing Paste
    config options to ensure they're treated properly.
"""
load_environment(global_conf, app_conf)
# Pull the other engine and put a new one up first
config.template_engines.pop()
kidopts = {'kid.assume_encoding':'utf-8', 'kid.encoding':'utf-8'}
pylons.config.add_template_engine('kid', 'projectname.kidtemplates', kidopts)
# Load our default Pylons WSGI app and make g available
app = pylons.wsgiapp.PylonsApp(helpers=projectname.lib.helpers,
g=app_globals.Globals)
app = ConfigMiddleware(app, config._current_obj())
    # If error handling and exception catching will be handled by middleware
# for multiple apps, you will want to set full_stack = False in your config
# file so that it can catch the problems.
if asbool(full_stack):
# Change HTTPExceptions to HTTP responses
app = httpexceptions.make_middleware(app, global_conf)
# Error Handling
app = ErrorHandler(app, global_conf, error_template=error_template, **config['pylons.errorware'])
# Display error documents for 401, 403, 404 status codes (if debug is disabled also
# intercepts 500)
app = ErrorDocuments(app, global_conf, mapper=error_mapper, **app_conf)
# Establish the Registry for this application
app = RegistryManager(app)
static_app = StaticURLParser(config['pylons.paths']['static_files'])
javascripts_app = StaticJavascripts()
app = Cascade([static_app, javascripts_app, app])
return app
|
import sys
import os
project_root = os.path.abspath("../")
sys.path.insert(0, project_root)
sys.path.insert(0, os.path.abspath("../scripts/examples")) # allow autodoc and sphinxdoc on nut_shell.py
sys.path.insert(0, os.path.abspath('.'))
import ec
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon', 'sphinxcontrib.spelling', 'eccontrib.sphinxdoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'ec'
copyright = u'2015, Laufire Technologies'
version = ec.__version__
release = ec.__version__
exclude_patterns = ['_build', '_spelling']
add_module_names = False
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'ecdoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
latex_documents = [
('index', 'ec.tex',
u'ec Documentation',
u'Laufire Technologies', 'manual'),
]
man_pages = [
('index', 'ec',
u'ec Documentation',
[u'Laufire Technologies'], 1)
]
texinfo_documents = [
('index', 'ec',
u'ec Documentation',
u'Laufire Technologies',
'ec',
'One line description of project.',
'Miscellaneous'),
]
autodoc_member_order = 'bysource'
spelling_word_list_filename = '_spelling/wordlist.txt'
|
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "private_files.tests.settings")
import django
django.setup()
from django.core.management import call_command
from django.conf import settings
try:
settings.configure()
except RuntimeError:
pass
call_command('test')
if __name__ == '__main__':
main()
|
import logging
from collections import defaultdict
from itertools import combinations
import re
import os
import os.path as op
import numpy as np
from scipy.spatial.distance import pdist
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..utils import _clean_names
from ..externals.six.moves import map
from .channels import _contains_ch_type
from ..viz import plot_montage
from ..transforms import (_sphere_to_cartesian, _polar_to_cartesian,
_cartesian_to_sphere)
class Layout(object):
"""Sensor layouts
Layouts are typically loaded from a file using read_layout. Only use this
class directly if you're constructing a new layout.
Parameters
----------
box : tuple of length 4
The box dimension (x_min, x_max, y_min, y_max).
pos : array, shape=(n_channels, 4)
The positions of the channels in 2d (x, y, width, height).
names : list
The channel names.
ids : list
The channel ids.
kind : str
The type of Layout (e.g. 'Vectorview-all').
"""
def __init__(self, box, pos, names, ids, kind):
self.box = box
self.pos = pos
self.names = names
self.ids = ids
self.kind = kind
def save(self, fname):
"""Save Layout to disk
Parameters
----------
fname : str
The file name (e.g. 'my_layout.lout').
"""
x = self.pos[:, 0]
y = self.pos[:, 1]
width = self.pos[:, 2]
height = self.pos[:, 3]
if fname.endswith('.lout'):
out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
elif fname.endswith('.lay'):
out_str = ''
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
for ii in range(x.shape[0]):
out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' % (self.ids[ii],
x[ii], y[ii], width[ii], height[ii], self.names[ii]))
        with open(fname, 'w') as f:
            f.write(out_str)
def __repr__(self):
return '<Layout | %s - Channels: %s ...>' % (self.kind,
', '.join(self.names[:3]))
def _read_lout(fname):
"""Aux function"""
with open(fname) as f:
box_line = f.readline() # first line contains box dimension
box = tuple(map(float, box_line.split()))
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
            pos.append(np.array([x, y, dx, dy], dtype=float))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def _read_lay(fname):
"""Aux function"""
with open(fname) as f:
box = None
names, pos, ids = [], [], []
for line in f:
splits = line.split()
cid, x, y, dx, dy, name = splits
            pos.append(np.array([x, y, dx, dy], dtype=float))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def read_layout(kind, path=None, scale=True):
"""Read layout from a file
Parameters
----------
kind : str
The name of the .lout file (e.g. kind='Vectorview-all' for
'Vectorview-all.lout').
path : str | None
The path of the folder containing the Layout file. Defaults to the
mne/channels/data/layouts folder inside your mne-python installation.
scale : bool
        Apply useful scaling for out-of-the-box plotting using layout.pos.
Defaults to True.
Returns
-------
layout : instance of Layout
The layout.
"""
if path is None:
path = op.join(op.dirname(__file__), 'data', 'layouts')
if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
kind += '.lout'
elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
kind += '.lay'
if kind.endswith('.lout'):
fname = op.join(path, kind)
kind = kind[:-5]
box, pos, names, ids = _read_lout(fname)
elif kind.endswith('.lay'):
fname = op.join(path, kind)
kind = kind[:-4]
box, pos, names, ids = _read_lay(fname)
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
if scale:
pos[:, 0] -= np.min(pos[:, 0])
pos[:, 1] -= np.min(pos[:, 1])
scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
pos /= scaling
pos[:, :2] += 0.03
pos[:, :2] *= 0.97 / 1.03
pos[:, 2:] *= 0.94
return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
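# A minimal usage sketch for the function above, assuming the bundled layout
# files are installed:
# >>> layout = read_layout('Vectorview-all')  # resolves Vectorview-all.lout
# >>> layout.pos.shape  # (n_channels, 4): x, y, width, height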
def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads'):
"""Create .lout file from EEG electrode digitization
Parameters
----------
info : instance of mne.io.meas_info.Info
Measurement info (e.g., raw.info).
radius : float
Viewport radius as a fraction of main figure height. Defaults to 0.5.
width : float | None
Width of sensor axes as a fraction of main figure height. By default,
this will be the maximum width possible without axes overlapping.
height : float | None
Height of sensor axes as a fraction of main figure height. By default,
        this will be the maximum height possible without axes overlapping.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in info['bads'].
Returns
-------
layout : Layout
The generated Layout.
"""
if not (0 <= radius <= 0.5):
raise ValueError('The radius parameter should be between 0 and 0.5.')
if width is not None and not (0 <= width <= 1.0):
raise ValueError('The width parameter should be between 0 and 1.')
if height is not None and not (0 <= height <= 1.0):
raise ValueError('The height parameter should be between 0 and 1.')
picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
loc2d = _auto_topomap_coords(info, picks)
names = [info['chs'][i]['ch_name'] for i in picks]
# Scale [x, y] to [-0.5, 0.5]
loc2d_min = np.min(loc2d, axis=0)
loc2d_max = np.max(loc2d, axis=0)
loc2d = (loc2d - (loc2d_max + loc2d_min) / 2.) / (loc2d_max - loc2d_min)
# If no width or height specified, calculate the maximum value possible
# without axes overlapping.
if width is None or height is None:
width, height = _box_size(loc2d, width, height, padding=0.1)
# Scale to viewport radius
loc2d *= 2 * radius
# Some subplot centers will be at the figure edge. Shrink everything so it
# fits in the figure.
scaling = min(1 / (1. + width), 1 / (1. + height))
loc2d *= scaling
width *= scaling
height *= scaling
# Shift to center
loc2d += 0.5
n_channels = loc2d.shape[0]
pos = np.c_[loc2d[:, 0] - 0.5 * width,
loc2d[:, 1] - 0.5 * height,
width * np.ones(n_channels),
height * np.ones(n_channels)]
box = (0, 1, 0, 1)
ids = 1 + np.arange(n_channels)
layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
return layout
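# A hedged example for the function above, assuming `raw` is an MNE Raw
# object whose info contains EEG electrode locations:
# >>> layout = make_eeg_layout(raw.info)
# >>> layout.save('custom_eeg.lout')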
def make_grid_layout(info, picks=None, n_col=None):
""" Generate .lout file for custom data, i.e., ICA sources
Parameters
----------
info : instance of mne.io.meas_info.Info | None
Measurement info (e.g., raw.info). If None, default names will be
employed.
picks : array-like of int | None
        The indices of the channels to be included. If None, all misc channels
will be included.
n_col : int | None
Number of columns to generate. If None, a square grid will be produced.
Returns
-------
layout : Layout
The generated layout.
"""
if picks is None:
picks = pick_types(info, misc=True, ref_meg=False, exclude='bads')
names = [info['chs'][k]['ch_name'] for k in picks]
if not names:
raise ValueError('No misc data channels found.')
ids = list(range(len(picks)))
size = len(picks)
    if n_col is None:
        # prepare square-like layout
        n_row = n_col = np.sqrt(size)  # try square
        if n_col % 1:
            # try n * (n-1) rectangle
            n_col, n_row = int(n_col + 1), int(n_row)
            if n_col * n_row < size:  # jump to the next full square
                n_row += 1
        # np.linspace below needs integer counts
        n_col, n_row = int(n_col), int(n_row)
    else:
        n_row = int(np.ceil(size / float(n_col)))
# setup position grid
x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col),
np.linspace(-0.5, 0.5, n_row))
x, y = x.ravel()[:size], y.ravel()[:size]
width, height = _box_size(np.c_[x, y], padding=0.1)
# Some axes will be at the figure edge. Shrink everything so it fits in the
# figure. Add 0.01 border around everything
border_x, border_y = (0.01, 0.01)
x_scaling = 1 / (1. + width + border_x)
y_scaling = 1 / (1. + height + border_y)
x = x * x_scaling
y = y * y_scaling
width *= x_scaling
height *= y_scaling
# Shift to center
x += 0.5
y += 0.5
# calculate pos
pos = np.c_[x - 0.5*width, y - 0.5*height, width * np.ones(size), height *
np.ones(size)]
box = (0, 1, 0, 1)
layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
return layout
def find_layout(info, ch_type=None, exclude='bads'):
"""Choose a layout based on the channels in the info 'chs' field
Parameters
----------
info : instance of mne.io.meas_info.Info
The measurement info.
ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
The channel type for selecting single channel layouts.
Defaults to None. Note, this argument will only be considered for
VectorView type layout. Use `meg` to force using the full layout
        in situations where the info contains only one sensor type.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in info['bads'].
Returns
-------
layout : Layout instance | None
None if layout not found.
"""
    our_types = ' or '.join(['`None`', '`mag`', '`grad`', '`meg`', '`eeg`'])
    if ch_type not in (None, 'meg', 'mag', 'grad', 'eeg'):
        raise ValueError('Invalid channel type (%s) requested. '
                         '`ch_type` must be %s' % (ch_type, our_types))
chs = info['chs']
coil_types = set([ch['coil_type'] for ch in chs])
channel_types = set([ch['kind'] for ch in chs])
has_vv_mag = any([k in coil_types for k in [FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3]])
has_vv_grad = any([k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3]]
)
has_vv_meg = has_vv_mag and has_vv_grad
has_vv_only_mag = has_vv_mag and not has_vv_grad
has_vv_only_grad = has_vv_grad and not has_vv_mag
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any([k in ctf_other_types for k in coil_types])))
# hack due to MNE-C bug in IO of CTF
n_kit_grads = len([ch for ch in chs
if ch['coil_type'] == FIFF.FIFFV_COIL_KIT_GRAD])
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
if ch_type == "meg" and not has_any_meg:
raise RuntimeError('No MEG channels present. Cannot find MEG layout.')
if ch_type == "eeg" and not has_eeg_coils:
raise RuntimeError('No EEG channels present. Cannot find EEG layout.')
if ((has_vv_meg and ch_type is None) or
(any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')):
layout_name = 'Vectorview-all'
elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'):
layout_name = 'Vectorview-mag'
elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'):
layout_name = 'Vectorview-grad'
elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or
(has_eeg_coils_and_meg and ch_type == 'eeg')):
if not isinstance(info, dict):
raise RuntimeError('Cannot make EEG layout, no measurement info '
'was passed to `find_layout`')
return make_eeg_layout(info, exclude=exclude)
elif has_4D_mag:
layout_name = 'magnesWH3600'
elif has_CTF_grad:
layout_name = 'CTF-275'
elif n_kit_grads == 157:
layout_name = 'KIT-157'
else:
return None
layout = read_layout(layout_name)
if not is_old_vv:
layout.names = _clean_names(layout.names, remove_whitespace=True)
if has_CTF_grad:
layout.names = _clean_names(layout.names, before_dash=True)
return layout
def _box_size(points, width=None, height=None, padding=0.0):
""" Given a series of points, calculate an appropriate box size.
Parameters
----------
points : array, shape (n_points, 2)
The centers of the axes as a list of (x, y) coordinate pairs. Normally
these are points in the range [0, 1] centered at 0.5.
width : float | None
An optional box width to enforce. When set, only the box height will be
calculated by the function.
height : float | None
An optional box height to enforce. When set, only the box width will be
calculated by the function.
padding : float
        Portion of the box to reserve for padding. The value can range from
        0.0 (boxes will touch, default) to 1.0 (boxes consist only of padding).
Returns
-------
width : float
Width of the box
height : float
Height of the box
"""
xdiff = lambda a, b: np.abs(a[0] - b[0])
ydiff = lambda a, b: np.abs(a[1] - b[1])
points = np.asarray(points)
all_combinations = list(combinations(points, 2))
if width is None and height is None:
if len(points) <= 1:
# Trivial case first
width = 1.0
height = 1.0
else:
# Find the closest two points A and B.
a, b = all_combinations[np.argmin(pdist(points))]
# The closest points define either the max width or max height.
w, h = xdiff(a, b), ydiff(a, b)
if w > h:
width = w
else:
height = h
# At this point, either width or height is known, or both are known.
if height is None:
# Find all axes that could potentially overlap horizontally.
hdist = pdist(points, xdiff)
candidates = [all_combinations[i] for i, d in enumerate(hdist)
if d < width]
if len(candidates) == 0:
# No axes overlap, take all the height you want.
height = 1.0
else:
            # Find an appropriate height so that none of the found axes will
            # overlap.
height = np.min([ydiff(*c) for c in candidates])
elif width is None:
# Find all axes that could potentially overlap vertically.
vdist = pdist(points, ydiff)
candidates = [all_combinations[i] for i, d in enumerate(vdist)
if d < height]
if len(candidates) == 0:
# No axes overlap, take all the width you want.
width = 1.0
else:
            # Find an appropriate width so that none of the found axes will
            # overlap.
width = np.min([xdiff(*c) for c in candidates])
# Add a bit of padding between boxes
width *= 1 - padding
height *= 1 - padding
return width, height
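# Worked example of the logic above: for two points 0.5 apart horizontally,
# the closest pair fixes width = 0.5; no pair is then closer than `width`
# horizontally, so the height falls back to 1.0.
# >>> _box_size([(0.25, 0.5), (0.75, 0.5)])
# (0.5, 1.0)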
def _find_topomap_coords(info, picks, layout=None):
"""Try to guess the E/MEG layout and return appropriate topomap coordinates
Parameters
----------
info : instance of mne.io.meas_info.Info
Measurement info.
picks : list of int
Channel indices to generate topomap coords for.
    layout : None | instance of Layout
        Enforce using a specific layout. With None, 2d coordinates are
        generated automatically from the channel positions in info.
Returns
-------
coords : array, shape = (n_chs, 2)
2 dimensional coordinates for each sensor for a topomap plot.
"""
if len(picks) == 0:
raise ValueError("Need more than 0 channels.")
if layout is not None:
chs = [info['chs'][i] for i in picks]
pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
pos = np.asarray(pos)
else:
pos = _auto_topomap_coords(info, picks)
return pos
def _auto_topomap_coords(info, picks):
"""Make a 2 dimensional sensor map from sensor positions in an info dict.
The default is to use the electrode locations. The fallback option is to
attempt using digitization points of kind FIFFV_POINT_EEG. This only works
with EEG and requires an equal number of digitization points and sensors.
Parameters
----------
info : instance of mne.io.meas_info.Info
The measurement info.
picks : list of int
The channel indices to generate topomap coords for.
Returns
-------
locs : array, shape = (n_sensors, 2)
An array of positions of the 2 dimensional map.
"""
chs = [info['chs'][i] for i in picks]
# Use channel locations if available
locs3d = np.array([ch['loc'][:3] for ch in chs])
    # If electrode locations are not available, use digitization points
if len(locs3d) == 0 or np.allclose(locs3d, 0):
        logging.warning('Did not find any electrode locations in the info, '
'will attempt to use digitization points instead. '
'However, if digitization points do not correspond to '
'the EEG electrodes, this will lead to bad results. '
'Please verify that the sensor locations in the plot '
'are accurate.')
# MEG/EOG/ECG sensors don't have digitization points; all requested
# channels must be EEG
        for ch in chs:
            if ch['kind'] != FIFF.FIFFV_EEG_CH:
                raise ValueError("Cannot determine location of MEG/EOG/ECG "
                                 "channels using digitization points.")
eeg_ch_names = [ch['ch_name'] for ch in info['chs']
if ch['kind'] == FIFF.FIFFV_EEG_CH]
# Get EEG digitization points
if info['dig'] is None or len(info['dig']) == 0:
raise RuntimeError('No digitization points found.')
locs3d = np.array([point['r'] for point in info['dig']
if point['kind'] == FIFF.FIFFV_POINT_EEG])
if len(locs3d) == 0:
raise RuntimeError('Did not find any digitization points of '
'kind FIFFV_POINT_EEG (%d) in the info.'
% FIFF.FIFFV_POINT_EEG)
if len(locs3d) != len(eeg_ch_names):
raise ValueError("Number of EEG digitization points (%d) "
"doesn't match the number of EEG channels "
"(%d)" % (len(locs3d), len(eeg_ch_names)))
# Center digitization points on head origin
dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
FIFF.FIFFV_POINT_EEG,
FIFF.FIFFV_POINT_EXTRA)
from ..preprocessing.maxfilter import fit_sphere_to_headshape
_, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds)
origin_head /= 1000. # to meters
locs3d -= origin_head
# Match the digitization points with the requested
# channels.
eeg_ch_locs = dict(zip(eeg_ch_names, locs3d))
locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs])
# Duplicate points cause all kinds of trouble during visualization
if np.min(pdist(locs3d)) < 1e-10:
raise ValueError('Electrode positions must be unique.')
x, y, z = locs3d.T
az, el, r = _cartesian_to_sphere(x, y, z)
locs2d = np.c_[_polar_to_cartesian(az, np.pi / 2 - el)]
return locs2d
def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads'):
"""Find the picks for pairing grad channels
Parameters
----------
info : instance of mne.io.meas_info.Info
An info dictionary containing channel information.
layout : Layout | None
The layout if available. Defaults to None.
topomap_coords : bool
Return the coordinates for a topomap plot along with the picks. If
False, only picks are returned. Defaults to True.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
Returns
-------
picks : array of int
Picks for the grad channels, ordered in pairs.
coords : array, shape = (n_grad_channels, 3)
Coordinates for a topomap plot (optional, only returned if
topomap_coords == True).
"""
# find all complete pairs of grad channels
pairs = defaultdict(list)
grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude)
for i in grad_picks:
ch = info['chs'][i]
name = ch['ch_name']
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(ch)
pairs = [p for p in pairs.values() if len(p) == 2]
if len(pairs) == 0:
raise ValueError("No 'grad' channel pairs found.")
# find the picks corresponding to the grad channels
grad_chs = sum(pairs, [])
ch_names = info['ch_names']
picks = [ch_names.index(c['ch_name']) for c in grad_chs]
if topomap_coords:
shape = (len(pairs), 2, -1)
coords = (_find_topomap_coords(info, picks, layout)
.reshape(shape).mean(axis=1))
return picks, coords
else:
return picks
def _pair_grad_sensors_from_ch_names(ch_names):
"""Find the indexes for pairing grad channels
Parameters
----------
ch_names : list of str
A list of channel names.
Returns
-------
indexes : list of int
Indexes of the grad channels, ordered in pairs.
"""
pairs = defaultdict(list)
for i, name in enumerate(ch_names):
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(i)
pairs = [p for p in pairs.values() if len(p) == 2]
grad_chs = sum(pairs, [])
return grad_chs
def _merge_grad_data(data):
"""Merge data from channel pairs using the RMS
Parameters
----------
data : array, shape = (n_channels, n_times)
Data for channels, ordered in pairs.
Returns
-------
data : array, shape = (n_channels / 2, n_times)
The root mean square for each pair.
"""
data = data.reshape((len(data) // 2, 2, -1))
data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
return data
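# Worked example of the RMS pair merge above: two paired channels with
# constant values 3 and 4 merge to sqrt((3 ** 2 + 4 ** 2) / 2) ~= 3.54 at
# every sample.
# >>> _merge_grad_data(np.array([[3., 3.], [4., 4.]])).round(2)
# array([[3.54, 3.54]])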
class Montage(object):
"""Montage for EEG cap
Montages are typically loaded from a file using read_montage. Only use this
class directly if you're constructing a new montage.
Parameters
----------
pos : array, shape (n_channels, 3)
The positions of the channels in 3d.
ch_names : list
The channel names.
kind : str
The type of montage (e.g. 'standard_1005').
selection : array of int
The indices of the selected channels in the montage file.
"""
def __init__(self, pos, ch_names, kind, selection):
self.pos = pos
self.ch_names = ch_names
self.kind = kind
self.selection = selection
def __repr__(self):
s = '<Montage | %s - %d Channels: %s ...>'
s %= self.kind, len(self.ch_names), ', '.join(self.ch_names[:3])
return s
def plot(self, scale_factor=1.5, show_names=False):
"""Plot EEG sensor montage
Parameters
----------
scale_factor : float
Determines the size of the points. Defaults to 1.5
show_names : bool
Whether to show the channel names. Defaults to False
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure object.
"""
return plot_montage(self, scale_factor=scale_factor,
show_names=show_names)
def read_montage(kind, ch_names=None, path=None, scale=True):
"""Read montage from a file
Parameters
----------
kind : str
The name of the montage file (e.g. kind='easycap-M10' for
'easycap-M10.txt'). Files with extensions '.elc', '.txt', '.csd',
'.elp', '.hpts' or '.sfp' are supported.
ch_names : list of str | None
The names to read. If None, all names are returned.
path : str | None
The path of the folder containing the montage file. Defaults to the
mne/channels/data/montages folder in your mne-python installation.
scale : bool
        Apply useful scaling for out-of-the-box plotting using montage.pos.
Defaults to True.
Returns
-------
montage : instance of Montage
The montage.
"""
if path is None:
path = op.join(op.dirname(__file__), 'data', 'montages')
if not op.isabs(kind):
supported = ('.elc', '.txt', '.csd', '.sfp', '.elp', '.hpts')
montages = [op.splitext(f) for f in os.listdir(path)]
montages = [m for m in montages if m[1] in supported and kind == m[0]]
if len(montages) != 1:
raise ValueError('Could not find the montage. Please provide the '
'full path.')
kind, ext = montages[0]
fname = op.join(path, kind + ext)
else:
kind, ext = op.splitext(kind)
fname = op.join(path, kind + ext)
if ext == '.sfp':
# EGI geodesic
dtype = np.dtype('S4, f8, f8, f8')
data = np.loadtxt(fname, dtype=dtype)
pos = np.c_[data['f1'], data['f2'], data['f3']]
        ch_names_ = data['f0'].astype(str)
elif ext == '.elc':
# 10-5 system
ch_names_ = []
pos = []
with open(fname) as fid:
for line in fid:
if 'Positions\n' in line:
break
pos = []
for line in fid:
if 'Labels\n' in line:
break
pos.append(list(map(float, line.split())))
for line in fid:
if not line or not set(line) - set([' ']):
break
ch_names_.append(line.strip(' ').strip('\n'))
pos = np.array(pos)
elif ext == '.txt':
# easycap
        data = np.genfromtxt(fname, dtype='str', skip_header=1)
ch_names_ = list(data[:, 0])
theta, phi = data[:, 1].astype(float), data[:, 2].astype(float)
x = 85. * np.cos(np.deg2rad(phi)) * np.sin(np.deg2rad(theta))
y = 85. * np.sin(np.deg2rad(theta)) * np.sin(np.deg2rad(phi))
z = 85. * np.cos(np.deg2rad(theta))
pos = np.c_[x, y, z]
elif ext == '.csd':
# CSD toolbox
dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
('off_sph', 'f8')]
table = np.loadtxt(fname, skiprows=2, dtype=dtype)
ch_names_ = table['label']
theta = (2 * np.pi * table['theta']) / 360.
phi = (2 * np.pi * table['phi']) / 360.
pos = _sphere_to_cartesian(theta, phi, r=1.0)
pos = np.asarray(pos).T
elif ext == '.elp':
# standard BESA spherical
dtype = np.dtype('S8, S8, f8, f8, f8')
data = np.loadtxt(fname, dtype=dtype, skiprows=1)
az = data['f2']
horiz = data['f3']
radius = np.abs(az / 180.)
angles = np.array([90. - h if a >= 0. else -90. - h
for h, a in zip(horiz, az)])
sph_phi = (0.5 - radius) * 180.
sph_theta = angles
azimuth = sph_theta / 180.0 * np.pi
elevation = sph_phi / 180.0 * np.pi
r = 85.
y, x, z = _sphere_to_cartesian(azimuth, elevation, r)
pos = np.c_[x, y, z]
        ch_names_ = data['f1'].astype(str)
elif ext == '.hpts':
from ..transforms import get_ras_to_neuromag_trans, apply_trans
# MNE-C specified format for generic digitizer data
        dtype = [('type', 'S8'), ('name', 'S8'),
                 ('x', 'f8'), ('y', 'f8'), ('z', 'f8')]
        data = np.loadtxt(fname, dtype=dtype)
        pos_ = data[data['type'].astype(str) == 'eeg']
        pos = np.vstack((pos_['x'], pos_['y'], pos_['z'])).T
        ch_names_ = pos_['name'].astype(str)
else:
raise ValueError('Currently the "%s" template is not supported.' %
kind)
selection = np.arange(len(pos))
if ch_names is not None:
sel, ch_names_ = zip(*[(i, e) for i, e in enumerate(ch_names_)
if e in ch_names])
sel = list(sel)
pos = pos[sel]
selection = selection[sel]
else:
ch_names_ = list(ch_names_)
kind = op.split(kind)[-1]
return Montage(pos=pos, ch_names=ch_names_, kind=kind, selection=selection)
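# A minimal usage sketch for the function above, assuming the bundled montage
# files are installed:
# >>> montage = read_montage('standard_1005')
# >>> len(montage.ch_names) == len(montage.pos)
# True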
def apply_montage(info, montage):
"""Apply montage to EEG data.
This function will replace the EEG channel names and locations with
the values specified for the particular montage.
Note: This function will change the info variable in place.
Parameters
----------
info : instance of Info
The measurement info to update.
montage : instance of Montage
The montage to apply.
"""
if not _contains_ch_type(info, 'eeg'):
raise ValueError('No EEG channels found.')
sensors_found = False
for pos, ch_name in zip(montage.pos, montage.ch_names):
if ch_name not in info['ch_names']:
continue
ch_idx = info['ch_names'].index(ch_name)
info['ch_names'][ch_idx] = ch_name
info['chs'][ch_idx]['eeg_loc'] = np.c_[pos, [0.] * 3]
info['chs'][ch_idx]['loc'] = np.r_[pos, [0.] * 9]
sensors_found = True
if not sensors_found:
raise ValueError('None of the sensors defined in the montage were '
'found in the info structure. Check the channel '
'names.')
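# A hedged example for apply_montage, assuming `info` comes from an EEG
# recording whose channel names match the montage:
# >>> montage = read_montage('standard_1005')
# >>> apply_montage(info, montage)  # updates info['chs'] locations in place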
|
from __future__ import annotations
import logging
import math
import os
from dxtbx.serialize import load
import xia2.Wrappers.Dials.Integrate
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.lib.bits import auto_logfiler
from xia2.lib.SymmetryLib import lattice_to_spacegroup
from xia2.Schema.Interfaces.Integrater import Integrater
from xia2.Wrappers.Dials.anvil_correction import anvil_correction as _anvil_correction
from xia2.Wrappers.Dials.ExportMtz import ExportMtz as _ExportMtz
from xia2.Wrappers.Dials.ExportXDSASCII import ExportXDSASCII
from xia2.Wrappers.Dials.Report import Report as _Report
logger = logging.getLogger("xia2.Modules.Integrater.DialsIntegrater")
class DialsIntegrater(Integrater):
"""A class to implement the Integrater interface using *only* DIALS
programs."""
def __init__(self):
super().__init__()
# check that the programs exist - this will raise an exception if
# they do not...
xia2.Wrappers.Dials.Integrate.Integrate()
# place to store working data
self._data_files = {}
# internal parameters to pass around
self._integrate_parameters = {}
self._intgr_integrated_filename = None
self._intgr_integrated_reflections = None
self._intgr_experiments_filename = None
# Check whether to do diamond anvil cell attenuation correction.
self.high_pressure = PhilIndex.params.dials.high_pressure.correction
# overload these methods as we don't want the resolution range
# feeding back... aha - but we may want to assign them
# from outside!
def set_integrater_resolution(self, dmin, dmax, user=False):
if user:
Integrater.set_integrater_resolution(self, dmin, dmax, user)
def set_integrater_high_resolution(self, dmin, user=False):
if user:
Integrater.set_integrater_high_resolution(self, dmin, user)
def set_integrater_low_resolution(self, dmax, user=False):
self._intgr_reso_low = dmax
# admin functions
def get_integrated_experiments(self):
self.integrate()
return self._intgr_experiments_filename
def get_integrated_filename(self):
self.integrate()
return self._intgr_integrated_filename
def get_integrated_reflections(self):
self.integrate()
return self._intgr_integrated_reflections
    def set_integrated_experiments(self, filename):
        self._intgr_experiments_filename = filename
    def set_integrated_reflections(self, filename):
        self._intgr_integrated_reflections = filename
# factory functions
def Integrate(self):
params = PhilIndex.params.dials.integrate
integrate = xia2.Wrappers.Dials.Integrate.Integrate()
integrate.set_phil_file(params.phil_file)
if params.mosaic == "new":
integrate.set_new_mosaic()
if PhilIndex.params.dials.fast_mode:
integrate.set_profile_fitting(False)
else:
profile_fitting = PhilIndex.params.xia2.settings.integration.profile_fitting
integrate.set_profile_fitting(profile_fitting)
# Options for profile modelling.
integrate.set_scan_varying_profile(params.scan_varying_profile)
high_pressure = PhilIndex.params.dials.high_pressure.correction
integrate.set_profile_params(
params.min_spots.per_degree, params.min_spots.overall, high_pressure
)
integrate.set_background_outlier_algorithm(params.background_outlier_algorithm)
integrate.set_background_algorithm(params.background_algorithm)
integrate.set_working_directory(self.get_working_directory())
integrate.set_experiments_filename(self._intgr_experiments_filename)
integrate.set_reflections_filename(self._intgr_indexed_filename)
auto_logfiler(integrate, "INTEGRATE")
return integrate
def Report(self):
report = _Report()
report.set_working_directory(self.get_working_directory())
report.set_experiments_filename(self._intgr_experiments_filename)
report.set_reflections_filename(self._intgr_integrated_reflections)
auto_logfiler(report, "REPORT")
return report
def ExportMtz(self):
params = PhilIndex.params.dials.integrate
export = _ExportMtz()
pname, xname, _ = self.get_integrater_project_info()
export.crystal_name = xname
export.project_name = pname
export.set_working_directory(self.get_working_directory())
export.set_experiments_filename(self._intgr_experiments_filename)
export.set_combine_partials(params.combine_partials)
export.set_partiality_threshold(params.partiality_threshold)
if len(self.get_matching_images()) == 1:
export.set_partiality_threshold(0.1)
if (
len(self.get_matching_images()) == 1
or PhilIndex.params.dials.fast_mode
or not PhilIndex.params.xia2.settings.integration.profile_fitting
):
            # With no profiles available, we have to rely on summation alone
export.set_intensity_choice("sum")
auto_logfiler(export, "EXPORTMTZ")
return export
# now some real functions, which do useful things
def _integrater_reset_callback(self):
"""Delete all results on a reset."""
logger.debug("Deleting all stored results.")
self._data_files = {}
self._integrate_parameters = {}
def _integrate_prepare(self):
"""Prepare for integration - in XDS terms this may mean rerunning
IDXREF to get the XPARM etc. DEFPIX is considered part of the full
integration as it is resolution dependent."""
Citations.cite("dials")
# decide what images we are going to process, if not already
# specified
if not self._intgr_wedge:
images = self.get_matching_images()
self.set_integrater_wedge(min(images), max(images))
logger.debug("DIALS INTEGRATE PREPARE:")
logger.debug("Wavelength: %.6f" % self.get_wavelength())
logger.debug("Distance: %.2f" % self.get_distance())
if not self.get_integrater_low_resolution():
dmax = self._intgr_refiner.get_indexer_low_resolution(
self.get_integrater_epoch()
)
self.set_integrater_low_resolution(dmax)
logger.debug(
"Low resolution set to: %s" % self.get_integrater_low_resolution()
)
        # copy the data across
refiner = self.get_integrater_refiner()
# For multi-sweep refinement, get the split experiments from after refinement.
if PhilIndex.params.xia2.settings.multi_sweep_refinement:
self._intgr_experiments_filename = refiner.get_refiner_payload(
f"{self._intgr_sweep._name}_models.expt"
)
self._intgr_indexed_filename = refiner.get_refiner_payload(
f"{self._intgr_sweep._name}_observations.refl"
)
# Otherwise, there should only be a single experiment list and reflection table.
else:
self._intgr_experiments_filename = refiner.get_refiner_payload(
"models.expt"
)
self._intgr_indexed_filename = refiner.get_refiner_payload(
"observations.refl"
)
experiments = load.experiment_list(self._intgr_experiments_filename)
experiment = experiments[0]
# this is the result of the cell refinement
self._intgr_cell = experiment.crystal.get_unit_cell().parameters()
logger.debug("Files available at the end of DIALS integrate prepare:")
for f in self._data_files:
logger.debug("%s" % f)
self.set_detector(experiment.detector)
self.set_beam_obj(experiment.beam)
self.set_goniometer(experiment.goniometer)
def _integrate(self):
"""Actually do the integration - in XDS terms this will mean running
DEFPIX and INTEGRATE to measure all the reflections."""
integrate = self.Integrate()
# decide what images we are going to process, if not already
# specified
if not self._intgr_wedge:
images = self.get_matching_images()
self.set_integrater_wedge(min(images), max(images))
imageset = self.get_imageset()
beam = imageset.get_beam()
detector = imageset.get_detector()
d_min_limit = detector.get_max_resolution(beam.get_s0())
if (
d_min_limit > self._intgr_reso_high
or PhilIndex.params.xia2.settings.resolution.keep_all_reflections
):
logger.debug(
"Overriding high resolution limit: %f => %f"
% (self._intgr_reso_high, d_min_limit)
)
self._intgr_reso_high = d_min_limit
integrate.set_experiments_filename(self._intgr_experiments_filename)
integrate.set_reflections_filename(self._intgr_indexed_filename)
if PhilIndex.params.dials.integrate.d_max:
integrate.set_d_max(PhilIndex.params.dials.integrate.d_max)
else:
integrate.set_d_max(self._intgr_reso_low)
if PhilIndex.params.dials.integrate.d_min:
integrate.set_d_min(PhilIndex.params.dials.integrate.d_min)
else:
integrate.set_d_min(self._intgr_reso_high)
pname, xname, dname = self.get_integrater_project_info()
sweep = self.get_integrater_sweep_name()
FileHandler.record_log_file(
f"{pname} {xname} {dname} {sweep} INTEGRATE",
integrate.get_log_file(),
)
integrate.run()
self._intgr_experiments_filename = integrate.get_integrated_experiments()
# also record the batch range - needed for the analysis of the
# radiation damage in chef...
self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])
# FIXME (i) record the log file, (ii) get more information out from the
# integration log on the quality of the data and (iii) the mosaic spread
# range observed and R.M.S. deviations.
self._intgr_integrated_reflections = integrate.get_integrated_reflections()
if not os.path.isfile(self._intgr_integrated_reflections):
raise RuntimeError(
"Integration failed: %s does not exist."
% self._intgr_integrated_reflections
)
self._intgr_per_image_statistics = integrate.get_per_image_statistics()
logger.info(self.show_per_image_statistics())
report = self.Report()
html_filename = os.path.join(
self.get_working_directory(),
"%i_dials.integrate.report.html" % report.get_xpid(),
)
report.set_html_filename(html_filename)
report.run(wait_for_completion=True)
FileHandler.record_html_file(
f"{pname} {xname} {dname} {sweep} INTEGRATE", html_filename
)
experiments = load.experiment_list(self._intgr_experiments_filename)
profile = experiments.profiles()[0]
mosaic = profile.sigma_m()
try:
m_min, m_max, m_mean = mosaic.min_max_mean().as_tuple()
self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)
except AttributeError:
self.set_integrater_mosaic_min_mean_max(mosaic, mosaic, mosaic)
logger.info(
"Mosaic spread: %.3f < %.3f < %.3f"
% self.get_integrater_mosaic_min_mean_max()
)
# If running in high-pressure mode, run dials.anvil_correction to
# correct for the attenuation of the incident and diffracted beams by the
# diamond anvils.
if self.high_pressure:
self._anvil_correction()
return self._intgr_integrated_reflections
def _integrate_finish(self):
"""
Finish off the integration.
If in high-pressure mode run dials.anvil_correction.
Run dials.export.
"""
# FIXME - do we want to export every time we call this method
# (the file will not have changed) and also (more important) do
# we want a different exported MTZ file every time (I do not think
# that we do; these can be very large) - was exporter.get_xpid() ->
# now dials
if self._output_format == "hkl":
exporter = self.ExportMtz()
exporter.set_reflections_filename(self._intgr_integrated_reflections)
mtz_filename = os.path.join(
self.get_working_directory(), "%s_integrated.mtz" % "dials"
)
exporter.set_mtz_filename(mtz_filename)
exporter.run()
self._intgr_integrated_filename = mtz_filename
# record integrated MTZ file
pname, xname, dname = self.get_integrater_project_info()
sweep = self.get_integrater_sweep_name()
FileHandler.record_more_data_file(
f"{pname} {xname} {dname} {sweep} INTEGRATE", mtz_filename
)
from iotbx.reflection_file_reader import any_reflection_file
miller_arrays = any_reflection_file(
self._intgr_integrated_filename
).as_miller_arrays()
# look for profile-fitted intensities
intensities = [
ma for ma in miller_arrays if ma.info().labels == ["IPR", "SIGIPR"]
]
if len(intensities) == 0:
# look instead for summation-integrated intensities
intensities = [
ma for ma in miller_arrays if ma.info().labels == ["I", "SIGI"]
]
assert len(intensities)
self._intgr_n_ref = intensities[0].size()
if not os.path.isfile(self._intgr_integrated_filename):
raise RuntimeError(
"dials.export failed: %s does not exist."
% self._intgr_integrated_filename
)
if (
self._intgr_reindex_operator is None
and self._intgr_spacegroup_number
== lattice_to_spacegroup(
self.get_integrater_refiner().get_refiner_lattice()
)
):
logger.debug(
"Not reindexing to spacegroup %d (%s)"
% (self._intgr_spacegroup_number, self._intgr_reindex_operator)
)
return mtz_filename
if (
self._intgr_reindex_operator is None
and self._intgr_spacegroup_number == 0
):
logger.debug(
"Not reindexing to spacegroup %d (%s)"
% (self._intgr_spacegroup_number, self._intgr_reindex_operator)
)
return mtz_filename
logger.debug(
"Reindexing to spacegroup %d (%s)"
% (self._intgr_spacegroup_number, self._intgr_reindex_operator)
)
hklin = mtz_filename
from xia2.Wrappers.CCP4.Reindex import Reindex
reindex = Reindex()
reindex.set_working_directory(self.get_working_directory())
auto_logfiler(reindex)
reindex.set_operator(self._intgr_reindex_operator)
if self._intgr_spacegroup_number:
reindex.set_spacegroup(self._intgr_spacegroup_number)
else:
reindex.set_spacegroup(
lattice_to_spacegroup(
self.get_integrater_refiner().get_refiner_lattice()
)
)
hklout = "%s_reindex.mtz" % hklin[:-4]
reindex.set_hklin(hklin)
reindex.set_hklout(hklout)
reindex.reindex()
self._intgr_integrated_filename = hklout
self._intgr_cell = reindex.get_cell()
pname, xname, dname = self.get_integrater_project_info()
sweep = self.get_integrater_sweep_name()
FileHandler.record_more_data_file(
f"{pname} {xname} {dname} {sweep}",
self.get_integrated_experiments(),
)
FileHandler.record_more_data_file(
f"{pname} {xname} {dname} {sweep}",
self.get_integrated_reflections(),
)
return hklout
elif self._output_format == "pickle":
if (
self._intgr_reindex_operator is None
and self._intgr_spacegroup_number
== lattice_to_spacegroup(
self.get_integrater_refiner().get_refiner_lattice()
)
):
logger.debug(
"Not reindexing to spacegroup %d (%s)"
% (self._intgr_spacegroup_number, self._intgr_reindex_operator)
)
return self._intgr_integrated_reflections
if (
self._intgr_reindex_operator is None
and self._intgr_spacegroup_number == 0
):
logger.debug(
"Not reindexing to spacegroup %d (%s)"
% (self._intgr_spacegroup_number, self._intgr_reindex_operator)
)
return self._intgr_integrated_reflections
logger.debug(
"Reindexing to spacegroup %d (%s)"
% (self._intgr_spacegroup_number, self._intgr_reindex_operator)
)
from xia2.Wrappers.Dials.Reindex import Reindex
reindex = Reindex()
reindex.set_working_directory(self.get_working_directory())
auto_logfiler(reindex)
reindex.set_cb_op(self._intgr_reindex_operator)
if self._intgr_spacegroup_number:
reindex.set_space_group(self._intgr_spacegroup_number)
else:
reindex.set_space_group(
lattice_to_spacegroup(
self.get_integrater_refiner().get_refiner_lattice()
)
)
reindex.set_experiments_filename(self.get_integrated_experiments())
reindex.set_indexed_filename(self.get_integrated_reflections())
reindex.run()
self._intgr_integrated_reflections = (
reindex.get_reindexed_reflections_filename()
)
self._intgr_integrated_filename = (
reindex.get_reindexed_reflections_filename()
)
self._intgr_experiments_filename = (
reindex.get_reindexed_experiments_filename()
)
pname, xname, dname = self.get_integrater_project_info()
sweep = self.get_integrater_sweep_name()
FileHandler.record_more_data_file(
f"{pname} {xname} {dname} {sweep}",
self.get_integrated_experiments(),
)
FileHandler.record_more_data_file(
f"{pname} {xname} {dname} {sweep}",
self.get_integrated_reflections(),
)
            return None  # this should be set to intgr_hklout - better to fail
            # because it is None than to set it wrong and not notice
def _integrate_select_images_wedges(self):
"""Select correct images based on image headers."""
phi_width = self.get_phi_width()
images = self.get_matching_images()
# characterise the images - are there just two (e.g. dna-style
# reference images) or is there a full block?
wedges = []
if len(images) < 3:
# work on the assumption that this is a reference pair
wedges.append(images[0])
if len(images) > 1:
wedges.append(images[1])
else:
block_size = min(len(images), int(math.ceil(5 / phi_width)))
logger.debug(
"Adding images for indexer: %d -> %d"
% (images[0], images[block_size - 1])
)
wedges.append((images[0], images[block_size - 1]))
if int(90.0 / phi_width) + block_size in images:
# assume we can add a wedge around 45 degrees as well...
logger.debug(
"Adding images for indexer: %d -> %d"
% (
int(45.0 / phi_width) + images[0],
int(45.0 / phi_width) + images[0] + block_size - 1,
)
)
logger.debug(
"Adding images for indexer: %d -> %d"
% (
int(90.0 / phi_width) + images[0],
int(90.0 / phi_width) + images[0] + block_size - 1,
)
)
wedges.append(
(
int(45.0 / phi_width) + images[0],
int(45.0 / phi_width) + images[0] + block_size - 1,
)
)
wedges.append(
(
int(90.0 / phi_width) + images[0],
int(90.0 / phi_width) + images[0] + block_size - 1,
)
)
else:
# add some half-way anyway
first = (len(images) // 2) - (block_size // 2) + images[0] - 1
if first > wedges[0][1]:
last = first + block_size - 1
logger.debug("Adding images for indexer: %d -> %d" % (first, last))
wedges.append((first, last))
if len(images) > block_size:
logger.debug(
"Adding images for indexer: %d -> %d"
% (images[-block_size], images[-1])
)
wedges.append((images[-block_size], images[-1]))
return wedges
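    # Worked example of the wedge selection above, with assumed numbers: for
    # 1000 images of 0.1 degree each starting at image 1, block_size =
    # min(1000, ceil(5 / 0.1)) = 50, and since image int(90 / 0.1) + 50 = 950
    # exists, the wedges are (1, 50), (451, 500) and (901, 950).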
def get_integrater_corrected_intensities(self):
self.integrate()
exporter = ExportXDSASCII()
exporter.set_experiments_filename(self.get_integrated_experiments())
exporter.set_reflections_filename(self.get_integrated_reflections())
exporter.set_working_directory(self.get_working_directory())
auto_logfiler(exporter)
self._intgr_corrected_hklout = os.path.join(
self.get_working_directory(), "%i_DIALS.HKL" % exporter.get_xpid()
)
exporter.set_hkl_filename(self._intgr_corrected_hklout)
exporter.run()
assert os.path.exists(self._intgr_corrected_hklout)
return self._intgr_corrected_hklout
def _anvil_correction(self):
"""Correct for attenuation in a diamond anvil pressure cell."""
logger.info(
"Rescaling integrated reflections for attenuation in the diamond anvil "
"cell."
)
params = PhilIndex.params.dials.high_pressure
anvil_correct = _anvil_correction()
# Take the filenames of the last integration step as input.
anvil_correct.experiments_filenames.append(self._intgr_experiments_filename)
anvil_correct.reflections_filenames.append(self._intgr_integrated_reflections)
        # The output reflections get '_corrected' appended to the file stem,
        # e.g. 'integrated.refl' becomes 'integrated_corrected.refl'.
output_reflections = "_corrected".join(
os.path.splitext(self._intgr_integrated_reflections)
)
anvil_correct.output_reflections_filename = output_reflections
# Set the user-specified parameters from the PHIL scope.
anvil_correct.density = params.anvil.density
anvil_correct.thickness = params.anvil.thickness
anvil_correct.normal = params.anvil.normal
# Run dials.anvil_correction with the parameters as set above.
anvil_correct.set_working_directory(self.get_working_directory())
auto_logfiler(anvil_correct)
anvil_correct.run()
self._intgr_integrated_reflections = output_reflections
|
"""Generate minidump symbols for use by the Crash server.
Note: This should be run inside the chroot.
This produces files in the breakpad format required by minidump_stackwalk and
the crash server to dump stack information.
Basically it scans all the split .debug files in /build/$BOARD/usr/lib/debug/
and converts them using the `dump_syms` program. Those plain-text .sym
files are then stored in /build/$BOARD/usr/lib/debug/breakpad/.
If you want to actually upload things, see upload_symbols.py.
"""
from __future__ import print_function
import collections
import ctypes
import multiprocessing
import os
import tempfile
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import signals
SymbolHeader = collections.namedtuple('SymbolHeader',
('cpu', 'id', 'name', 'os',))
def ReadSymsHeader(sym_file):
"""Parse the header of the symbol file
The first line of the syms file will read like:
MODULE Linux arm F4F6FA6CCBDEF455039C8DE869C8A2F40 blkid
https://code.google.com/p/google-breakpad/wiki/SymbolFiles
Args:
sym_file: The symbol file to parse
Returns:
A SymbolHeader object
Raises:
ValueError if the first line of |sym_file| is invalid
"""
with cros_build_lib.Open(sym_file) as f:
header = f.readline().split()
if header[0] != 'MODULE' or len(header) != 5:
raise ValueError('header of sym file is invalid')
return SymbolHeader(os=header[1], cpu=header[2], id=header[3], name=header[4])
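# Worked example for the MODULE line quoted in the docstring above (the file
# path is hypothetical):
# >>> ReadSymsHeader('/path/to/blkid.sym')
# SymbolHeader(cpu='arm', id='F4F6FA6CCBDEF455039C8DE869C8A2F40', name='blkid', os='Linux')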
def GenerateBreakpadSymbol(elf_file, debug_file=None, breakpad_dir=None,
board=None, strip_cfi=False, num_errors=None):
"""Generate the symbols for |elf_file| using |debug_file|
Args:
elf_file: The file to dump symbols for
debug_file: Split debug file to use for symbol information
breakpad_dir: The dir to store the output symbol file in
board: If |breakpad_dir| is not specified, use |board| to find it
strip_cfi: Do not generate CFI data
num_errors: An object to update with the error count (needs a .value member)
Returns:
The number of errors that were encountered.
"""
if breakpad_dir is None:
breakpad_dir = FindBreakpadDir(board)
if num_errors is None:
num_errors = ctypes.c_int()
cmd_base = ['dump_syms']
if strip_cfi:
cmd_base += ['-c']
# Some files will not be readable by non-root (e.g. set*id /bin/su).
needs_sudo = not os.access(elf_file, os.R_OK)
def _DumpIt(cmd_args):
if needs_sudo:
run_command = cros_build_lib.SudoRunCommand
else:
run_command = cros_build_lib.RunCommand
return run_command(
cmd_base + cmd_args, redirect_stderr=True, log_stdout_to_file=temp.name,
error_code_ok=True, debug_level=logging.DEBUG)
def _CrashCheck(ret, msg):
if ret < 0:
cros_build_lib.PrintBuildbotStepWarnings()
logging.warning('dump_syms crashed with %s; %s',
signals.StrSignal(-ret), msg)
osutils.SafeMakedirs(breakpad_dir)
with tempfile.NamedTemporaryFile(dir=breakpad_dir, bufsize=0) as temp:
if debug_file:
# Try to dump the symbols using the debug file like normal.
cmd_args = [elf_file, os.path.dirname(debug_file)]
result = _DumpIt(cmd_args)
if result.returncode:
# Sometimes dump_syms can crash because there's too much info.
# Try dumping and stripping the extended stuff out. At least
# this way we'll get the extended symbols. http://crbug.com/266064
_CrashCheck(result.returncode, 'retrying w/out CFI')
cmd_args = ['-c', '-r'] + cmd_args
result = _DumpIt(cmd_args)
_CrashCheck(result.returncode, 'retrying w/out debug')
basic_dump = result.returncode
else:
basic_dump = True
if basic_dump:
# If that didn't work (no debug, or dump_syms still failed), try
# dumping just the file itself directly.
result = _DumpIt([elf_file])
if result.returncode:
# A lot of files (like kernel files) contain no debug information,
# do not consider such occurrences as errors.
cros_build_lib.PrintBuildbotStepWarnings()
_CrashCheck(result.returncode, 'giving up entirely')
if 'file contains no debugging information' in result.error:
logging.warning('no symbols found for %s', elf_file)
else:
num_errors.value += 1
logging.error('dumping symbols for %s failed:\n%s', elf_file,
result.error)
return num_errors.value
# Move the dumped symbol file to the right place:
# /build/$BOARD/usr/lib/debug/breakpad/<module-name>/<id>/<module-name>.sym
header = ReadSymsHeader(temp)
logging.info('Dumped %s as %s : %s', elf_file, header.name, header.id)
sym_file = os.path.join(breakpad_dir, header.name, header.id,
header.name + '.sym')
osutils.SafeMakedirs(os.path.dirname(sym_file))
os.rename(temp.name, sym_file)
os.chmod(sym_file, 0o644)
temp.delete = False
return num_errors.value
def GenerateBreakpadSymbols(board, breakpad_dir=None, strip_cfi=False,
generate_count=None, sysroot=None,
num_processes=None, clean_breakpad=False,
exclude_dirs=(), file_list=None):
"""Generate symbols for this board.
If |file_list| is None, symbols are generated for all executables, otherwise
only for the files included in |file_list|.
TODO(build):
This should be merged with buildbot_commands.GenerateBreakpadSymbols()
once we rewrite cros_generate_breakpad_symbols in python.
Args:
board: The board whose symbols we wish to generate
breakpad_dir: The full path to the breakpad directory where symbols live
strip_cfi: Do not generate CFI data
generate_count: If set, only generate this many symbols (meant for testing)
sysroot: The root where to find the corresponding ELFs
num_processes: Number of jobs to run in parallel
clean_breakpad: Should we `rm -rf` the breakpad output dir first; note: we
do not do any locking, so do not run more than one in parallel when True
exclude_dirs: List of dirs (relative to |sysroot|) to not search
file_list: Only generate symbols for files in this list. Each file must be a
full path (including |sysroot| prefix).
TODO(build): Support paths w/o |sysroot|.
Returns:
The number of errors that were encountered.
"""
if breakpad_dir is None:
breakpad_dir = FindBreakpadDir(board)
if sysroot is None:
sysroot = cros_build_lib.GetSysroot(board=board)
if clean_breakpad:
logging.info('cleaning out %s first', breakpad_dir)
osutils.RmDir(breakpad_dir, ignore_missing=True, sudo=True)
# Make sure non-root can write out symbols as needed.
osutils.SafeMakedirs(breakpad_dir, sudo=True)
if not os.access(breakpad_dir, os.W_OK):
cros_build_lib.SudoRunCommand(['chown', '-R', str(os.getuid()),
breakpad_dir])
debug_dir = FindDebugDir(board)
exclude_paths = [os.path.join(debug_dir, x) for x in exclude_dirs]
if file_list is None:
file_list = []
file_filter = dict.fromkeys([os.path.normpath(x) for x in file_list], False)
logging.info('generating breakpad symbols using %s', debug_dir)
# Let's locate all the debug_files and elfs first along with the debug file
# sizes. This way we can start processing the largest files first in parallel
# with the small ones.
# If |file_list| was given, ignore all other files.
targets = []
for root, dirs, files in os.walk(debug_dir):
if root in exclude_paths:
logging.info('Skipping excluded dir %s', root)
del dirs[:]
continue
for debug_file in files:
debug_file = os.path.join(root, debug_file)
# Turn /build/$BOARD/usr/lib/debug/sbin/foo.debug into
# /build/$BOARD/sbin/foo.
elf_file = os.path.join(sysroot, debug_file[len(debug_dir) + 1:-6])
if file_filter:
if elf_file in file_filter:
file_filter[elf_file] = True
elif debug_file in file_filter:
file_filter[debug_file] = True
else:
continue
# Filter out files based on common issues with the debug file.
if not debug_file.endswith('.debug'):
continue
elif debug_file.endswith('.ko.debug'):
logging.debug('Skipping kernel module %s', debug_file)
continue
elif os.path.islink(debug_file):
# The build-id stuff is common enough to filter out by default.
if '/.build-id/' in debug_file:
msg = logging.debug
else:
msg = logging.warning
msg('Skipping symbolic link %s', debug_file)
continue
# Filter out files based on common issues with the elf file.
if not os.path.exists(elf_file):
# Sometimes we filter out programs from /usr/bin but leave behind
# the .debug file.
logging.warning('Skipping missing %s', elf_file)
continue
targets.append((os.path.getsize(debug_file), elf_file, debug_file))
bg_errors = multiprocessing.Value('i')
if file_filter:
    files_not_found = [x for x, found in file_filter.items() if not found]
bg_errors.value += len(files_not_found)
if files_not_found:
logging.error('Failed to find requested files: %s', files_not_found)
# Now start generating symbols for the discovered elfs.
with parallel.BackgroundTaskRunner(GenerateBreakpadSymbol,
breakpad_dir=breakpad_dir, board=board,
strip_cfi=strip_cfi,
num_errors=bg_errors,
processes=num_processes) as queue:
for _, elf_file, debug_file in sorted(targets, reverse=True):
if generate_count == 0:
break
queue.put([elf_file, debug_file])
if generate_count is not None:
generate_count -= 1
if generate_count == 0:
break
return bg_errors.value
def FindDebugDir(board):
"""Given a |board|, return the path to the split debug dir for it"""
sysroot = cros_build_lib.GetSysroot(board=board)
return os.path.join(sysroot, 'usr', 'lib', 'debug')
def FindBreakpadDir(board):
"""Given a |board|, return the path to the breakpad dir for it"""
return os.path.join(FindDebugDir(board), 'breakpad')
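# Worked example for the two helpers above, assuming GetSysroot returns
# /build/<board>:
# >>> FindDebugDir('lumpy')
# '/build/lumpy/usr/lib/debug'
# >>> FindBreakpadDir('lumpy')
# '/build/lumpy/usr/lib/debug/breakpad'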
def main(argv):
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--board', default=None,
help='board to generate symbols for')
parser.add_argument('--breakpad_root', type='path', default=None,
help='root directory for breakpad symbols')
parser.add_argument('--exclude-dir', type=str, action='append',
default=[],
help='directory (relative to |board| root) to not search')
parser.add_argument('--generate-count', type=int, default=None,
help='only generate # number of symbols')
parser.add_argument('--noclean', dest='clean', action='store_false',
default=True,
help='do not clean out breakpad dir before running')
parser.add_argument('--jobs', type=int, default=None,
help='limit number of parallel jobs')
parser.add_argument('--strip_cfi', action='store_true', default=False,
help='do not generate CFI data (pass -c to dump_syms)')
parser.add_argument('file_list', nargs='*', default=None,
help='generate symbols for only these files '
'(e.g. /build/$BOARD/usr/bin/foo)')
opts = parser.parse_args(argv)
opts.Freeze()
if opts.board is None:
cros_build_lib.Die('--board is required')
ret = GenerateBreakpadSymbols(opts.board, breakpad_dir=opts.breakpad_root,
strip_cfi=opts.strip_cfi,
generate_count=opts.generate_count,
num_processes=opts.jobs,
clean_breakpad=opts.clean,
exclude_dirs=opts.exclude_dir,
file_list=opts.file_list)
if ret:
logging.error('encountered %i problem(s)', ret)
# Since exit(status) gets masked, clamp it to 1 so we don't inadvertently
# return 0 in case we are a multiple of the mask.
ret = 1
return ret
|
from __future__ import absolute_import
import unittest
import os
from mciutil.cli.common import get_config_filename
class CliCommonTests(unittest.TestCase):
def test_get_config_filename(self):
"""
check that package default config exists, otherwise fail
this will show up on remote build when package is installed
rather than pointing to development environment
"""
filename = get_config_filename('mideu.yml')
self.assertTrue(os.path.exists(filename))
print("config filename={0}".format(filename))
if not os.path.isdir(".git"):
print("Checking that config from site-packages")
self.assertNotEqual(filename.find("site-packages"), -1)
if __name__ == '__main__':
unittest.main()
|
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
dependencies = [
('notaro', '0016_auto_20160303_2115'),
]
operations = [
migrations.AddField(
model_name='document',
name='date_added',
            field=models.DateTimeField(default=datetime.datetime(2016, 1, 1), auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='document',
name='date_changed',
            field=models.DateTimeField(default=datetime.datetime(2016, 1, 1), auto_now=True),
preserve_default=False,
),
]
|
import os
import copy
import glob
import json
import logging
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from opencivicdata.legislative.models import LegislativeSession
from pupa.exceptions import DuplicateItemError
from pupa.utils import get_pseudo_id, utcnow
from pupa.exceptions import UnresolvedIdError, DataImportError
from pupa.models import Identifier
def omnihash(obj):
""" recursively hash unhashable objects """
if isinstance(obj, set):
return hash(frozenset(omnihash(e) for e in obj))
elif isinstance(obj, (tuple, list)):
return hash(tuple(omnihash(e) for e in obj))
elif isinstance(obj, dict):
return hash(frozenset((k, omnihash(v)) for k, v in obj.items()))
else:
return hash(obj)
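# Worked example: omnihash is order-insensitive for sets and dicts, so two
# structures with the same contents hash equal however they were built.
# >>> omnihash({'tags': {'a', 'b'}}) == omnihash({'tags': {'b', 'a'}})
# True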
def items_differ(jsonitems, dbitems, subfield_dict):
""" check whether or not jsonitems and dbitems differ """
# short circuit common cases
if len(jsonitems) == len(dbitems) == 0:
# both are empty
return False
elif len(jsonitems) != len(dbitems):
# if lengths differ, they're definitely different
return True
original_jsonitems = jsonitems
jsonitems = copy.deepcopy(jsonitems)
keys = jsonitems[0].keys()
# go over dbitems looking for matches
for dbitem in dbitems:
order = getattr(dbitem, 'order', None)
match = None
for i, jsonitem in enumerate(jsonitems):
# check if all keys (excluding subfields) match
for k in keys:
if k not in subfield_dict and getattr(dbitem, k) != jsonitem.get(k, None):
break
else:
# all fields match so far, possibly equal, just check subfields now
for k in subfield_dict:
jsonsubitems = jsonitem[k]
dbsubitems = list(getattr(dbitem, k).all())
if items_differ(jsonsubitems, dbsubitems, subfield_dict[k][2]):
break
else:
# if the dbitem sets 'order', then the order matters
if order is not None and int(order) != original_jsonitems.index(jsonitem):
break
# these items are equal, so let's mark it for removal
match = i
break
if match is not None:
# item exists in both, remove from jsonitems
jsonitems.pop(match)
else:
# exists in db but not json
return True
    # if we get here, jsonitems has to be empty: the lengths were equal and
    # every dbitem found a match; keep a safety check just in case
if jsonitems: # pragma: no cover
return True
return False
class BaseImporter(object):
""" BaseImporter
Override:
get_object(data)
limit_spec(spec) [optional, required if pseudo_ids are used]
prepare_for_db(data) [optional]
postimport() [optional]
"""
_type = None
model_class = None
related_models = {}
preserve_order = set()
merge_related = {}
def __init__(self, jurisdiction_id):
self.jurisdiction_id = jurisdiction_id
self.json_to_db_id = {}
self.duplicates = {}
self.pseudo_id_cache = {}
self.session_cache = {}
self.logger = logging.getLogger("pupa")
self.info = self.logger.info
self.debug = self.logger.debug
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
def get_session_id(self, identifier):
if identifier not in self.session_cache:
self.session_cache[identifier] = LegislativeSession.objects.get(
identifier=identifier, jurisdiction_id=self.jurisdiction_id).id
return self.session_cache[identifier]
    # no-ops to be overridden
def prepare_for_db(self, data):
return data
def postimport(self):
pass
def resolve_json_id(self, json_id, allow_no_match=False):
"""
Given an id found in scraped JSON, return a DB id for the object.
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
            UnresolvedIdError if the id couldn't be resolved
"""
if not json_id:
return None
if json_id.startswith('~'):
# keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
if json_id not in self.pseudo_id_cache:
spec = get_pseudo_id(json_id)
spec = self.limit_spec(spec)
if isinstance(spec, Q):
objects = self.model_class.objects.filter(spec)
else:
objects = self.model_class.objects.filter(**spec)
ids = {each.id for each in objects}
if len(ids) == 1:
self.pseudo_id_cache[json_id] = ids.pop()
errmsg = None
elif not ids:
errmsg = 'cannot resolve pseudo id to {}: {}'.format(
self.model_class.__name__, json_id)
else:
errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
self.model_class.__name__, json_id, ids)
# either raise or log error
if errmsg:
if not allow_no_match:
raise UnresolvedIdError(errmsg)
else:
self.error(errmsg)
self.pseudo_id_cache[json_id] = None
# return the cached object
return self.pseudo_id_cache[json_id]
# get the id that the duplicate points to, or use self
json_id = self.duplicates.get(json_id, json_id)
try:
return self.json_to_db_id[json_id]
except KeyError:
raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
def import_directory(self, datadir):
""" import a JSON directory into the database """
def json_stream():
# load all json, mapped by json_id
for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
with open(fname) as f:
yield json.load(f)
return self.import_data(json_stream())
def _prepare_imports(self, dicts):
""" filters the import stream to remove duplicates
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter)
"""
# hash(json): id
seen_hashes = {}
for data in dicts:
json_id = data.pop('_id')
# map duplicates (using omnihash to tell if json dicts are identical-ish)
objhash = omnihash(data)
if objhash not in seen_hashes:
seen_hashes[objhash] = json_id
yield json_id, data
else:
self.duplicates[json_id] = seen_hashes[objhash]
def import_data(self, data_items):
""" import a bunch of dicts together """
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record}
def import_item(self, data):
""" function used by import_data """
what = 'noop'
# remove the JSON _id (may still be there if called directly)
data.pop('_id', None)
# add fields/etc.
data = self.prepare_for_db(data)
try:
obj = self.get_object(data)
except self.model_class.DoesNotExist:
obj = None
# remove pupa_id which does not belong in the OCD data models
pupa_id = data.pop('pupa_id', None)
# pull related fields off
related = {}
for field in self.related_models:
related[field] = data.pop(field)
# obj existed, check if we need to do an update
if obj:
if obj.id in self.json_to_db_id.values():
raise DuplicateItemError(data, obj, related.get('sources', []))
# check base object for changes
for key, value in data.items():
if getattr(obj, key) != value and key not in obj.locked_fields:
setattr(obj, key, value)
what = 'update'
updated = self._update_related(obj, related, self.related_models)
if updated:
what = 'update'
if what == 'update':
obj.save()
# need to create the data
else:
what = 'insert'
try:
obj = self.model_class.objects.create(**data)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, data,
self.model_class))
self._create_related(obj, related, self.related_models)
if pupa_id:
Identifier.objects.get_or_create(identifier=pupa_id,
jurisdiction_id=self.jurisdiction_id,
defaults={'content_object': obj})
return obj.id, what
def _update_related(self, obj, related, subfield_dict):
"""
update DB objects related to a base object
        obj: a base object whose related objects may need updating
        related: dict mapping field names to lists of related objects
        subfield_dict: where to get the next layer of subfields
"""
# keep track of whether or not anything was updated
updated = False
# for each related field - check if there are differences
for field, items in related.items():
# skip subitem check if it's locked anyway
if field in obj.locked_fields:
continue
# get items from database
dbitems = list(getattr(obj, field).all())
dbitems_count = len(dbitems)
# default to doing nothing
do_delete = do_update = False
if items and dbitems_count: # we have items, so does db, check for conflict
do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
elif items and not dbitems_count: # we have items, db doesn't, just update
do_update = True
elif not items and dbitems_count: # db has items, we don't, just delete
do_delete = True
# otherwise: no items or dbitems, so nothing is done
# don't delete if field is in merge_related
if field in self.merge_related:
new_items = []
# build a list of keyfields to existing database objects
keylist = self.merge_related[field]
keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
item for item in dbitems}
# go through 'new' items
# if item with the same keyfields exists:
# update the database item w/ the new item's properties
# else:
# add it to new_items
for item in items:
key = tuple(item.get(k) for k in keylist)
dbitem = keyed_dbitems.get(key)
if not dbitem:
new_items.append(item)
else:
# update dbitem
for fname, val in item.items():
setattr(dbitem, fname, val)
dbitem.save()
# import anything that made it to new_items in the usual fashion
self._create_related(obj, {field: new_items}, subfield_dict)
else:
# default logic is to just wipe and recreate subobjects
if do_delete:
updated = True
getattr(obj, field).all().delete()
if do_update:
updated = True
self._create_related(obj, {field: items}, subfield_dict)
return updated
def _create_related(self, obj, related, subfield_dict):
"""
create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
        subfield_dict: where to get the next layer of subfields
"""
for field, items in related.items():
subobjects = []
all_subrelated = []
Subtype, reverse_id_field, subsubdict = subfield_dict[field]
for order, item in enumerate(items):
# pull off 'subrelated' (things that are related to this obj)
subrelated = {}
for subfield in subsubdict:
subrelated[subfield] = item.pop(subfield)
if field in self.preserve_order:
item['order'] = order
item[reverse_id_field] = obj.id
try:
subobjects.append(Subtype(**item))
all_subrelated.append(subrelated)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
# add all subobjects at once (really great for actions & votes)
try:
Subtype.objects.bulk_create(subobjects)
except Exception as e:
raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
        # after importing the subobjects, import their subsubobjects
for subobj, subrel in zip(subobjects, all_subrelated):
self._create_related(subobj, subrel, subsubdict)
def lookup_obj_id(self, pupa_id, model):
content_type = ContentType.objects.get_for_model(model)
try:
obj_id = Identifier.objects.get(identifier=pupa_id,
content_type=content_type,
jurisdiction_id=self.jurisdiction_id).object_id
except Identifier.DoesNotExist:
obj_id = None
return obj_id
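# A minimal sketch of a concrete importer -- not part of pupa itself; the
# model classes and field names below are hypothetical.
#
#   class PersonImporter(BaseImporter):
#       _type = 'person'
#       model_class = Person                # an OCD model class
#       related_models = {
#           # field -> (Subtype, reverse id field, nested subfield dict)
#           'identifiers': (PersonIdentifier, 'person_id', {}),
#       }
#
#       def get_object(self, person):
#           # resolve an incoming JSON dict to an existing DB row
#           return self.model_class.objects.get(name=person['name'])
#
#       def limit_spec(self, spec):
#           # scope pseudo-id lookups to this jurisdiction
#           spec['jurisdiction_id'] = self.jurisdiction_id
#           return spec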
|
from __future__ import print_function, division
from pandas import read_csv, DataFrame
from sklearn import cross_validation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
import numpy as np
from basic_imports import (
X_train, y_train,
X_cv, y_cv,
X_test,
vectorizer, training_data, test_data)
def get_train_data(clf, Xtrain, ytrain, Xcv, ycv, Nchunks=50):
"""
Train the classifier in Nchunks, and return training data.
"""
# Break training data into chunks
    Xchunks = [Xtrain[i::Nchunks] for i in range(Nchunks)]
    ychunks = [ytrain[i::Nchunks] for i in range(Nchunks)]
# Loop over the chunks and train the classifier
M = 0
Mlist = []
scores = []
for Xchunk, ychunk in zip(Xchunks, ychunks):
# Update measurement count
M += ychunk.size
Mlist.append(M)
# train the bernoulli model with more data
clf.partial_fit(Xchunk, ychunk, classes=[0,1])
        scores.append(clf.score(Xcv, ycv))
return clf, np.array(Mlist), np.array(scores)
alphas = np.linspace(.7, 1.0, 3)
bernoulli_clfs = []
bernoulli_score_lists = []
multinomial_clfs = []
multinomial_score_lists = []
for alpha in alphas:
    # Make a Bernoulli naive Bayes classifier
bernoulli_clf = BernoulliNB(binarize=.1, alpha=alpha)
bernoulli_clf, ms, scores = get_train_data(bernoulli_clf,
X_train, y_train,
X_cv, y_cv)
bernoulli_clfs.append(bernoulli_clf)
bernoulli_score_lists.append(scores)
    # Make a multinomial naive Bayes classifier
multinomial_clf = MultinomialNB(alpha=alpha)
multinomial_clf, ms, scores = get_train_data(multinomial_clf,
X_train, y_train,
X_cv, y_cv)
multinomial_clfs.append(multinomial_clf)
multinomial_score_lists.append(scores)
# fold the held-out CV data into the final multinomial model before predicting
multinomial_clf.partial_fit(X_cv, y_cv)
y_test_predict = multinomial_clf.predict(X_test)
out_frame = DataFrame(data={'id': test_data['id'],
'sentiment': y_test_predict})
out_frame.to_csv('Bag_of_Words_model.csv', index=False, quoting=3)
if __name__ == "__main__":
    # run any doctests defined in this module
import doctest
doctest.testmod()
# plot the score curve during training
import matplotlib.pyplot as plt
fig = plt.figure(0, (8,8), facecolor='white')
fig.clf()
box = [.1, .1, .8, .8]
ax = fig.add_axes(box)
    # plot the learning curves from the last alpha tried
    ax.plot(ms, bernoulli_score_lists[-1], linewidth=4, label="BernoulliNB")
    ax.plot(ms, multinomial_score_lists[-1], linewidth=4, label="MultinomialNB")
ax.set_title("Sentiment prediction based on movie review text classification", fontweight='bold')
ax.legend()
    plt.show()
|
"""
Splash can send outgoing network requests through an HTTP proxy server.
This module provides classes ("proxy factories") which define
which proxies to use for a given request. QNetworkAccessManager calls
a proxy factory for each outgoing request.
Not to be confused with Splash Proxy mode when Splash itself works as
an HTTP proxy (see :mod:`splash.proxy_server`).
"""
from __future__ import absolute_import
import re
import os
import urlparse
import ConfigParser
from PyQt4.QtNetwork import QNetworkProxy
from splash.render_options import BadOption
from splash.qtutils import create_proxy, validate_proxy_type
from splash.utils import path_join_secure
class _BlackWhiteSplashProxyFactory(object):
"""
Proxy factory that enables non-default proxy list when
requested URL is matched by one of whitelist patterns
while not being matched by one of the blacklist patterns.
"""
def __init__(self, blacklist=None, whitelist=None, proxy_list=None):
self.blacklist = blacklist or []
self.whitelist = whitelist or []
self.proxy_list = proxy_list or []
def queryProxy(self, query=None, *args, **kwargs):
protocol = unicode(query.protocolTag())
url = unicode(query.url().toString())
if self.shouldUseProxyList(protocol, url):
return self._customProxyList()
return self._defaultProxyList()
def shouldUseProxyList(self, protocol, url):
if not self.proxy_list:
return False
if protocol not in ('http', 'https'):
# don't try to proxy unknown protocols
return False
if any(re.match(p, url) for p in self.blacklist):
return False
if any(re.match(p, url) for p in self.whitelist):
return True
return not bool(self.whitelist)
def _defaultProxyList(self):
return [QNetworkProxy(QNetworkProxy.DefaultProxy)]
def _customProxyList(self):
return [
create_proxy(host, port, username, password, type)
            for host, port, username, password, type in self.proxy_list
]
class ProfilesSplashProxyFactory(_BlackWhiteSplashProxyFactory):
"""
    This proxy factory reads _BlackWhiteSplashProxyFactory
    parameters from an ini file; the name of the profile can be set per-request
using GET parameter.
Example config file for 'mywebsite' proxy profile::
; /etc/splash/proxy-profiles/mywebsite.ini
[proxy]
host=proxy.crawlera.com
port=8010
username=username
password=password
type=HTTP
[rules]
whitelist=
.*mywebsite\.com.*
blacklist=
.*\.js.*
.*\.css.*
.*\.png
If there is ``default.ini`` proxy profile in profiles folder
it will be used when no profile is specified in GET parameter.
If GET parameter is 'none' or empty ('') no proxy will be used even if
``default.ini`` is present.
"""
NO_PROXY_PROFILE_MSG = 'Proxy profile does not exist'
def __init__(self, proxy_profiles_path, profile_name):
self.proxy_profiles_path = proxy_profiles_path
blacklist, whitelist, proxy_list = self._getFilterParams(profile_name)
super(ProfilesSplashProxyFactory, self).__init__(blacklist, whitelist, proxy_list)
def _getFilterParams(self, profile_name=None):
"""
Return (blacklist, whitelist, proxy_list) tuple
loaded from profile ``profile_name``.
"""
if profile_name is None:
profile_name = 'default'
ini_path = self._getIniPath(profile_name)
if not os.path.isfile(ini_path):
profile_name = 'none'
if profile_name == 'none':
return [], [], []
ini_path = self._getIniPath(profile_name)
return self._parseIni(ini_path)
def _getIniPath(self, profile_name):
filename = profile_name + '.ini'
try:
return path_join_secure(self.proxy_profiles_path, filename)
except ValueError as e:
# security check fails
print(e)
raise BadOption(self.NO_PROXY_PROFILE_MSG)
def _parseIni(self, ini_path):
parser = ConfigParser.ConfigParser(allow_no_value=True)
if not parser.read(ini_path):
raise BadOption(self.NO_PROXY_PROFILE_MSG)
blacklist = _get_lines(parser, 'rules', 'blacklist', [])
whitelist = _get_lines(parser, 'rules', 'whitelist', [])
try:
proxy = dict(parser.items('proxy'))
except ConfigParser.NoSectionError:
raise BadOption("Invalid proxy profile: no [proxy] section found")
try:
host = proxy['host']
except KeyError:
raise BadOption("Invalid proxy profile: [proxy] host is not found")
try:
port = int(proxy['port'])
except KeyError:
raise BadOption("Invalid proxy profile: [proxy] port is not found")
except ValueError:
raise BadOption("Invalid proxy profile: [proxy] port is incorrect")
if 'type' in proxy:
validate_proxy_type(proxy['type'])
proxy_list = [(host, port,
proxy.get('username'), proxy.get('password'),
proxy.get('type'))]
return blacklist, whitelist, proxy_list
class DirectSplashProxyFactory(object):
"""
This proxy factory will set the proxy passed to a render request
using a parameter.
If GET parameter is a fully qualified URL, use the specified proxy.
The syntax to specify the proxy is:
    [protocol://][user:password@]proxyhost[:port]
Where protocol is either ``http`` or ``socks5``. If port is not specified,
it's assumed to be 1080.
"""
def __init__(self, proxy):
url = urlparse.urlparse(proxy)
if url.scheme and url.scheme in ('http', 'socks5') and url.hostname:
self.proxy = create_proxy(
url.hostname,
url.port or 1080,
username=url.username,
password=url.password,
type=url.scheme.upper()
)
else:
raise BadOption('Invalid proxy URL format.')
def queryProxy(self, *args, **kwargs):
return [self.proxy]
def getFactory(ini_path, parameter):
"""
Returns the appropriate factory depending on the value of
ini_path and parameter
"""
    if parameter and re.match(r'^\w+://', parameter):
return DirectSplashProxyFactory(parameter)
else:
if ini_path:
return ProfilesSplashProxyFactory(ini_path, parameter)
else:
return None
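# A hedged usage sketch of getFactory (paths and values hypothetical):
#
#   getFactory('/etc/splash/proxy-profiles', 'mywebsite')
#       -> ProfilesSplashProxyFactory loading mywebsite.ini
#   getFactory(None, 'http://user:pass@proxy.example.com:8010')
#       -> DirectSplashProxyFactory using that proxy directly
#   getFactory(None, None)
#       -> None (no proxying)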
def _get_lines(config_parser, section, option, default):
try:
lines = config_parser.get(section, option).splitlines()
return [line for line in lines if line]
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
return default
|
"""
User defined type utilities for psycopg2.
"""
import psycopg2
def get_type_oid(cursor, typename, namespace="public"):
"""
Get a user defined type oid using a psycopg2 cursor, the typename
and namespace.
"""
cursor.execute("""
SELECT pgt.oid FROM pg_type pgt
JOIN pg_namespace pgn ON pgt.typnamespace = pgn.oid
WHERE pgt.typname = %(typename)s
AND pgn.nspname = %(namespace)s;""",
{'typename': typename,
'namespace': namespace})
return cursor.fetchone()[0]
def get_type_name(cursor, oid, namespace="public"):
"""
Get a user defined type name using a psycopg2 cursor, the oid
and namespace.
"""
cursor.execute("""
    SELECT pgt.typname FROM pg_type pgt
JOIN pg_namespace pgn ON pgt.typnamespace = pgn.oid
WHERE pgt.oid = %(oid)s
AND pgn.nspname = %(namespace)s;""",
{'oid': oid,
'namespace': namespace})
return cursor.fetchone()[0]
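# A minimal usage sketch (connection string and type name hypothetical):
#
#   conn = psycopg2.connect("dbname=mydb")
#   with conn.cursor() as cur:
#       oid = get_type_oid(cur, "my_enum_type")
#       assert get_type_name(cur, oid) == "my_enum_type"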
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('results', '0007_auto_20160407_2104'),
('results', '0006_auto_20160406_0659'),
]
operations = [
]
|
def run(cfg):
"""JMS Fix to deal with 11.1.1.5+ requirements"""
domainPath=cfg.getProperty('wls.domain.dir')
domainName=cfg.getProperty('wls.domain.name')
domainFullPath=str(domainPath) + '/' + str(domainName)
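    # readDomain/cd/delete/updateDomain/closeDomain are WLST built-ins, and
    # `log` is assumed to be provided by the surrounding WLST/Jython
    # environment; none of them are defined in this file.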
try:
readDomain(domainFullPath)
cd('/JMSSystemResource/jmsResources/JmsResource/NO_NAME_0')
delete('dist_wli.reporting.purge.queue_auto_1_auto','DistributedQueue')
except Exception, error:
log.error('Unable to update domain [' + domainFullPath + ']')
raise error
else:
updateDomain()
closeDomain()
|
from six import iteritems
from django.core.exceptions import ValidationError
from django.contrib.gis.geos import GEOSGeometry
from rest_framework.fields import Field
from ashlar.validators import validate_json_schema
class JsonBField(Field):
""" Custom serializer class for JsonB
Do no transformations, as we always want python dicts
Ensure top level is an object or array
"""
type_name = 'JsonBField'
def to_representation(self, value):
return value
def to_internal_value(self, value):
        if isinstance(value, (dict, list)):
return value
elif self.allow_null and not value:
return None
else:
raise ValidationError('Array or object required')
class DropJsonKeyException(Exception):
"""A way for JSON transformation functions to signal that a field should be dropped.
Raising this exception from a transform function does not imply an error condition, but rather
signals to the MethodTransformJsonField that the key/value pair passed to the transformation
function should be omitted from the response.
"""
pass
class MethodTransformJsonField(Field):
"""Custom field to filter JSON fields by serializer method before returning
Must be supplied the name of a filter_method on initialization. The filter_method
must be a method on the parent serializer and will be passed two parameters:
- key (the key at the root level of the JSON object)
- val (the value associated with key)
filter_method should return a transformed version of value, or raise a DropJsonKeyException
to specify that the key/value pair should be dropped from the response.
"""
# Loosely adapted from DRF's SerializerMethodField
type_name = 'MethodFilteredJsonField'
def __init__(self, transform_method_name=None, **kwargs):
self.transform_method_name = transform_method_name
kwargs['read_only'] = True
super(MethodTransformJsonField, self).__init__(**kwargs)
def bind(self, field_name, parent):
default_transform = 'transform_{field_name}'.format(field_name=field_name)
if self.transform_method_name is None:
self.transform_method_name = default_transform
super(MethodTransformJsonField, self).bind(field_name, parent)
def to_representation(self, value):
"""Transforms value's root-level key/value pairs based on parent.transform_method_name
Assumes value is a dict.
"""
transform_method = getattr(self.parent, self.transform_method_name)
representation = {}
for key, val in iteritems(value):
try:
(new_key, new_val) = transform_method(key, val)
representation[new_key] = new_val
except DropJsonKeyException:
continue
return representation
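# A hedged sketch (not part of this module) of how a parent serializer could
# feed MethodTransformJsonField; the serializer and key names are hypothetical.
#
#   from rest_framework import serializers
#
#   class RecordSerializer(serializers.Serializer):
#       data = MethodTransformJsonField()   # binds to 'transform_data'
#
#       def transform_data(self, key, val):
#           if key.startswith('_'):         # drop private keys
#               raise DropJsonKeyException
#           return key, val                 # keep everything else unchanged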
class JsonSchemaField(JsonBField):
"""Json Field that also validates whether it is a JSON-Schema"""
type_name = 'JsonSchemaField'
validators = [validate_json_schema]
class GeomBBoxField(Field):
"""Serialize a geometry as a bounding box"""
read_only = True
def to_representation(self, value):
        if not isinstance(value, GEOSGeometry):
            msg = "Can't apply GeomBBoxField to non-Geometry class {cls}"
raise ValidationError(msg.format(cls=value.__class__.__name__))
xmin, ymin, xmax, ymax = value.extent
return ({"lon": xmin, "lat": ymin}, {"lon": xmax, "lat": ymax})
|
import getpass
import logging
import os
import shutil
from django.conf import settings
log = logging.getLogger(__name__)
SYNC_USER = getattr(settings, 'SYNC_USER', getpass.getuser())
def copy(path, target, file=False):
"""
A better copy command that works with files or directories.
Respects the ``MULTIPLE_APP_SERVERS`` setting when copying.
"""
MULTIPLE_APP_SERVERS = getattr(settings, 'MULTIPLE_APP_SERVERS', [])
if MULTIPLE_APP_SERVERS:
log.info("Remote Copy %s to %s" % (path, target))
for server in MULTIPLE_APP_SERVERS:
mkdir_cmd = ("ssh %s@%s mkdir -p %s" % (SYNC_USER, server, target))
ret = os.system(mkdir_cmd)
if ret != 0:
log.error("COPY ERROR to app servers:")
log.error(mkdir_cmd)
if file:
slash = ""
else:
slash = "/"
# Add a slash when copying directories
sync_cmd = ("rsync -e 'ssh -T' -av --delete %s%s %s@%s:%s"
% (path, slash, SYNC_USER, server, target))
ret = os.system(sync_cmd)
if ret != 0:
log.error("COPY ERROR to app servers.")
log.error(sync_cmd)
else:
log.info("Local Copy %s to %s" % (path, target))
if file:
if os.path.exists(target):
os.remove(target)
shutil.copy2(path, target)
else:
if os.path.exists(target):
shutil.rmtree(target)
shutil.copytree(path, target)
def copy_to_app_servers(full_build_path, target, mkdir=True):
"""
A helper to copy a directory across app servers
"""
log.info("Copying %s to %s" % (full_build_path, target))
for server in settings.MULTIPLE_APP_SERVERS:
mkdir_cmd = ("ssh %s@%s mkdir -p %s" % (SYNC_USER, server, target))
ret = os.system(mkdir_cmd)
if ret != 0:
log.error("COPY ERROR to app servers:")
log.error(mkdir_cmd)
sync_cmd = ("rsync -e 'ssh -T' -av --delete %s/ %s@%s:%s"
% (full_build_path, SYNC_USER, server, target))
ret = os.system(sync_cmd)
if ret != 0:
log.error("COPY ERROR to app servers.")
log.error(sync_cmd)
def copy_file_to_app_servers(from_file, to_file):
"""
A helper to copy a single file across app servers
"""
log.info("Copying %s to %s" % (from_file, to_file))
to_path = os.path.dirname(to_file)
for server in settings.MULTIPLE_APP_SERVERS:
mkdir_cmd = ("ssh %s@%s mkdir -p %s" % (SYNC_USER, server, to_path))
ret = os.system(mkdir_cmd)
if ret != 0:
log.error("COPY ERROR to app servers.")
log.error(mkdir_cmd)
sync_cmd = ("rsync -e 'ssh -T' -av --delete %s %s@%s:%s" % (from_file,
SYNC_USER,
server,
to_file))
ret = os.system(sync_cmd)
if ret != 0:
log.error("COPY ERROR to app servers.")
log.error(sync_cmd)
def run_on_app_servers(command):
"""
    A helper to run a command on all app servers
"""
log.info("Running %s on app servers" % command)
ret_val = 0
if getattr(settings, "MULTIPLE_APP_SERVERS", None):
for server in settings.MULTIPLE_APP_SERVERS:
ret = os.system("ssh %s@%s %s" % (SYNC_USER, server, command))
if ret != 0:
ret_val = ret
return ret_val
else:
ret = os.system(command)
return ret
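# A hedged usage sketch (paths hypothetical): with MULTIPLE_APP_SERVERS unset
# this copies locally, otherwise it rsyncs to every listed app server.
#
#   copy('/tmp/build/html', '/srv/docs/myproject/en/latest')         # directory
#   copy('/tmp/build/pdf/doc.pdf', '/srv/docs/doc.pdf', file=True)   # single file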
|
from dmutils.audit import AuditTypes
from flask import jsonify, abort, request, current_app
from sqlalchemy.exc import IntegrityError
from sqlalchemy import asc
from sqlalchemy.types import String
from .. import main
from ... import db
from ...utils import drop_foreign_fields, json_has_required_keys
from ...validation import is_valid_service_id_or_400
from ...models import Service, DraftService, Supplier, AuditEvent, Framework
from ...service_utils import validate_and_return_updater_request, \
update_and_validate_service, index_service, validate_service, \
commit_and_archive_service, create_service_from_draft
from ...draft_utils import validate_and_return_draft_request, \
get_draft_validation_errors
@main.route('/draft-services/copy-from/<string:service_id>', methods=['PUT'])
def copy_draft_service_from_existing_service(service_id):
"""
Create a draft service from an existing service
:param service_id:
:return:
"""
is_valid_service_id_or_400(service_id)
updater_json = validate_and_return_updater_request()
service = Service.query.filter(
Service.service_id == service_id
).first_or_404()
draft_service = DraftService.query.filter(
DraftService.service_id == service_id
).first()
if draft_service:
abort(400, "Draft already exists for service {}".format(service_id))
draft = DraftService.from_service(service)
db.session.add(draft)
db.session.flush()
audit = AuditEvent(
audit_type=AuditTypes.create_draft_service,
user=updater_json['updated_by'],
data={
"draftId": draft.id,
"serviceId": service_id
},
db_object=draft
)
db.session.add(audit)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
abort(400, "Database Error: {0}".format(e))
return jsonify(services=draft.serialize()), 201
@main.route('/draft-services/<int:draft_id>', methods=['POST'])
def edit_draft_service(draft_id):
"""
Edit a draft service
:param draft_id:
:return:
"""
updater_json = validate_and_return_updater_request()
update_json = validate_and_return_draft_request()
page_questions = update_json.pop('page_questions', [])
draft = DraftService.query.filter(
DraftService.id == draft_id
).first_or_404()
draft.update_from_json(update_json)
errs = get_draft_validation_errors(draft.data,
draft.data['lot'],
framework_id=draft.framework_id,
required=page_questions)
if errs:
abort(400, errs)
audit = AuditEvent(
audit_type=AuditTypes.update_draft_service,
user=updater_json['updated_by'],
data={
"draftId": draft_id,
"serviceId": draft.service_id,
"updateJson": update_json
},
db_object=draft
)
db.session.add(draft)
db.session.add(audit)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
abort(400, "Database Error: {0}".format(e))
return jsonify(services=draft.serialize()), 200
@main.route('/draft-services', methods=['GET'])
def list_draft_services():
supplier_id = request.args.get('supplier_id')
service_id = request.args.get('service_id')
framework_slug = request.args.get('framework')
if supplier_id is None:
abort(400, "Invalid page argument: supplier_id is required")
try:
supplier_id = int(supplier_id)
except ValueError:
abort(400, "Invalid supplier_id: %s" % supplier_id)
supplier = Supplier.query.filter(Supplier.supplier_id == supplier_id) \
.all()
if not supplier:
abort(404, "supplier_id '%d' not found" % supplier_id)
services = DraftService.query.order_by(
asc(DraftService.framework_id),
asc(DraftService.data['lot'].cast(String).label('data_lot')),
asc(DraftService.data['serviceName'].
cast(String).label('data_servicename'))
)
if service_id:
is_valid_service_id_or_400(service_id)
services = services.filter(DraftService.service_id == service_id)
if framework_slug:
        framework = Framework.query.filter(
            Framework.slug == framework_slug
        ).first_or_404()
services = services.filter(DraftService.framework_id == framework.id)
items = services.filter(DraftService.supplier_id == supplier_id).all()
return jsonify(
services=[service.serialize() for service in items],
links=dict()
)
@main.route('/draft-services/<int:draft_id>', methods=['GET'])
def fetch_draft_service(draft_id):
"""
Return a draft service
:param draft_id:
:return:
"""
draft = DraftService.query.filter(
DraftService.id == draft_id
).first_or_404()
return jsonify(services=draft.serialize())
@main.route('/draft-services/<int:draft_id>', methods=['DELETE'])
def delete_draft_service(draft_id):
"""
Delete a draft service
:param draft_id:
:return:
"""
updater_json = validate_and_return_updater_request()
draft = DraftService.query.filter(
DraftService.id == draft_id
).first_or_404()
audit = AuditEvent(
audit_type=AuditTypes.delete_draft_service,
user=updater_json['updated_by'],
data={
"draftId": draft_id,
"serviceId": draft.service_id
},
db_object=None
)
db.session.delete(draft)
db.session.add(audit)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
abort(400, "Database Error: {0}".format(e))
return jsonify(message="done"), 200
@main.route('/draft-services/<int:draft_id>/publish', methods=['POST'])
def publish_draft_service(draft_id):
"""
Publish a draft service
:param draft_id:
:return:
"""
update_details = validate_and_return_updater_request()
draft = DraftService.query.filter(
DraftService.id == draft_id
).first_or_404()
if draft.service_id:
service = Service.query.filter(
Service.service_id == draft.service_id
).first_or_404()
service_from_draft = update_and_validate_service(
service,
draft.data)
else:
service_from_draft = create_service_from_draft(draft, "enabled")
commit_and_archive_service(service_from_draft, update_details,
AuditTypes.publish_draft_service,
audit_data={'draftId': draft_id})
try:
db.session.delete(draft)
db.session.commit()
except IntegrityError as e:
db.session.rollback()
current_app.logger.warning(
'Failed to delete draft {} after publishing service {}: {}'.format(
draft_id, service_from_draft.service_id, e.message)
)
index_service(service_from_draft)
return jsonify(services=service_from_draft.serialize()), 200
@main.route('/draft-services/<string:framework_slug>/create', methods=['POST'])
def create_new_draft_service(framework_slug):
"""
Create a new draft service with lot, supplier_id, draft_id, framework_id
    :return: the serialized new draft service, returned with an
        HTTP/1.1 201 Created status
"""
updater_json = validate_and_return_updater_request()
draft_json = validate_and_return_draft_request()
json_has_required_keys(draft_json, ['lot', 'supplierId'])
    framework = Framework.query.filter(
        Framework.slug == framework_slug
    ).first_or_404()
if framework.status != 'open':
abort(400, "'{}' is not open for submissions".format(framework_slug))
supplier_id = draft_json['supplierId']
lot = draft_json['lot']
errs = get_draft_validation_errors(draft_json, lot, slug=framework_slug)
if errs:
return jsonify(errors=errs), 400
draft_json = drop_foreign_fields(draft_json, ['supplierId'])
draft = DraftService(
framework_id=framework.id,
supplier_id=supplier_id,
data=draft_json,
status="not-submitted"
)
try:
db.session.add(draft)
db.session.flush()
audit = AuditEvent(
audit_type=AuditTypes.create_draft_service,
user=updater_json['updated_by'],
data={
"draftId": draft.id
},
db_object=draft
)
db.session.add(audit)
db.session.commit()
except IntegrityError as e:
db.session.rollback()
abort(400, "Database Error: {0}".format(e))
return jsonify(services=draft.serialize()), 201
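# Route summary for this module (IDs are placeholders); request bodies must
# satisfy validate_and_return_updater_request / validate_and_return_draft_request:
#
#   PUT    /draft-services/copy-from/<service_id>    draft from a live service
#   POST   /draft-services/<draft_id>                edit a draft
#   GET    /draft-services?supplier_id=<id>          list drafts for a supplier
#   GET    /draft-services/<draft_id>                fetch a draft
#   DELETE /draft-services/<draft_id>                delete a draft
#   POST   /draft-services/<draft_id>/publish        publish a draft
#   POST   /draft-services/<framework_slug>/create   create a brand-new draft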
|
import py
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform
from rpython.rlib.rarithmetic import is_emulated_long
from rpython.translator import cdir
cdir = py.path.local(cdir)
eci = ExternalCompilationInfo(
include_dirs = [cdir],
includes = ['src/stacklet/stacklet.h'],
separate_module_files = [cdir / 'src' / 'stacklet' / 'stacklet.c'],
)
if 'masm' in dir(eci.platform): # Microsoft compiler
if is_emulated_long:
asmsrc = 'switch_x64_msvc.asm'
else:
asmsrc = 'switch_x86_msvc.asm'
eci.separate_module_files += (cdir / 'src' / 'stacklet' / asmsrc, )
rffi_platform.verify_eci(eci.convert_sources_to_files())
def llexternal(name, args, result, **kwds):
return rffi.llexternal(name, args, result, compilation_info=eci,
_nowrapper=True, **kwds)
handle = rffi.COpaquePtr(typedef='stacklet_handle', compilation_info=eci)
thread_handle = rffi.COpaquePtr(typedef='stacklet_thread_handle',
compilation_info=eci)
run_fn = lltype.Ptr(lltype.FuncType([handle, llmemory.Address], handle))
null_handle = lltype.nullptr(handle.TO)
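# The C stacklet library signals "this stacklet has finished" by returning a
# handle equal to (stacklet_handle) -1 instead of NULL; is_empty_handle()
# checks for that sentinel.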
def is_empty_handle(h):
return rffi.cast(lltype.Signed, h) == -1
newthread = llexternal('stacklet_newthread', [], thread_handle)
deletethread = llexternal('stacklet_deletethread', [thread_handle], lltype.Void)
new = llexternal('stacklet_new', [thread_handle, run_fn, llmemory.Address],
handle, random_effects_on_gcobjs=True)
switch = llexternal('stacklet_switch', [handle], handle,
random_effects_on_gcobjs=True)
destroy = llexternal('stacklet_destroy', [handle], lltype.Void)
_translate_pointer = llexternal("_stacklet_translate_pointer",
[llmemory.Address, llmemory.Address],
llmemory.Address)
|
import os
import re
import string
import sys
import time
import posixpath
import subprocess
etchosts_template = """127.0.0.1 localhost
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
"""
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
from starcluster import utils
class NullDevice(object):
def write(self, s):
pass
class CreateCell (ClusterSetup):
"""
Configure a custom SGE cell for a StarCluster cluster
"""
def __init__(self, privatekey, publiccert, cell, execdport, qmasterport, root, slots):
log.info("Loaded plugin: sge.CreateCell")
log.debug("sge.CreateCell.__init__ Initialising CreateCell plugin.")
log.debug("sge.CreateCell.__init__ privatekey %s" % privatekey)
log.debug("sge.CreateCell.__init__ publiccert %s" % publiccert)
log.debug("sge.CreateCell.__init__ cell %s" % cell)
log.debug("sge.CreateCell.__init__ execdport %s" % execdport)
log.debug("sge.CreateCell.__init__ qmasterport %s" % qmasterport)
log.debug("sge.CreateCell.__init__ root %s" % root)
log.debug("sge.CreateCell.__init__ slots %s" % slots)
self.headgroup = "default"
self.privatekey = privatekey
self.publiccert = publiccert
self.cell = cell
self.execdport = execdport
self.qmasterport = qmasterport
self.root = root
self.slots = slots
#""" SET HEAD NODE'S ROOT PATH TO SGE BINARIES """
#rootpath = os.environ['ROOTPATH'];
#rootpath = re.sub(r'^.', '', rootpath)
#log.info("rootpath: %s", rootpath)
#self.rootpath = rootpath
os.environ['SGE_ROOT'] = root
os.environ['SGE_CELL'] = cell
os.environ['SGE_QMASTER_PORT'] = qmasterport
os.environ['SGE_EXECD_PORT'] = execdport
def run(self, nodes, master, user, user_shell, volumes):
"""
Mount NFS shares on master and all nodes
"""
#### SET ROOT PATH
self.masterbinroot = self.getRemoteBinRoot(master)
self.localbinroot = self.getLocalBinRoot()
##### OPEN NEW PORTS ON EC2 ON HEAD
self.openSgePorts()
#### CREATE NEW CELL DIRECTORY ON HEAD AND MASTER/NODES
self.copyCellOnHead()
self.copyCell(master)
#### SET MASTER HOSTNAME AS INTERNAL IP
self.setMasterHostname(master)
#### SET HEADNODE HOSTNAME AS INTERNAL IP
self.setHeadHostname()
#### SET MASTER act_qmaster AS MASTER INTERNAL IP
self.setMasterActQmaster(master)
#### SET MASTER INTERNAL IP IN /etc/hosts
self.setMasterEtcHosts(master)
#### START SGE ON MASTER
self.restartSge(master)
#### ADD ENVIRONMENT VARIABLES TO /etc/profile ON MASTER/NODES
for node in nodes:
self.addEnvarsToProfile(node)
#### SET MASTER AS SUBMIT AND ADMIN HOST
self.setMasterSubmit(master)
#### SET HEADNODE qmaster_info AS QUICK LOOKUP FOR MASTER INFO
self.setMasterInfo(master)
#### SET MASTER'S IP ADDRESS IN act_qmaster FILE ON HEAD
self.updateHeadActQmaster(master)
#### SET HEAD AS SUBMIT AND ADMIN HOST
self.setHeadSubmit(master)
#### INSTEAD OF 'master', USE MASTER INTERNAL IP IN @allhosts
self.addMasterToAllHosts(master)
##### RESTART SGE ON MASTER/NODES
for node in nodes:
self.restartSge(node)
#### SCHEDULING INFO
self.enableSchedulingInfo()
#### ADD threaded PARALLEL ENVIRONMENT ON MASTER
self.addParallelEnvironment(master)
#### ADD NODES TO @allhosts GROUP
for node in nodes:
if node.alias != "master":
self.addToAllhosts(node, master)
##### RESTART SGE ON MASTER/NODES
for node in nodes:
self.restartSge(node)
#### REMOVE DEFAULT all.q QUEUE
self.removeAllq()
log.info("Completed plugin sge")
def getRemoteBinRoot(self, node):
"""
Return the CPU architecture-dependent path to the SGE binaries
NB: Assumes 64-bit system
"""
log.info("sge.CreateCell.getRemoteBinBoot Getting root path for node: %s", node.alias)
response = node.ssh.execute("grep vendor_id /proc/cpuinfo")
vendorid = response[0]
log.debug("sge.CreateCell.getRemoteBinBoot vendorid: %s", vendorid)
#### ASCERTAIN IF CPU IS INTEL TYPE (ELSE, MUST BE AMD TYPE)
intel = self.isIntel(vendorid)
log.debug("sge.CreateCell.getRemoteBinBoot intel: %s", intel)
#### GET BIN DIR SUBDIRS
command = "ls " + self.root + "/bin"
log.debug("sge.CreateCell.getRemoteBinBoot command: %s", command)
files = node.ssh.execute(command)
log.debug("sge.CreateCell.getRemoteBinBoot files: %s", files)
binroot = self.getBinRoot(intel, files);
log.info("sge.CreateCell.getRemoteBinBoot binroot: %s", binroot)
if binroot == "":
log.info("sge.CreateCell.getRemoteBinBoot sge.CreateCell.getRemoteBinRoot Can't find root path for vendorid: %s", vendorid)
return binroot
def getLocalBinRoot(self):
"""
Return the CPU architecture-dependent path to the SGE binaries
NB: Assumes 64-bit system
"""
log.info("sge.CreateCell.setLocalBinBoot Getting root path on local machine")
p = os.popen("grep vendor_id /proc/cpuinfo")
vendorid = p.read()
log.debug("sge.CreateCell.setLocalBinBoot vendorid: %s", vendorid)
#### ASCERTAIN IF CPU IS INTEL TYPE (ELSE, MUST BE AMD TYPE)
intel = self.isIntel(vendorid)
log.debug("sge.CreateCell.setLocalBinBoot intel: %s", intel)
#### GET BIN DIR SUBDIRS
command = "ls " + self.root + "/bin"
log.debug("sge.CreateCell.setLocalBinBoot command: %s", command)
p = os.popen(command)
filelist = p.read();
files = filelist.split("\n");
log.debug("sge.CreateCell.setLocalBinBoot files: %s", files)
binroot = self.getBinRoot(intel, files);
log.info("sge.CreateCell.setLocalBinBoot binroot: %s", binroot)
if binroot == "":
log.info("sge.CreateCell.setLocalBinRoot Can't find root path for vendorid: %s", vendorid)
return binroot
def isIntel(self, vendorid):
log.info("sge.CreateCell.isIntel vendorid: %s", vendorid)
        match = re.search(r'vendor_id\s+:\s+GenuineIntel', vendorid)
        log.info("sge.CreateCell.isIntel match: %s", match)
        return match is not None
def getBinRoot(self, intel, files):
binroot = ""
for file in files:
if intel:
if file == "lx24-x86":
binroot = self.root + "/bin/lx24-x86"
break
elif file == "linux-x64":
binroot = self.root + "/bin/linux-x64"
break
else:
if file == "lx24-amd64":
binroot = self.root + "/bin/lx24-amd64"
break
elif file == "linux-x64":
binroot = self.root + "/bin/linux-x64"
break
#log.info("binroot: %s", binroot)
return binroot
def openSgePorts(self):
"""
Open the particular SGE qmaster and execd daemon ports for this cluster
"""
log.info("Opening SGE qmaster and execd ports")
qmasterport = self.qmasterport
execdport = self.execdport
cluster = self.cell
envars = self.exportEnvironmentVars()
log.debug("sge.CreateCell.openSgePorts qmasterport; %s", qmasterport)
log.debug("sge.CreateCell.openSgePorts execdport; %s", execdport)
log.debug("sge.CreateCell.openSgePorts envars; %s", envars)
#### SET EC2 KEY FILE ENVIRONMENT VARIABLES
ec2vars = "export EC2_PRIVATE_KEY=" + self.privatekey + "; "
ec2vars += "export EC2_CERT=" + self.publiccert + "; "
# HEAD NODE (I.E., NOT MASTER OR NODE)
commands = [
ec2vars + 'ec2-authorize @sc-' + cluster + ' -p ' + execdport + ' -P tcp',
ec2vars + 'ec2-authorize @sc-' + cluster + ' -p ' + execdport + ' -P udp',
ec2vars + 'ec2-authorize @sc-' + cluster + ' -p ' + qmasterport + ' -P tcp',
ec2vars + 'ec2-authorize @sc-' + cluster + ' -p ' + qmasterport + ' -P udp',
ec2vars + 'ec2-authorize ' + self.headgroup + ' -p ' + execdport + ' -P tcp',
ec2vars + 'ec2-authorize ' + self.headgroup + ' -p ' + execdport + ' -P udp',
ec2vars + 'ec2-authorize ' + self.headgroup + ' -p ' + qmasterport + ' -P tcp',
ec2vars + 'ec2-authorize ' + self.headgroup + ' -p ' + qmasterport + ' -P udp'
]
for command in commands:
self.runSystemCommand(command);
def runSystemCommand(self, command):
log.info(command)
os.system(command)
def setMasterActQmaster(self, master):
"""
Set master hostname as INTERNAL IP to disambiguate from other
cluster 'master' nodes given multiple clusters
"""
log.info("Setting act_qmaster file contents")
hostname = self.getHostname(master)
act_qmaster = self.root + "/" + self.cell + "/common/act_qmaster"
command = "echo '" + hostname + "' > " + act_qmaster
log.debug("sge.CreateCell.setMasterActQmaster command: %s", command)
master.ssh.execute(command)
def setMasterHostname(self, master):
"""
Set master hostname as internal IP to disambiguate
from other 'master' nodes given multiple clusters
"""
log.info("Setting master hostname")
hostname = self.getHostname(master)
command = "hostname " + hostname
log.info("sge.CreateCell.setMasterHostname command: %s", command)
master.ssh.execute(command)
command = "echo '" + hostname + "' > /etc/hostname"
log.info("sge.CreateCell.setMasterHostname command: %s", command)
master.ssh.execute(command)
def setHeadHostname(self):
"""
Set master hostname as internal IP to disambiguate
from other 'master' nodes given multiple clusters
"""
log.info("Setting headnode hostname")
command = "curl -s http://169.254.169.254/latest/meta-data/local-hostname"
hostname = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).stdout.read()
log.info("sge.CreateCell.setHeadnodeHostname hostname: %s", hostname)
command = "hostname " + hostname
log.info("sge.CreateCell.setHeadnodeHostname command: %s", command)
os.system(command)
command = "echo '" + hostname + "' > /etc/hostname"
log.info("sge.CreateCell.setHeadnodeHostname command: %s", command)
os.system(command)
def getHostname(self, master):
log.info("sge.CreateCell.getHostname returning hostname: %s", master.private_dns_name)
return master.private_dns_name
def setMasterEtcHosts (self, master):
log.info("Adding master hostname to own /etc/hosts")
envars = self.exportEnvironmentVars()
command = "cat /etc/hosts"
log.debug("sge.CreateCell.setMasterEtcHosts command: %s" % command)
etchosts = etchosts_template
ip_address = master.ip_address
dns_name = master.dns_name
insert = master.private_ip_address
insert += "\t"
insert += self.getHostname(master)
insert += "\t"
insert += "localhost"
etchosts += insert + "\n"
log.debug("sge.CreateCell.setMasterEtcHosts AFTER etchosts: %s", etchosts)
etchosts_file = master.ssh.remote_file("/etc/hosts")
print >> etchosts_file, etchosts
etchosts_file.close()
# DEPRECATED:
#command = "/etc/init.d/networking restart"
command = "sh -c \"ifdown eth0 && ifup eth0\""
log.debug("sge.CreateCell.setMasterEtcHosts command: %s", command)
result = master.ssh.execute(command)
log.debug("sge.CreateCell.setMasterEtcHosts result: %s", result)
def setMasterSubmit(self, master):
hostname = self.getHostname(master)
envars = self.exportEnvironmentVars()
add_submit = envars + self.masterbinroot + '/qconf -as ' + hostname
add_admin = envars + self.masterbinroot + '/qconf -ah ' + hostname
log.debug("sge.CreateCell.setMasterSubmit add_submit: %s", add_submit)
master.ssh.execute(add_submit)
log.debug("sge.CreateCell.setMasterSubmit add_admin: %s", add_admin)
master.ssh.execute(add_admin)
def addMasterToAllHosts (self, master):
log.info("sge.CreateCell.addMasterToAllHosts Replacing 'master' with master INTERNAL IP in @allhosts")
envars = self.exportEnvironmentVars()
command = envars + self.localbinroot + "/qconf -shgrp @allhosts"
log.info("sge.CreateCell.addMasterToAllHosts command: %s" % command)
allhosts_template = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).stdout.read()
log.info("sge.CreateCell.addMasterToAllHosts BEFORE allhosts_template: %s", allhosts_template)
#### GET hostname
hostname = self.getHostname(master)
#### REMOVE master AND hostname IF EXISTS
match = "master"
allhosts_template = string.replace(allhosts_template, "NONE", '')
allhosts_template = string.replace(allhosts_template, match, '')
allhosts_template = string.replace(allhosts_template, hostname, '')
#### ADD hostname
        allhosts_template = allhosts_template.strip()
allhosts_template += " " + hostname
#allhosts_template = re.sub('\s+$/s', '', allhosts_template)
log.info("sge.CreateCell.addMasterToAllHosts AFTER allhosts_template: %s", allhosts_template)
filename = "/tmp/" + self.cell + "-allhosts.txt"
allhosts_file = open(filename, 'w')
print >> allhosts_file, allhosts_template
allhosts_file.close()
log.info("sge.CreateCell.addMasterToAllHosts printed filename: %s", filename)
set_command = envars + self.localbinroot + "/qconf -Mhgrp " + filename
log.info("sge.CreateCell.addMasterToAllHosts set_command: %s" % set_command)
os.system(set_command)
def addToAllhosts(self, node, master):
"""
Add host to @allhosts group to enable it to be an execution host
"""
log.info("Add %s to @allhosts group", node.alias)
os.environ['SGE_ROOT'] = self.root
os.environ['SGE_CELL'] = self.cell
os.environ['SGE_QMASTER_PORT'] = self.qmasterport
os.environ['SGE_EXECD_PORT'] = self.execdport
hostname = node.alias
#if node.alias == "master":
# hostname = self.getHostname(master)
command = self.masterbinroot + "/qconf -aattr hostgroup hostlist " + hostname + " @allhosts >> /tmp/allhosts.out; "
log.info("sge.addToAllhosts command: %s", command)
envars = self.exportEnvironmentVars()
original_stdout = sys.stdout
sys.stdout = NullDevice()
master.ssh.execute(envars + command)
sys.stdout = original_stdout
def setHeadSubmit(self, master):
"""
Add head node to submit hosts and admin hosts lists
"""
log.info("Adding head node to submit hosts and admin hosts lists")
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
envars = self.exportEnvironmentVars()
        add_submit = envars + self.masterbinroot + '/qconf -as ' + self.head_ip
        add_admin = envars + self.masterbinroot + '/qconf -ah ' + self.head_ip
log.debug("sge.CreateCell.setHeadSubmit %s", add_submit)
master.ssh.execute(add_submit)
log.debug("sge.CreateCell.setHeadSubmit %s", add_admin)
master.ssh.execute(add_admin)
def getHeadIp(self):
log.info("sge.CreateCell.getHeadIp Getting headnode internal IP")
p = os.popen('curl -s http://169.254.169.254/latest/meta-data/instance-id');
instanceid = p.read()
log.debug("sge.CreateCell.getHeadIp instanceid: %s" % instanceid)
command = "ec2-describe-instances -K " + self.privatekey \
+ " -C " + self.publiccert \
+ " " + instanceid
log.debug("sge.CreateCell.getHeadIp command: %s" % command)
p = os.popen(command);
reservation = p.read()
log.debug("sge.CreateCell.getHeadIp reservation: %s" % reservation)
instance = reservation.split("INSTANCE")[1];
log.debug("sge.CreateCell.getHeadIp instance: %s" % instance)
        instanceRow = instance.split('\t')
        # NB: fragile -- relies on the column layout of ec2-describe-instances output
        self.head_ip = instanceRow[17]
log.info("sge.CreateCell.getHeadIp self.head_ip: %s" % self.head_ip)
def removeAllq (self):
"""
Delete default 'all.q' queue
"""
log.info("sge.CreateCell.removeAllq Removing the default 'all.q' queue")
envars = self.exportEnvironmentVars()
command = envars + self.localbinroot + "/qconf -dq all.q"
log.debug("sge.CreateCell.removeAllq command: %s" % command)
os.system(command)
def addEnvarsToProfile(self, node):
"""
Add environment variables (SGE_CELL, ports, etc.) to /etc/profile
"""
log.info("Adding environment variables to /etc/profile")
envars = self.exportEnvironmentVars();
log.debug("sge.CreateCell.addEnvarsToProfile envars: echo '%s' >> /etc/profile", envars)
node.ssh.execute("echo '" + envars + "' >> /etc/profile")
def enableSchedulingInfo(self):
"""
Enable job scheduling info output for 'qstat -j'
"""
log.info("Enabling job scheduling info")
envars = self.exportEnvironmentVars()
log.debug(envars + self.masterbinroot + "/qconf -ssconf")
queue_template = subprocess.Popen(envars + self.masterbinroot + "/qconf -ssconf", stdout=subprocess.PIPE, shell=True).stdout.read()
log.debug("sge.CreateCell.enableSchedulingInfo BEFORE queue_template: %s", queue_template)
match = "schedd_job_info false"
insert = "schedd_job_info true"
queue_template = string.replace(queue_template, match, insert)
log.debug("sge.CreateCell.enableSchedulingInfo AFTER queue_template: %s", queue_template)
pid = os.getpid()
filename = "/tmp/queue-" + str(os.getpid()) + ".txt"
queue_file = open(filename, 'w')
print >> queue_file, queue_template
queue_file.close()
cmd = envars + self.masterbinroot + "/qconf -Msconf " + filename
log.debug(cmd)
os.system(cmd)
remove = "rm -fr " + filename
log.debug(remove)
os.system(remove)
def addParallelEnvironment(self, master):
"""
Add 'threaded' parallel environment
"""
log.info("Adding 'threaded' parallel environment")
sge_pe_template = """
pe_name threaded
slots %s
user_lists NONE
xuser_lists NONE
start_proc_args /bin/true
stop_proc_args /bin/true
allocation_rule $pe_slots
control_slaves TRUE
job_is_first_task FALSE
urgency_slots min
accounting_summary FALSE
"""
log.debug("addParallelEnvironment sge_pe_template: %s", sge_pe_template)
#### PRINT TEMPLATE FILE
pe_file = master.ssh.remote_file("/tmp/pe.txt")
        # use the configured slot count rather than a hard-coded value
        print >> pe_file, sge_pe_template % self.slots
pe_file.close()
envars = self.exportEnvironmentVars()
master.ssh.execute(envars + self.masterbinroot + "/qconf -Ap %s &> /tmp/pe.out" % pe_file.name)
master.ssh.execute(envars + self.masterbinroot + '/qconf -mattr queue pe_list "threaded" all.q &> /tmp/pe2q.out')
def setHeadSubmit(self, master):
"""
Add head node to submit and admin hosts lists on master
"""
log.info("Adding head node to submit hosts and admin hosts lists")
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
envars = self.exportEnvironmentVars()
add_submit = envars + self.masterbinroot + '/qconf -as ' + self.head_ip
add_admin = envars + self.masterbinroot + '/qconf -ah ' + self.head_ip
log.info("sge.CreateCell.setHeadSubmit %s", add_submit)
master.ssh.execute(add_submit)
log.info("sge.CreateCell.setHeadSubmit %s", add_admin)
master.ssh.execute(add_admin)
def restartSge(self, node):
"""
Restart SGE qmaster (master) and execd (master + nodes) daemons
"""
log.info("Restarting SGE qmaster and execd daemons")
binroot = self.getRemoteBinRoot(node)
log.info("CreateCell.restartSge binroot: %s", binroot)
envars = self.exportEnvironmentVars()
stop_execd = envars + binroot + '/qconf -ke all'
stop_qmaster = envars + binroot + '/qconf -km'
start_qmaster = envars + binroot + '/sge_qmaster'
start_execd = envars + binroot + '/sge_execd'
sleep = 1
log.info("sge.CreateCell.restartSge Doing RESTART SGE: %s (%s)", node.alias, node.private_ip_address)
#### KILL ANY LINGERING TERMINATED PROCESSES
killall = "/bin/ps aux | grep sgeadmin | cut -c9-14 | xargs -n1 -iPID /bin/kill -9 PID &> /dev/null"
log.info(killall)
node.ssh.execute(killall, True, False, True)
killall = "/bin/ps aux | grep root | grep sge | cut -c9-14 | xargs -n1 -iPID /bin/kill -9 PID &> /dev/null"
log.info(killall)
node.ssh.execute(killall, True, False, True)
log.info("sge.CreateCell.restartSge node.alias: %s", node.alias)
if node.alias == "master":
time.sleep(float(sleep))
log.info("sge.CreateCell.restartSge %s", start_qmaster)
node.ssh.execute(start_qmaster)
log.info("sge.CreateCell.restartSge %s", start_execd)
node.ssh.execute(start_execd)
def settingsCommand(self):
target = self.root + "/" + self.cell + "/common"
cmd = 'cd ' + target + '; '
cmd += self.exportEnvironmentVars()
cmd += self.root + '/util/create_settings.sh ' + target
log.debug("sge.CreateCell.createSettings cmd: %s", cmd)
return cmd
def createSettings(self, node):
"""
Generate settings.sh file containing SGE_CELL, SGE_ROOT and port info
"""
log.info("Generating settings.sh file")
log.debug("sge.CreateCell.createSettings CreateCell.createSettings(master)")
cmd = self.settingsCommand()
log.debug("sge.CreateCell.createSettings cmd: %s", cmd)
node.ssh.execute(cmd)
def exportEnvironmentVars(self):
vars = 'export SGE_ROOT=' + self.root + '; '
vars += 'export SGE_CELL=' + self.cell + '; '
vars += 'export SGE_QMASTER_PORT=' + self.qmasterport + '; '
vars += 'export SGE_EXECD_PORT=' + self.execdport + '; '
return vars
def updateHeadIp(self):
"""
Set hostname as head_ip (in case has changed due to reboot)
"""
log.info("Updating hostname on head node")
log.debug("sge.CreateCell.updateHeadIp self.head_long_ip: %s", self.head_long_ip)
cmd = "hostname " + self.head_long_ip
log.debug("sge.CreateCell.updateHeadIp cmd: %s", cmd)
os.system(cmd)
def updateHeadActQmaster(self, master):
"""
Replace 'master' with 'ip-XXX-XXX-XXX-XXX' hostname in act_qmaster file
"""
log.info("Updating act_qmaster file")
log.debug("sge.CreateCell.updateHeadActQmaster CreateCell.updateHeadActQmaster(nodes)")
target = self.root + "/" + self.cell
act_qmaster = target + "/common/act_qmaster"
log.debug("sge.CreateCell.updateHeadActQmaster act_qmaster: %s", act_qmaster)
hostname = self.getHostname(master)
log.debug("sge.CreateCell.updateHeadActQmaster hostname: %s", hostname)
cmd = "echo '" + hostname + "' > " + act_qmaster
log.debug("sge.CreateCell.updateHeadActQmaster cmd: %s", cmd)
os.system(cmd)
def setMasterInfo(self, master):
"""
Set ip, dns name and instance ID in 'qmaster_info' file
"""
target = self.root + "/" + self.cell
qmaster_info = target + "/qmaster_info"
log.info("Setting qmaster_info file: %s", qmaster_info)
instanceid = master.ssh.execute("curl -s http://169.254.169.254/latest/meta-data/instance-id")
log.info("CreateCell.setMasterInfo instanceid: %s", instanceid)
cmd = "echo '" \
+ master.private_dns_name + "\t" \
+ master.private_ip_address + "\t" \
+ instanceid[0] + "\t" \
+ master.public_dns_name + "\t" \
+ master.ip_address \
+ "' > " + qmaster_info
log.info("CreateCell.setMasterInfo cmd: %s", cmd)
os.system(cmd)
def copyCellCommands(self):
source = self.root + "/default"
target = self.root + "/" + self.cell
return (
'mkdir ' + target + ' &> /dev/null',
'rsync -a ' + source + "/* " + target + " --exclude *tar.gz",
'chown -R sgeadmin:sgeadmin ' + target
)
def copyCellOnHead(self):
"""
Copy cell dir from default dir
"""
log.info("Copying cell directory on head node")
log.debug("sge.CreateCell.copyCellOnHead CreateCell.copyCellOnHead()")
commands = self.copyCellCommands()
log.debug("sge.CreateCell.copyCellOnHead commands: %s", commands)
target = self.root + "/" + self.cell
log.debug("sge.CreateCell.copyCell target: %s", target)
log.debug("sge.CreateCell.copyCellOnHead os.path.isdir(target): %s", os.path.isdir(target))
if not os.path.isdir(target):
for command in commands:
log.info(command)
os.system(command)
##### CREATE NEW settings.sh FILE
command = self.settingsCommand()
log.info(command)
os.system(command)
def copyCell(self, node):
"""
Copy cell dir from default dir
"""
log.info("Copying cell directory on %s", node.alias)
log.debug("sge.CreateCell.copyCell CreateCell.copyCell(" + node.alias + ")")
commands = self.copyCellCommands()
log.debug("sge.CreateCell.copyCell commands: %s", commands)
target = self.root + "/" + self.cell
log.debug("sge.CreateCell.copyCell target: %s", target)
log.debug("sge.CreateCell.copyCell os.path.isdir(target): %s", os.path.isdir(target))
#if not os.path.isdir(target):
for command in commands:
log.info(command)
node.ssh.execute(command, True, False, True)
#### PAUSE TO ALLOW FILE SYSTEM TO CATCH UP
time.sleep(2)
##### CREATE NEW settings.sh FILE
command = self.settingsCommand()
log.info("Creating settings.sh file")
log.info(command)
os.system(command)
def on_add_node(self, node, nodes, master, user, user_shell, volumes):
log.info("Doing 'on_add_node' for plugin: sge.CreateCell");
log.info("Adding %s", node.alias)
log.debug("sge.CreateCell.on_add_node CreateCell.on_add_node(self, node, nodes, master, user, user_shell, volumes)")
log.debug("sge.CreateCell.on_add_node node.private_dns_name: %s" % node.private_dns_name)
#### SET HEAD NODE INTERNAL IP
        self.getHeadIp()
#### ADD ENVIRONMENT VARIABLES TO /etc/profile ON MASTER
self.addEnvarsToProfile(node)
##### CREATE NEW CELL DIRECTORY ON HEAD AND MASTER
        self.copyCell(node)
##### RESTART SGE ON NODE
self.restartSge(node)
#### ADD NODE TO @allhosts GROUP
self.addToAllhosts(node, master)
log.info("Completed 'on_add_node' for plugin: sge.CreateCell");
def on_remove_node(self, node, nodes, master, user, user_shell, volumes):
log.info("Doing on_remove_node for plugin: sge.CreateCell")
log.info("Removing %s " % node.alias)
log.debug("sge.CreateCell.on_remove_node node.private_dns_name: %s" % node.private_dns_name)
|
import errno
import glob
import gzip
import hashlib
import logging
import os
import re
import subprocess
import sys
import textwrap
import tempfile
from collections import defaultdict
import ftputil
import pandas as pd
import requests
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from colors import green
from bs4 import BeautifulSoup
from tabulate import tabulate
from clashchimeras.parsers import Fasta
logger = logging.getLogger('root')
class Releases:
def __init__(self, gencodeOrganism='H.sapiens', mirbaseOrganism='hsa',
path=os.path.join(os.path.expanduser("~"), '.CLASHchimeras')):
organisms = {'H.sapiens': 'http://www.gencodegenes.org/releases/',
'M.musculus': 'http://www.gencodegenes.org/mouse_releases/'}
shortOrganisms = {'H.sapiens': 'human',
'M.musculus': 'mouse'}
self.gencodeOrganism = gencodeOrganism
self.mirbaseOrganism = mirbaseOrganism
self.path = path
self.url = organisms[self.gencodeOrganism]
self.gencodeShortOrganism = shortOrganisms[self.gencodeOrganism]
self.mirbaseUrl = 'ftp://mirbase.org/pub/mirbase/CURRENT/README'
self.df = self.openUrl()
self.mirbaseDf = self.openMirbaseReadme()
self.selectGencodeRelease()
self.selectMirbaseRelease()
g = Gencode(release=self.gencodeRelease,
path=self.gencodePath,
ftpDir=self.gencodeFtpDir)
g.download()
m = Mirbase(release=self.mirbaseRelease,
path=self.mirbasePath,
ftpDir=self.mirbaseFtpDir,
organism=self.mirbaseOrganism)
m.download()
m.process()
def openUrl(self):
r = requests.get(self.url)
data = r.text
    soup = BeautifulSoup(data, 'html.parser')
table = soup.find("table")
headings = [th.get_text().strip() for th in table.find("tr").find_all("th")]
dataset = defaultdict(dict)
for index, row in enumerate(table.find_all("tr")[1:]):
for i, j in zip(headings,
(td.get_text().strip() for td in row.find_all("td"))):
dataset[index][i] = j
return pd.DataFrame(dataset).transpose()
def openMirbaseReadme(self):
with ftputil.FTPHost('mirbase.org', 'anonymous', 'anonymous') as fH:
fobj = fH.open('pub/mirbase/CURRENT/README')
store = False
dataset = defaultdict(dict)
index = 0
for line in fobj.readlines():
if store:
row = line.strip().split()
if len(row) == 3 and row[1][0].isdigit():
dataset[index]['Version'] = row[0]
dataset[index]['Date'] = row[1]
dataset[index]['Entries'] = row[2]
index += 1
if 'HISTORY' in line:
store = True
return pd.DataFrame(dataset).transpose()
def downloadMirbaseRelease(self, key):
logger.warn('Are you sure??')
decision = input('Y/n: ')
if decision.lower() == 'y' or decision == '':
self.mirbaseRelease = self.mirbaseDf.loc[int(key)]['Version']
os.makedirs(os.path.join(self.path, 'Mirbase',
str(self.mirbaseRelease)), exist_ok=True)
self.mirbasePath = os.path.join(self.path, 'Mirbase',
str(self.mirbaseRelease))
self.mirbaseFtpDir = os.path.join('pub/mirbase', self.mirbaseRelease)
return True
def downloadGencodeRelease(self, key):
logger.warn('Are you sure??')
decision = input('Y/n: ')
if decision.lower() == 'y' or decision == '':
self.gencodeRelease = self.df.loc[int(key)]['GENCODE release']
os.makedirs(os.path.join(self.path, 'Gencode', self.gencodeOrganism,
str(self.gencodeRelease)), exist_ok=True)
self.gencodePath = os.path.join(self.path, 'Gencode',
self.gencodeOrganism,
str(self.gencodeRelease))
self.gencodeFtpDir = os.path.join('pub/gencode/Gencode_' +
self.gencodeShortOrganism,
'release_' + self.gencodeRelease)
return True
def selectGencodeRelease(self):
cols = self.df.columns
logger.info("Select the release that you want \n{}".format(
tabulate(self.df[[cols[2], cols[0],
cols[1], cols[3]]],
headers=['Index', cols[2], cols[0], cols[1], cols[3]],
tablefmt="simple")))
    logger.warn('Please bear in mind that the automatic download relies on a '
                'regex search which is only known to work for Gencode '
                'release 17 and higher')
releaseKeys = self.df.index
while True:
logger.warn('Please select which Gencode release to use (select index ' +
'[{}]): '.format(min(self.df.index)))
key = input('Index: ')
if key.isdigit() and int(key) in releaseKeys:
logger.warn('This will download the Gencode release {} which '
'corresponds to Ensembl release {} and Genome assembly '
                   'version {} frozen on {}'.format(
self.df.loc[int(key)]['GENCODE release'],
self.df.loc[int(key)]['Ensembl release'],
self.df.loc[int(key)]['Genome assembly version'],
self.df.loc[int(key)]['Freeze date *']))
if self.downloadGencodeRelease(key):
break
elif key == '':
logger.warn('This will download the latest Gencode release {} which '
'corresponds to Ensembl release {} and Genome assembly '
                   'version {} frozen on {}'.format(
self.df.loc[min(self.df.index)]['GENCODE release'],
self.df.loc[min(self.df.index)]['Ensembl release'],
self.df.loc[min(self.df.index)]['Genome assembly '
'version'],
self.df.loc[min(self.df.index)]['Freeze date *']))
if self.downloadGencodeRelease(min(self.df.index)):
break
else:
logger.warn('Not a valid release index:')
def selectMirbaseRelease(self):
cols = self.mirbaseDf.columns
logger.info("Select the Mirbase release you want \n{}".format(
tabulate(self.mirbaseDf, headers=['Index', cols[0], cols[1],
cols[2]], tablefmt="simple")))
releaseKeys = self.mirbaseDf.index
while True:
logger.warn('Please select which Mirbase release to use (select index ' +
'[{}]): '.format(max(self.mirbaseDf.index)))
key = input('Index: ')
if key.isdigit() and int(key) in releaseKeys:
logger.warn('This will download miRBase release {} which contains {} '
                    'entries and was released on {}'.format(
self.mirbaseDf.loc[int(key)]['Version'],
self.mirbaseDf.loc[int(key)]['Entries'],
self.mirbaseDf.loc[int(key)]['Date']))
if self.downloadMirbaseRelease(key):
break
elif key == '':
logger.warn('This will download miRBase release {} which contains {} '
                    'entries and was released on {}'.format(
self.mirbaseDf.loc[max(self.mirbaseDf.index)]['Version'],
self.mirbaseDf.loc[max(self.mirbaseDf.index)]['Entries'],
self.mirbaseDf.loc[max(self.mirbaseDf.index)]['Date']))
if self.downloadMirbaseRelease(max(self.mirbaseDf.index)):
break
else:
logger.warn('Not a valid release index:')
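# Hedged usage sketch (not part of the original module): constructing
# Releases() scrapes the Gencode release table, prompts on stdin for release
# indexes, then downloads and checksum-verifies the chosen Gencode and
# miRBase files under ~/.CLASHchimeras, e.g.:
#
#   Releases(gencodeOrganism='H.sapiens', mirbaseOrganism='hsa')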
class Arguments:
def __init__(self, args, type=None):
self.args = args
if type == 'align':
if args.genomeIndex:
self.args.genomeIndex = os.path.expanduser(args.genomeIndex)
if args.transcriptomeIndex:
self.args.transcriptomeIndex = os.path.expanduser(
args.transcriptomeIndex)
if args.input:
self.args.input = os.path.expanduser(args.input)
if args.smallRNAIndex:
self.args.smallRNAIndex = os.path.expanduser(args.smallRNAIndex)
if self.args.targetRNAIndex:
self.args.targetRNAIndex = os.path.expanduser(args.targetRNAIndex)
if args.output:
self.args.output = os.path.expanduser(args.output)
if type == 'find':
if args.smallRNAAnnotation:
self.args.smallRNAAnnotation = os.path.expanduser(
args.smallRNAAnnotation)
if args.targetRNAAnnotation:
self.args.targetRNAAnnotation = os.path.expanduser(
args.targetRNAAnnotation)
if args.smallRNA:
self.args.smallRNA = os.path.expanduser(args.smallRNA)
if args.targetRNA:
self.args.targetRNA = os.path.expanduser(args.targetRNA)
self.exit = False
def hash(self, file):
sha256 = hashlib.sha256()
    with open(file, 'rb') as f:
while True:
buf = f.read(8192)
if not buf:
break
sha256.update(buf)
return sha256.hexdigest()
def validateFind(self):
if not os.path.exists(self.args.smallRNA):
logger.error('{} not found...'.format(self.args.smallRNA))
self.exit = True
if not os.path.exists(self.args.targetRNA):
      logger.error('{} not found...'.format(self.args.targetRNA))
self.exit = True
if self.args.getGenomicLocationsSmallRNA:
if not self.args.smallRNAAnnotation:
logger.error('--smallRNAAnnotation -sa not specified. If you want '
'genomic locations for smallRNA, please enable '
'--getGenomicLocationsSmallRNA -ggs and specify '
'annotation file using --smallRNAAnnotation -sa')
self.exit = True
else:
if not os.path.exists(self.args.smallRNAAnnotation):
logger.error('{} not found'.format(self.args.smallRNAAnnotation))
self.exit = True
if self.args.getGenomicLocationsTargetRNA:
if not self.args.targetRNAAnnotation:
logger.error('--targetRNAAnnotation -ta not specified. If you want '
'genomic locations for targetRNA, please enable '
'--getGenomicLocationsTargetRNA -ggt and specify '
'annotation file using --targetRNAAnnotation -ta')
self.exit = True
else:
if not os.path.exists(self.args.targetRNAAnnotation):
logger.error('{} not found...'.format(self.args.targetRNAAnnotation))
self.exit = True
    if self.hash(self.args.smallRNA) == self.hash(self.args.targetRNA):
      logger.error('CLASHChimeras does not detect chimeras between the same '
                   'RNA type yet... Please hang in there, we are planning it '
                   'in the future')
      self.exit = True
    if self.exit:
      logger.error('Exiting because of the above errors...')
      sys.exit()
def validateAlign(self):
if not os.path.exists(self.args.input):
logger.error('{} not found...'.format(self.args.input))
self.exit = True
if self.args.run == 'bowtie2' and (not self.args.smallRNAIndex or
not self.args.targetRNAIndex):
logger.error('{}'.format('Please specify --smallRNAIndex -si and ' +
'--targetRNAIndex -ti properly...'))
self.exit = True
if self.args.smallRNAIndex:
bts = glob.glob(self.args.smallRNAIndex + '*.bt2')
if not len(bts) >= 6:
logger.error('Something wrong with the {}'.format(
self.args.smallRNAIndex))
logger.error("Can't find all the .bt2 files for that index")
self.exit = True
if self.args.targetRNAIndex:
bts = glob.glob(self.args.targetRNAIndex + '*.bt2')
if not len(bts) >= 6:
logger.error('Something wrong with the {}'.format(
self.args.targetRNAIndex))
logger.error("Can't find all the .bt2 files for that index")
self.exit = True
if self.args.genomeIndex:
bts = glob.glob(self.args.genomeIndex.strip() + '*.bt2')
if not len(bts) >= 6:
logger.error('Something wrong with the {}'.format(
self.args.genomeIndex))
logger.error("Can't find all the .bt2 files for that index")
self.exit = True
if self.args.run == 'tophat' and not self.args.genomeIndex:
logger.error('Please provide genome index if you want to use tophat..')
self.exit = True
if self.exit:
sys.exit()
class Gencode:
"""This class makes sure the required files (sequences, annotation etc.) are
present and in working order. It generates sha256 checksums and autodownloads
the required files from Gencode Genes.
"""
def __init__(self, release=None, user="anonymous", password=None,
path=None, ftpDir=None):
self.downloaded = []
self.release = release
    self.reList = [r'^GRC.+\.genome\.fa',
                   r'^gencode.+chr_patch_hapl_scaff\.annotation\.gtf',
                   r'^gencode.+pc_transcripts\.fa',
                   r'^gencode.+tRNAs\.gtf',
                   r'^gencode.+lncRNA_transcripts\.fa']
self.host = "ftp.sanger.ac.uk"
self.user = user
self.path = path
self.files = []
self.otherFiles = {}
biotypes = ['snRNA', 'snoRNA', 'misc_RNA', 'tRNA']
for b in biotypes:
self.otherFiles[b] = 'gencode.v{}.{}_transcripts.fa'.format(self.release,
b)
if self.user == "anonymous":
self.password = "anonymous"
else:
self.password = password
ftp_host = ftputil.FTPHost(self.host, self.user, self.password)
self.dir = ftpDir
fileList = ftp_host.listdir(self.dir)
for file in fileList:
for rEx in self.reList:
if re.match(rEx, file):
if not 'primary_assembly' in file:
self.files.append(file)
ftp_host.close()
_files = self.files.copy()
_otherFiles = self.otherFiles.copy()
for file in _files:
logger.debug('Checking %s' % file)
_file = file.rpartition(".")[0]
if os.path.exists(os.path.join(self.path, _file + '.sha256sum')) and \
os.path.exists(os.path.join(self.path, _file)):
with open(os.path.join(self.path, _file + '.sha256sum')) as f:
s = f.readline().rstrip()
_s = self.hash(os.path.join(self.path, _file))
if s == _s:
logger.info("%s is present and verified" % _file)
self.files.remove(file)
else:
          logger.warn("%s is downloaded but doesn't match the sha256sum" % _file)
logger.error('Will be downloaded again')
else:
logger.warn('%s will be downloaded' % _file)
for biotype, file in _otherFiles.items():
logger.debug('Checking %s' % file)
_file = file
if os.path.exists(os.path.join(self.path, _file + '.sha256sum')) and \
os.path.exists(os.path.join(self.path, _file)):
with open(os.path.join(self.path, _file + '.sha256sum')) as f:
s = f.readline().rstrip()
_s = self.hash(os.path.join(self.path, _file))
if s == _s:
logger.info("%s is present and verified" % file)
self.otherFiles.pop(biotype)
else:
          logger.warn("%s is generated but doesn't match the sha256sum" % file)
logger.error('Will be generated again')
else:
logger.warn('%s will be generated' % file)
def download(self):
if len(self.files) == 0:
logger.info('Gencode files are downloaded and checksum verified...')
else:
with ftputil.FTPHost(self.host, self.user, self.password) as fH:
for file in self.files:
logger.info('Downloading %s from ftp.sanger.ac.uk' % file)
fH.download(self.dir + '/' + file, os.path.join(self.path, file),
callback=None)
self.downloaded.append(file)
_file = file.rpartition(".")[0]
p = subprocess.Popen(['gunzip', os.path.join(self.path, file)])
p.communicate()
sha256sum = self.hash(os.path.join(self.path, _file))
with open(os.path.join(self.path, _file + '.sha256sum'), 'w') as wH:
print(sha256sum, file=wH)
logger.info('Downloading, extraction and hashing of %s finished' %
file)
gtfFiles = glob.glob(os.path.join(self.path, "*.annotation.gtf"))
if len(gtfFiles) == 0:
      logger.warn('This release does not contain an annotation file, or it '
                  'was not matched by the regex. Please download it manually '
                  'from {}'.format(self.dir))
gtfFile = None
elif len(gtfFiles) == 2:
gtfFiles.sort()
gtfFile = gtfFiles[1]
elif len(gtfFiles) == 1:
gtfFile = gtfFiles[0]
genomeFiles = glob.glob(os.path.join(self.path, "*.genome.fa"))
if len(genomeFiles) == 0:
      logger.warn('This release does not contain a genome file, or it was '
                  'not matched by the regex. Please download it manually '
                  'from {}'.format(self.dir))
genomeFile = None
else:
genomeFile = genomeFiles[0]
tRNAgtfFiles = glob.glob(os.path.join(self.path, "*.tRNAs.gtf"))
if len(tRNAgtfFiles) == 0:
      logger.warn('This release does not contain a tRNA annotation file, or '
                  'it was not matched by the regex. Please download it '
                  'manually from {}'.format(self.dir))
tRNAgtfFile = None
else:
tRNAgtfFile = tRNAgtfFiles[0]
if len(self.otherFiles) == 0:
logger.info('Other fasta files are generated and checksum verified...')
else:
for biotype, file in self.otherFiles.items():
if biotype == 'tRNA' and (tRNAgtfFile and genomeFile):
fasta = Fasta(genome=genomeFile, gtf=tRNAgtfFile)
fasta.getBiotype(biotype='tRNA', output=os.path.join(self.path,
file))
sha256sum = self.hash(os.path.join(self.path, file))
with open(os.path.join(self.path, file + '.sha256sum'), 'w') as wH:
print(sha256sum, file=wH)
logger.info('Extraction and hashing of %s finished' % file)
elif gtfFile and genomeFile:
fasta = Fasta(genome=genomeFile, gtf=gtfFile)
fasta.getBiotype(biotype=biotype, output=os.path.join(self.path,
file))
sha256sum = self.hash(os.path.join(self.path, file))
with open(os.path.join(self.path, file + '.sha256sum'), 'w') as wH:
print(sha256sum, file=wH)
logger.info('Extraction and hashing of %s finished' % file)
def hash(self, file):
sha256 = hashlib.sha256()
    with open(file, 'rb') as f:
while True:
buf = f.read(8192)
if not buf:
break
sha256.update(buf)
return sha256.hexdigest()
class Mirbase:
"""This class makes sure that requried files (sequences, annotations etc.)
from Mirbase are downloaded. It generates sha256 checksums and autodownloads
the required files
"""
def __init__(self, release=None, user="anonymous", password=None,
path=os.path.join(os.path.expanduser("~"), '.CLASHchimeras'),
organism='hsa',
ftpDir=None):
    self.reList = [r'^hairpin\.fa\.gz', r'^mature\.fa\.gz']
organisms = {}
with ftputil.FTPHost('mirbase.org', 'anonymous', 'anonymous') as fH:
tH, tP = tempfile.mkstemp()
fH.download(os.path.join('pub/mirbase', release, 'organisms.txt.gz'), tP)
for i in gzip.open(tP):
ii = i.decode('utf-8').strip()
if not ii.startswith('#'):
organisms[ii.split("\t")[0]] = ii.split("\t")[2]
self.shortOrganism = organism
self.organism = organisms.get(self.shortOrganism, None)
if not self.organism:
logger.error('{} is not a valid organism name in Mirbase'.format(
self.shortOrganism))
sys.exit()
self.host = 'mirbase.org'
self.release = release
self.user = user
self.path = path
self.files = []
if self.user == "anonymous":
self.password = "anonymous"
else:
self.password = password
ftp_host = ftputil.FTPHost(self.host, self.user, self.password)
self.dir = ftpDir
fileList = ftp_host.listdir(self.dir)
self.files.append('genomes/' + self.shortOrganism + '.gff3')
for file in fileList:
for rEx in self.reList:
if re.match(rEx, file):
self.files.append(file)
ftp_host.close()
_files = self.files.copy()
for file in _files:
logger.debug('Checking %s' % file)
if file.endswith('gz'):
_file = file.rpartition(".")[0]
elif '/' in file:
_file = file.rpartition("/")[2]
if os.path.exists(os.path.join(self.path, _file + '.sha256sum')) and \
os.path.exists(os.path.join(self.path, _file)):
with open(os.path.join(self.path, _file + '.sha256sum')) as f:
s = f.readline().rstrip()
_s = self.hash(os.path.join(self.path, _file))
if s == _s:
logger.info("%s is present and verified" % file)
self.files.remove(file)
else:
          logger.warn("%s is downloaded but doesn't match the sha256sum" % file)
logger.error('Must run download method')
else:
logger.warn('%s will be downloaded when you run download method' % file)
def download(self):
if len(self.files) == 0:
logger.debug('Mirbase files are downloaded and checksum verified...')
else:
with ftputil.FTPHost(self.host, self.user, self.password) as fH:
for file in self.files:
logger.info('Downloading %s from mirbase.org' % file)
if "/" in file:
_file = file.rpartition("/")[2]
else:
_file = file
fH.download(self.dir + '/' + file, os.path.join(self.path, _file),
callback=None)
if _file.endswith('.gz'):
__file = _file.rpartition(".")[0]
p = subprocess.Popen(['gunzip', os.path.join(self.path, __file)])
p.communicate()
else:
__file = _file
sha256sum = self.hash(os.path.join(self.path, __file))
with open(os.path.join(self.path, __file + '.sha256sum'), 'w') as wH:
print(sha256sum, file=wH)
logger.debug('Downloading, extraction and hashing of %s finished' %
file)
self.files = []
def hash(self, file):
sha256 = hashlib.sha256()
    with open(file, 'rb') as f:
while True:
buf = f.read(8192)
if not buf:
break
sha256.update(buf)
return sha256.hexdigest()
def process(self):
files = ['hairpin.fa', 'mature.fa']
for file in files:
temp = []
if os.path.exists(os.path.join(self.path, self.shortOrganism + '-' + file)) \
and os.path.exists(os.path.join(self.path,
self.shortOrganism + '-' + file + '.sha256sum')):
        with open(os.path.join(self.path,
            self.shortOrganism + '-' + file + '.sha256sum')) as f:
          s = f.readline().rstrip()
        _s = self.hash(os.path.join(self.path,
                                    self.shortOrganism + '-' + file))
if s == _s:
logger.info("%s is present and verified" % file)
else:
          logger.warn("%s is downloaded but doesn't match the sha256sum" % file)
else:
        logger.debug('Extracting %s sequences from %s' % (self.organism, file))
with open(os.path.join(self.path, file)) as iH:
for rec in SeqIO.parse(iH, 'fasta'):
if self.organism in rec.description:
_temp_rec = SeqRecord(id=rec.id, description=rec.description,
seq=rec.seq.back_transcribe())
temp.append(_temp_rec)
SeqIO.write(temp, os.path.join(self.path, self.shortOrganism + '-' + file),
'fasta')
        with open(os.path.join(self.path,
            self.shortOrganism + '-' + file + '.sha256sum'), 'w') as wH:
          print(self.hash(os.path.join(self.path,
                                       self.shortOrganism + '-' + file)),
                file=wH)
class Index:
def __init__(self, root=os.path.join(os.path.expanduser("~"),
'.CLASHchimeras'), bowtieExecutable=None, tophatExecutable=None):
self.created = []
if bowtieExecutable is not None:
logger.info('Bowtie2 path {} provided and will be used'
.format(bowtieExecutable))
self.bowtieExecutable = bowtieExecutable
else:
try:
bowtieVersion = float(subprocess.check_output(['bowtie2', '--version']
).decode('utf-8').split("\n")[0].split()[-1].rpartition(".")[0])
fullBowtieVersion = subprocess.check_output(['bowtie2', '--version']
).decode('utf-8').split("\n")[0].split()[-1]
if bowtieVersion < 2.2:
logger.warn("Please update bowtie2, you can download it from")
logger.warn("http://sourceforge.net/projects/bowtie-bio/files/bowtie2/")
else:
logger.info("Bowtie2 version {} found and will be used"
.format(fullBowtieVersion))
self.bowtieExecutable = 'bowtie2'
except OSError as e:
        if e.errno == errno.ENOENT:
logger.error(textwrap.fill("""
Can't find Bowtie2 in your path. Please install it from
http://sourceforge.net/projects/bowtie-bio/files/bowtie2/
"""))
else:
logger.error('Something wrong with your bowtie2 command')
if tophatExecutable is not None:
logger.info('Tophat2 path {} provided and will be used'
.format(tophatExecutable))
self.tophatExecutable = tophatExecutable
else:
try:
tophatVersion = float(subprocess.check_output(['tophat',
'--version']).decode('utf-8').split(" v")[-1].rpartition(".")[0])
fullTophatVersion = subprocess.check_output(['tophat',
'--version']).decode('utf-8').split(" v")[-1].rstrip()
if tophatVersion < 2:
logger.warn("Please update tophat, you can download it from")
logger.warn("http://ccb.jhu.edu/software/tophat/index.shtml")
else:
logger.info("Tophat version {} found and will be used"
.format(fullTophatVersion))
self.tophatExecutable = 'tophat'
except OSError as e:
        if e.errno == errno.ENOENT:
logger.error("Can't find Tophat in your path. Please install it from")
logger.error("http://ccb.jhu.edu/software/tophat/index.shtml")
else:
logger.error('Something wrong with your tophat command')
self.root = root
def create(self):
stdoutHandle = open(os.path.join(self.root, 'CLASHChimeras-Index.stdout'),
'w')
stderrHandle = open(os.path.join(self.root, 'CLASHChimeras-Index.stderr'),
'w')
    indexCreated = False
    genomeIndex = None
for fa in glob.glob(os.path.join(self.root, '*.fa')):
indexName = fa.rpartition(".fa")[0]
self.created.append('{} - {}'.format(indexName, green('Bowtie2',
style='bold')))
if 'genome.fa' in fa:
genomeIndex = fa.rpartition(".")[0]
if len(glob.glob(os.path.join(self.root, indexName + '*.bt2'))) == 6:
logger.info('Bowtie2 index for {} found'.format(fa))
else:
try:
logger.info('Generating bowtie2 index for {}'.format(fa))
indexCreated = True
p = subprocess.Popen([self.bowtieExecutable + '-build', fa, indexName],
stdout=stdoutHandle, stderr=stderrHandle)
p.communicate()
logger.info('Bowtie2 index for {} is generated'.format(fa))
except OSError as e:
          if e.errno == errno.ENOENT:
logger.error("Can't find bowtie2-build in your path...")
logger.error("Please make sure bowtie2 is in your path and")
logger.error("bowtie2-build can run from your shell")
else:
logger.error("Something wrong with your bowtie2-build command")
if indexCreated:
logger.warn('The stdout and stderr for bowtie2-build are stored in')
logger.warn('CLASHChimeras-Index.stdout and CLASHChimeras-Index.stderr')
for gtf in glob.glob(os.path.join(self.root, '*.annotation.gtf')):
indexName = gtf.rpartition("/")[-1].rpartition(".")[0]
self.created.append('{} - {}'.format(gtf.rpartition(".")[0], green(
'Tophat2 Transcriptome', style='bold')))
      if len(glob.glob(os.path.join(self.root, indexName + '*.bt2'))) == 6:
        logger.info('Tophat2 transcriptome index found for {}'.format(gtf))
      elif genomeIndex is None:
        logger.error('No genome fasta/index found; skipping Tophat2 '
                     'transcriptome index for {}'.format(gtf))
      else:
try:
indexCreated = True
logger.info('Generating tophat transcriptome index for {}'
.format(gtf))
p = subprocess.Popen([self.tophatExecutable, '-G', gtf,
'--transcriptome-index', os.path.join(
self.root, indexName),
genomeIndex], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
logger.info('Tophat2 transcriptome index generated for {}'
.format(gtf))
except OSError as e:
          if e.errno == errno.ENOENT:
logger.error("Can't find tophat in your path...")
logger.error("Please make sure tophat is in your path")
else:
logger.error("Something wrong with your tophat command")
stdoutHandle.close()
stderrHandle.close()
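# Hedged usage sketch (not part of the original module): build Bowtie2 (and,
# where a genome index exists, Tophat2 transcriptome) indexes for every *.fa
# and *.annotation.gtf under the CLASHchimeras root, then report what was
# created.
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  idx = Index()
  idx.create()
  for entry in idx.created:
    print(entry)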
|
import unittest
import numpy as np
from pydl.models.pipeline import Reshaper3D, Reshaper4D, Reshaper5D
class Reshaper3DTestCase(unittest.TestCase):
def test_get_config(self):
r = Reshaper3D(n_steps=5)
config = r.get_config()
expected_config = dict(
n_steps=5,
name='reshaper'
)
self.assertDictEqual(config, expected_config)
def test_from_config(self):
config = dict(
n_steps=5
)
r = Reshaper3D.from_config(config=config)
self.assertIsInstance(r, Reshaper3D)
self.assertEqual(r.n_steps, 5)
def test_to_json(self):
r = Reshaper3D(n_steps=5)
r_json = r.to_json()
expected_json = '{"class_name": "Reshaper3D", "config": {"name": "reshaper", "n_steps": 5}}'
self.assertEqual(expected_json, r_json)
def test_set_params(self):
r = Reshaper3D(n_steps=5)
self.assertEqual(r.n_steps, 5)
r.set_params(n_steps=6)
self.assertEqual(r.n_steps, 6)
def test_transform(self):
r = Reshaper3D(n_steps=5)
x, y = create_dataset()
self.assertEqual(x.shape, (20, 5))
self.assertEqual(y.shape, (20,))
x, y = r.transform(x, y)
self.assertEqual(x.shape, (15, 5, 5))
self.assertEqual(y.shape, (15,))
class Reshaper4DTestCase(unittest.TestCase):
def test_get_config(self):
r = Reshaper4D(n_steps=5, n_seqs=3)
config = r.get_config()
expected_config = dict(
n_steps=5,
n_seqs=3,
name='reshaper'
)
self.assertDictEqual(config, expected_config)
def test_from_config(self):
config = dict(
n_steps=5,
n_seqs=3
)
r = Reshaper4D.from_config(config=config)
self.assertIsInstance(r, Reshaper4D)
self.assertEqual(r.n_steps, 5)
self.assertEqual(r.n_seqs, 3)
def test_to_json(self):
r = Reshaper4D(n_steps=5, n_seqs=3)
r_json = r.to_json()
expected_json = '{"class_name": "Reshaper4D", "config": {"name": "reshaper", "n_steps": 5, "n_seqs": 3}}'
self.assertEqual(expected_json, r_json)
def test_set_params(self):
r = Reshaper4D(n_steps=5, n_seqs=4)
self.assertEqual(r.n_steps, 5)
self.assertEqual(r.n_seqs, 4)
r.set_params(n_steps=6, n_seqs=5)
self.assertEqual(r.n_steps, 6)
self.assertEqual(r.n_seqs, 5)
def test_transform(self):
r = Reshaper4D(n_steps=5, n_seqs=3)
x, y = create_dataset()
self.assertEqual(x.shape, (20, 5))
self.assertEqual(y.shape, (20,))
x, y = r.transform(x, y)
self.assertEqual(x.shape, (12, 3, 5, 5))
self.assertEqual(y.shape, (12,))
class Reshaper5DTestCase(unittest.TestCase):
def test_get_config(self):
r = Reshaper5D(n_steps=5, n_seqs=3)
config = r.get_config()
expected_config = dict(
n_steps=5,
n_seqs=3,
name='reshaper'
)
self.assertDictEqual(config, expected_config)
def test_from_config(self):
config = dict(
n_steps=5,
n_seqs=3
)
r = Reshaper5D.from_config(config=config)
self.assertIsInstance(r, Reshaper5D)
self.assertEqual(r.n_steps, 5)
self.assertEqual(r.n_seqs, 3)
def test_to_json(self):
r = Reshaper5D(n_steps=5, n_seqs=3)
r_json = r.to_json()
expected_json = '{"class_name": "Reshaper5D", "config": {"name": "reshaper", "n_steps": 5, "n_seqs": 3}}'
self.assertEqual(expected_json, r_json)
def test_set_params(self):
r = Reshaper5D(n_steps=5, n_seqs=4)
self.assertEqual(r.n_steps, 5)
self.assertEqual(r.n_seqs, 4)
r.set_params(n_steps=6, n_seqs=5)
self.assertEqual(r.n_steps, 6)
self.assertEqual(r.n_seqs, 5)
def test_transform(self):
r = Reshaper5D(n_steps=5, n_seqs=3)
x, y = create_dataset()
self.assertEqual(x.shape, (20, 5))
self.assertEqual(y.shape, (20,))
x, y = r.transform(x, y)
self.assertEqual(x.shape, (12, 3, 1, 5, 5))
self.assertEqual(y.shape, (12,))
def create_dataset():
x = [np.random.random_sample((20, 1)) for _ in range(5)]
y = np.random.random_sample(20)
return np.hstack(x), y
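# Hedged illustration (not part of the original test suite): run the same
# shape arithmetic the tests assert, end to end, on the synthetic dataset.
if __name__ == '__main__':
    x, y = create_dataset()
    x3, y3 = Reshaper3D(n_steps=5).transform(x, y)
    print(x3.shape, y3.shape)  # expected per the tests above: (15, 5, 5) (15,)
    unittest.main()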
|
from .sub_resource import SubResource
class ApplicationGatewayUrlPathMap(SubResource):
"""UrlPathMaps give a url path to the backend mapping information for
PathBasedRouting.
:param id: Resource ID.
:type id: str
:param default_backend_address_pool: Default backend address pool resource
of URL path map.
:type default_backend_address_pool: :class:`SubResource
<azure.mgmt.network.v2016_09_01.models.SubResource>`
:param default_backend_http_settings: Default backend http settings
resource of URL path map.
:type default_backend_http_settings: :class:`SubResource
<azure.mgmt.network.v2016_09_01.models.SubResource>`
:param path_rules: Path rule of URL path map resource.
:type path_rules: list of :class:`ApplicationGatewayPathRule
<azure.mgmt.network.v2016_09_01.models.ApplicationGatewayPathRule>`
    :param provisioning_state: Provisioning state of the URL path map
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, default_backend_address_pool=None, default_backend_http_settings=None, path_rules=None, provisioning_state=None, name=None, etag=None):
super(ApplicationGatewayUrlPathMap, self).__init__(id=id)
self.default_backend_address_pool = default_backend_address_pool
self.default_backend_http_settings = default_backend_http_settings
self.path_rules = path_rules
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
|
"""
Base class that contains common methods
"""
class CorpusBase:
def loadLines(self, fileName):
"""
Args:
fileName (str): file to load
Return:
list<dict<str>>: the extracted fields for each line
"""
lines = []
with open(fileName, 'r') as f:
for line in f:
l = line[line.rindex("\t")+1:].strip() # Strip metadata (timestamps, speaker names)
lines.append({"text": l})
return lines
def getConversations(self):
return self.conversations
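# Hedged usage sketch (the corpus file name is hypothetical): loadLines()
# expects one utterance per line, with tab-separated metadata (timestamps,
# speaker names) before the text.
if __name__ == '__main__':
    corpus = CorpusBase()
    for entry in corpus.loadLines('dialogue.tsv'):
        print(entry['text'])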
|
"""
Shows an empty window.
"""
from pylibui.core import App
from pylibui.controls import Window
class MyWindow(Window):
def onClose(self, data):
super().onClose(data)
app.stop()
app = App()
window = MyWindow('Window', 800, 600)
window.setMargined(True)
window.show()
app.start()
app.close()
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_sergeant_major_bothan_male_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","bothan_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from decimal import Decimal
PLUGIN_NAME = "Highlighter"
def calculateGrades(gradeList):
'''
Basic (and default) late calculator only colors late submissions red
so that they can be noticed in the gradebook
'''
for assignment in gradeList:
for problem in assignment:
#check for no submission and skip
if problem is None:
continue
if problem['isLate']:
problem['highlight'] = "red"
return gradeList
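# Hedged usage sketch (the sample data is made up): late problems come back
# tagged with highlight == "red"; missing submissions (None) are skipped.
if __name__ == '__main__':
    sample = [[{'isLate': True}, None, {'isLate': False}]]
    print(calculateGrades(sample))
    # -> [[{'isLate': True, 'highlight': 'red'}, None, {'isLate': False}]]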
|
class SessionStorage:
def get(self, key, default=None):
raise NotImplementedError()
def set(self, key, value, ttl=None):
raise NotImplementedError()
def delete(self, key):
raise NotImplementedError()
def __getitem__(self, key):
        return self.get(key)
def __setitem__(self, key, value):
self.set(key, value)
def __delitem__(self, key):
self.delete(key)
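# Hedged sketch (not part of the original module): a minimal in-memory backend
# showing the contract; the ttl argument is accepted but ignored here.
class DictSessionStorage(SessionStorage):
    def __init__(self):
        self._store = {}

    def get(self, key, default=None):
        return self._store.get(key, default)

    def set(self, key, value, ttl=None):
        self._store[key] = value

    def delete(self, key):
        self._store.pop(key, None)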
|
""" This module contains the PyGTKCodeBuffer-class. This class is a
specialisation of the gtk.TextBuffer and enables syntax-highlighting for
PyGTK's TextView-widget.
    To use the syntax-highlighting feature you have to load a syntax-definition or
specify your own. To load one please read the docs for the SyntaxLoader()
class. """
import gtk
import pango
import re
import sys
import os.path
import xml.sax
import imp
from xml.sax.handler import ContentHandler
from xml.sax.saxutils import unescape
__version__ = "1.0RC2"
__author__ = "Hannes Matuschek <hmatuschek@gmail.com>"
DEFAULT_STYLES = {
'DEFAULT': {'font': 'monospace'},
'comment': {'foreground': '#0000FF'},
'preprocessor': {'foreground': '#A020F0'},
'keyword': {'foreground': '#A52A2A',
'weight': pango.WEIGHT_BOLD},
'special': {'foreground': 'turquoise'},
'mark1': {'foreground': '#008B8B'},
'mark2': {'foreground': '#6A5ACD'},
'string': {'foreground': '#FF00FF'},
'number': {'foreground': '#FF00FF'},
'datatype': {'foreground': '#2E8B57',
'weight': pango.WEIGHT_BOLD},
'function': {'foreground': '#008A8C'},
'link': {'foreground': '#0000FF',
'underline': pango.UNDERLINE_SINGLE}}
def _main_is_frozen():
""" Internal used function. """
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") # old py2exe
or imp.is_frozen("__main__")) # tools/freeze
if _main_is_frozen():
this_module_path = os.path.dirname(sys.executable)
else:
this_module_path = os.path.abspath(os.path.dirname(__file__))
SYNTAX_PATH = [ os.path.join('.', 'syntax'),
this_module_path,
os.path.join(os.path.expanduser('~'),".pygtkcodebuffer"),
os.path.join(sys.prefix,"share","pygtkcodebuffer","syntax")]
DEBUG_FLAG = False
def _log_debug(msg):
if not DEBUG_FLAG:
return
sys.stderr.write("DEBUG: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def _log_warn(msg):
sys.stderr.write("WARN: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def _log_error(msg):
sys.stderr.write("ERROR: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def add_syntax_path(path_or_list):
""" This function adds one (string) or many (list of strings) paths to the
global search-paths for syntax-files. """
global SYNTAX_PATH
# handle list of strings
if isinstance(path_or_list, (list, tuple)):
        for p in reversed(path_or_list):
            SYNTAX_PATH.insert(0, p)
# handle single string
elif isinstance(path_or_list, basestring):
SYNTAX_PATH.insert(0, path_or_list)
    # neither a string nor a list -> type-error
else:
raise TypeError, "Argument must be path-string or list of strings"
class Pattern:
""" More or less internal used class representing a pattern. You may use
this class to "hard-code" your syntax-definition. """
def __init__(self, regexp, style="DEFAULT", group=0, flags=""):
""" The constructor takes at least on argument: the regular-expression.
The optional kwarg style defines the style applied to the string
matched by the regexp.
The kwarg group may be used to define which group of the regular
expression will be used for highlighting (Note: This means that only
the selected group will be highlighted but the complete pattern must
match!)
The optional kwarg flags specifies flags for the regular expression.
            Look at the Python lib-ref for a list of flags and their meaning."""
# assemble re-flag
flags += "ML"; flag = 0
_log_debug("init rule %s -> %s (%s)"%(regexp, style, flags))
for char in flags:
if char == 'M': flag |= re.M
if char == 'L': flag |= re.L
if char == 'S': flag |= re.S
if char == 'I': flag |= re.I
if char == 'U': flag |= re.U
if char == 'X': flag |= re.X
# compile re
try: self._regexp = re.compile(regexp, flag)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
self._group = group
self.tag_name = style
def __call__(self, txt, start, end):
m = self._regexp.search(txt)
if not m: return None
mstart, mend = m.start(self._group), m.end(self._group)
s = start.copy(); s.forward_chars(mstart)
e = start.copy(); e.forward_chars(mend)
return (s,e)
class KeywordList(Pattern):
""" This class may be used for hard-code a syntax-definition. It specifies
a pattern for a keyword-list. This simplifies the definition of
keyword-lists. """
def __init__(self, keywords, style="keyword", flags=""):
""" The constructor takes at least on argument: A list of strings
specifying the keywords to highlight.
The optional kwarg style specifies the style used to highlight these
keywords.
The optional kwarg flags specifies the flags for the
(internal generated) regular-expression. """
regexp = "(?:\W|^)(%s)\W"%("|".join(keywords),)
Pattern.__init__(self, regexp, style, group=1, flags=flags)
class String:
""" This class may be used to hard-code a syntax-definition. It simplifies
the definition of a "string". A "string" is something that consists of
a start-pattern and an end-pattern. The end-pattern may be content of
the string if it is escaped. """
def __init__(self, starts, ends, escape=None, style="string"):
""" The constructor needs at least two arguments: The start- and
end-pattern.
The optional kwarg escape specifies a escape-sequence escaping the
end-pattern.
The optional kwarg style specifies the style used to highlight the
string. """
try:
self._starts = re.compile(starts)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
if escape:
end_exp = "[^%(esc)s](?:%(esc)s%(esc)s)*%(end)s"
end_exp = end_exp%{'esc':escape*2,'end':ends}
else:
end_exp = ends
try:
self._ends = re.compile(end_exp)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
self.tag_name = style
def __call__(self, txt, start, end):
start_match = self._starts.search(txt)
if not start_match: return
start_it = start.copy()
start_it.forward_chars(start_match.start(0))
end_it = end.copy()
end_match = self._ends.search(txt, start_match.end(0)-1)
if end_match:
end_it.set_offset(start.get_offset()+end_match.end(0))
return start_it, end_it
class LanguageDefinition:
""" This class is a container class for all rules (Pattern, KeywordList,
...) specifying the language. You have to used this class if you like
to hard-code your syntax-definition. """
def __init__(self, rules):
""" The constructor takes only one argument: A list of rules (i.e
Pattern, KeywordList and String). """
self._grammar = rules
self._styles = dict()
def __call__(self, buf, start, end=None):
# if no end given -> end of buffer
if not end: end = buf.get_end_iter()
mstart = mend = end
mtag = None
txt = buf.get_slice(start, end)
# search min match
for rule in self._grammar:
# search pattern
m = rule(txt, start, end)
if not m: continue
# prefer match with smallest start-iter
if m[0].compare(mstart) < 0:
mstart, mend = m
mtag = rule.tag_name
continue
if m[0].compare(mstart)==0 and m[1].compare(mend)>0:
mstart, mend = m
mtag = rule.tag_name
continue
return (mstart, mend, mtag)
def get_styles(self):
return self._styles
class SyntaxLoader(ContentHandler, LanguageDefinition):
""" This class loads a syntax definition. There have to be a file
named LANGUAGENAME.xml in one of the directories specified in the
global path-list. You may add a directory using the add_syntax_path()
function. """
# some translation-tables for the style-defs:
style_weight_table = {'ultralight': pango.WEIGHT_ULTRALIGHT,
'light': pango.WEIGHT_LIGHT,
'normal': pango.WEIGHT_NORMAL,
'bold': pango.WEIGHT_BOLD,
'ultrabold': pango.WEIGHT_ULTRABOLD,
'heavy': pango.WEIGHT_HEAVY}
style_variant_table = {'normal': pango.VARIANT_NORMAL,
'smallcaps': pango.VARIANT_SMALL_CAPS}
style_underline_table = {'none': pango.UNDERLINE_NONE,
'single': pango.UNDERLINE_SINGLE,
'double': pango.UNDERLINE_DOUBLE}
style_style_table = {'normal': pango.STYLE_NORMAL,
'oblique': pango.STYLE_OBLIQUE,
'italic': pango.STYLE_ITALIC}
style_scale_table = {
'xx_small': pango.SCALE_XX_SMALL,
'x_small': pango.SCALE_X_SMALL,
'small': pango.SCALE_SMALL,
'medium': pango.SCALE_MEDIUM,
'large': pango.SCALE_LARGE,
'x_large': pango.SCALE_X_LARGE,
'xx_large': pango.SCALE_XX_LARGE,
}
def __init__(self, lang_name):
""" The constructor takes only one argument: the language name.
The constructor tries to load the syntax-definition from a
syntax-file in one directory of the global path-list.
An instance of this class IS a LanguageDefinition. You can pass it
to the constructor of the CodeBuffer class. """
LanguageDefinition.__init__(self, [])
ContentHandler.__init__(self)
# search for syntax-files:
fname = None
for syntax_dir in SYNTAX_PATH:
fname = os.path.join(syntax_dir, "%s.xml"%lang_name)
if os.path.isfile(fname): break
_log_debug("Loading syntaxfile %s"%fname)
if not os.path.isfile(fname):
raise Exception("No snytax-file for %s found!"%lang_name)
xml.sax.parse(fname, self)
# Dispatch start/end - document/element and chars
def startDocument(self):
self.__stack = []
def endDocument(self):
del self.__stack
def startElement(self, name, attr):
self.__stack.append( (name, attr) )
if hasattr(self, "start_%s"%name):
handler = getattr(self, "start_%s"%name)
handler(attr)
def endElement(self, name):
if hasattr(self, "end_%s"%name):
handler = getattr(self, "end_%s"%name)
handler()
del self.__stack[-1]
def characters(self, txt):
if not self.__stack: return
name, attr = self.__stack[-1]
if hasattr(self, "chars_%s"%name):
handler = getattr(self, "chars_%s"%name)
handler(txt)
# Handle regexp-patterns
def start_pattern(self, attr):
self.__pattern = ""
self.__group = 0
self.__flags = ''
self.__style = attr['style']
if 'group' in attr.keys(): self.__group = int(attr['group'])
if 'flags' in attr.keys(): self.__flags = attr['flags']
def end_pattern(self):
rule = Pattern(self.__pattern, self.__style, self.__group, self.__flags)
self._grammar.append(rule)
del self.__pattern
del self.__group
del self.__flags
del self.__style
def chars_pattern(self, txt):
self.__pattern += unescape(txt)
# handle keyword-lists
def start_keywordlist(self, attr):
self.__style = "keyword"
self.__flags = ""
if 'style' in attr.keys():
self.__style = attr['style']
if 'flags' in attr.keys():
self.__flags = attr['flags']
self.__keywords = []
def end_keywordlist(self):
kwlist = KeywordList(self.__keywords, self.__style, self.__flags)
self._grammar.append(kwlist)
del self.__keywords
del self.__style
del self.__flags
def start_keyword(self, attr):
self.__keywords.append("")
def end_keyword(self):
if not self.__keywords[-1]:
del self.__keywords[-1]
def chars_keyword(self, txt):
parent,pattr = self.__stack[-2]
if not parent == "keywordlist": return
self.__keywords[-1] += unescape(txt)
#handle String-definitions
def start_string(self, attr):
self.__style = "string"
self.__escape = None
if 'escape' in attr.keys():
self.__escape = attr['escape']
if 'style' in attr.keys():
self.__style = attr['style']
self.__start_pattern = ""
self.__end_pattern = ""
def end_string(self):
strdef = String(self.__start_pattern, self.__end_pattern,
self.__escape, self.__style)
self._grammar.append(strdef)
del self.__style
del self.__escape
del self.__start_pattern
del self.__end_pattern
def chars_starts(self, txt):
self.__start_pattern += unescape(txt)
def chars_ends(self, txt):
self.__end_pattern += unescape(txt)
# handle style
def start_style(self, attr):
self.__style_props = dict()
self.__style_name = attr['name']
def end_style(self):
self._styles[self.__style_name] = self.__style_props
del self.__style_props
del self.__style_name
def start_property(self, attr):
self.__style_prop_name = attr['name']
def chars_property(self, value):
        value = value.strip()
        # convert value
        if self.__style_prop_name in ['font','foreground','background',]:
            pass
        elif self.__style_prop_name == 'variant':
            if not value in self.style_variant_table.keys():
                raise Exception("Unknown style-variant: %s"%value)
            value = self.style_variant_table[value]
        elif self.__style_prop_name == 'underline':
            if not value in self.style_underline_table.keys():
                raise Exception("Unknown underline-style: %s"%value)
            value = self.style_underline_table[value]
        elif self.__style_prop_name == 'scale':
            if not value in self.style_scale_table.keys():
                raise Exception("Unknown scale-style: %s"%value)
            value = self.style_scale_table[value]
        elif self.__style_prop_name == 'weight':
            if not value in self.style_weight_table.keys():
                raise Exception("Unknown style-weight: %s"%value)
            value = self.style_weight_table[value]
        elif self.__style_prop_name == 'style':
            if not value in self.style_style_table.keys():
                raise Exception("Unknown text-style: %s"%value)
            value = self.style_style_table[value]
else:
raise Exception("Unknown style-property %s"%self.__style_prop_name)
# store value
self.__style_props[self.__style_prop_name] = value
class CodeBuffer(gtk.TextBuffer):
""" This class extends the gtk.TextBuffer to support syntax-highlighting.
You can use this class like a normal TextBuffer. """
def __init__(self, table=None, lang=None, styles={}):
""" The constructor takes 3 optional arguments.
table specifies a tag-table associated with the TextBuffer-instance.
This argument will be passed directly to the constructor of the
TextBuffer-class.
lang specifies the language-definition. You have to load one using
the SyntaxLoader-class or you may hard-code your syntax-definition
using the LanguageDefinition-class.
styles is a dictionary used to extend or overwrite the default styles
provided by this module (DEFAULT_STYLE) and any language specific
styles defined by the LanguageDefinition. """
gtk.TextBuffer.__init__(self, table)
        # default styles (copied so per-buffer updates don't touch the module-level dict)
        self.styles = dict(DEFAULT_STYLES)
# update styles with lang-spec:
if lang:
self.styles.update(lang.get_styles())
# update styles with user-defined
self.styles.update(styles)
# create tags
for name, props in self.styles.items():
style = dict(self.styles['DEFAULT']) # take default
style.update(props) # and update with props
self.create_tag(name, **style)
# store lang-definition
self._lang_def = lang
self.connect_after("insert-text", self._on_insert_text)
self.connect_after("delete-range", self._on_delete_range)
self.connect('apply-tag', self._on_apply_tag)
self._apply_tags = False
def _on_apply_tag(self, buf, tag, start, end):
# FIXME This is a hack! It allows apply-tag only while
# _on_insert_text() and _on_delete_range()
if not self._apply_tags:
self.emit_stop_by_name('apply-tag')
return True
_log_debug("tag \"%s\" as %s"%(self.get_slice(start,end), tag.get_property("name")))
def _on_insert_text(self, buf, it, text, length):
# if no syntax defined -> nop
if not self._lang_def: return False
it = it.copy()
it.backward_chars(length)
if not it.begins_tag():
it.backward_to_tag_toggle(None)
_log_debug("Not tag-start -> moved iter to %i (%s)"%(it.get_offset(), it.get_char()))
if it.begins_tag(self.get_tag_table().lookup("DEFAULT")):
it.backward_to_tag_toggle(None)
_log_debug("Iter at DEFAULT-start -> moved to %i (%s)"%(it.get_offset(), it.get_char()))
self._apply_tags = True
self.update_syntax(it)
self._apply_tags = False
def _on_delete_range(self, buf, start, end):
# if no syntax defined -> nop
if not self._lang_def: return False
start = start.copy()
if not start.begins_tag():
start.backward_to_tag_toggle(None)
self._apply_tags = True
self.update_syntax(start)
self._apply_tags = False
def update_syntax(self, start, end=None):
""" More or less internal used method to update the
syntax-highlighting. """
# if no lang set
if not self._lang_def: return
_log_debug("Update syntax from %i"%start.get_offset())
# if not end defined
if not end: end = self.get_end_iter()
# We do not use recursion -> long files exceed rec-limit!
finished = False
while not finished:
# search first rule matching txt[start..end]
mstart, mend, tagname = self._lang_def(self, start, end)
            # optimisation: if mstart-mend is already tagged with tagname
# -> finished
if tagname: #if something found
tag = self.get_tag_table().lookup(tagname)
if mstart.begins_tag(tag) and mend.ends_tag(tag) and not mstart.equal(start):
self.remove_all_tags(start,mstart)
self.apply_tag_by_name("DEFAULT", start, mstart)
_log_debug("Optimized: Found old tag at %i (%s)"%(mstart.get_offset(), mstart.get_char()))
# finish
finished = True
continue
# remove all tags from start..mend (mend == buffer-end if no match)
self.remove_all_tags(start, mend)
            # make start..mstart = DEFAULT (mstart == buffer-end if no match)
if not start.equal(mstart):
_log_debug("Apply DEFAULT")
self.apply_tag_by_name("DEFAULT", start, mstart)
# nothing found -> finished
if not tagname:
finished = True
continue
# apply tag
_log_debug("Apply %s"%tagname)
self.apply_tag_by_name(tagname, mstart, mend)
start = mend
if start == end:
finished = True
continue
def reset_language(self, lang_def):
""" Reset the currently used language-definition. """
# remove all tags from complete text
start = self.get_start_iter()
self.remove_all_tags(start, self.get_end_iter())
# store lexer
self._lang_def = lang_def
# update styles from lang_def:
if self._lang_def:
self.update_styles(self._lang_def.get_styles())
# and ...
self._apply_tags = True
self.update_syntax(start)
self._apply_tags = False
def update_styles(self, styles):
""" Update styles. This method may be used to reset any styles at
runtime. """
self.styles.update(styles)
table = self.get_tag_table()
for name, props in styles.items():
            style = dict(self.styles['DEFAULT'])  # copy, so props don't leak into DEFAULT
style.update(props)
# if tagname is unknown:
if not table.lookup(name):
_log_debug("Create tag: %s (%s)"%(name, style))
self.create_tag(name, **style)
else: # update tag
tag = table.lookup(name)
_log_debug("Update tag %s with (%s)"%(name, style))
map(lambda i: tag.set_property(i[0],i[1]), style.items())
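# Hedged usage sketch (assumes a "python.xml" syntax file is present on
# SYNTAX_PATH): hook a CodeBuffer up to a TextView, as the module docstring
# describes.
if __name__ == '__main__':
    lang = SyntaxLoader("python")
    buff = CodeBuffer(lang=lang)
    win = gtk.Window(gtk.WINDOW_TOPLEVEL)
    win.connect("destroy", lambda w: gtk.main_quit())
    win.add(gtk.TextView(buff))
    win.show_all()
    gtk.main()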
|
from pandac.PandaModules import *
from direct.task.Task import Task
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.fsm import ClassicFSM, State
from direct.fsm import State
class Walk(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('Walk')
def __init__(self, doneEvent):
StateData.StateData.__init__(self, doneEvent)
self.fsm = ClassicFSM.ClassicFSM('Walk', [State.State('off', self.enterOff, self.exitOff, ['walking', 'swimming', 'slowWalking']),
State.State('walking', self.enterWalking, self.exitWalking, ['swimming', 'slowWalking']),
State.State('swimming', self.enterSwimming, self.exitSwimming, ['walking', 'slowWalking']),
State.State('slowWalking', self.enterSlowWalking, self.exitSlowWalking, ['walking', 'swimming'])], 'off', 'off')
self.fsm.enterInitialState()
self.IsSwimSoundAudible = 0
self.swimSoundPlaying = 0
def load(self):
pass
def unload(self):
del self.fsm
def enter(self, slowWalk = 0):
base.localAvatar.startPosHprBroadcast()
base.localAvatar.startBlink()
base.localAvatar.attachCamera()
shouldPush = 1
if len(base.localAvatar.cameraPositions) > 0:
shouldPush = not base.localAvatar.cameraPositions[base.localAvatar.cameraIndex][4]
base.localAvatar.startUpdateSmartCamera(shouldPush)
base.localAvatar.showName()
base.localAvatar.collisionsOn()
base.localAvatar.startGlitchKiller()
base.localAvatar.enableAvatarControls()
def exit(self):
self.fsm.request('off')
self.ignore(base.JUMP)
base.localAvatar.disableAvatarControls()
base.localAvatar.stopUpdateSmartCamera()
base.localAvatar.stopPosHprBroadcast()
base.localAvatar.stopBlink()
base.localAvatar.detachCamera()
base.localAvatar.stopGlitchKiller()
base.localAvatar.collisionsOff()
base.localAvatar.controlManager.placeOnFloor()
def enterOff(self):
pass
def exitOff(self):
pass
def enterWalking(self):
if base.localAvatar.hp > 0:
base.localAvatar.startTrackAnimToSpeed()
base.localAvatar.setWalkSpeedNormal()
base.localAvatar.applyBuffs()
else:
self.fsm.request('slowWalking')
def exitWalking(self):
base.localAvatar.stopTrackAnimToSpeed()
def setSwimSoundAudible(self, IsSwimSoundAudible):
self.IsSwimSoundAudible = IsSwimSoundAudible
if IsSwimSoundAudible == 0 and self.swimSoundPlaying:
self.swimSound.stop()
self.swimSoundPlaying = 0
def enterSwimming(self, swimSound):
base.localAvatar.setWalkSpeedNormal()
base.localAvatar.applyBuffs()
self.swimSound = swimSound
self.swimSoundPlaying = 0
base.localAvatar.b_setAnimState('swim', base.localAvatar.animMultiplier)
base.localAvatar.startSleepSwimTest()
taskMgr.add(self.__swim, 'localToonSwimming')
def exitSwimming(self):
taskMgr.remove('localToonSwimming')
self.swimSound.stop()
del self.swimSound
self.swimSoundPlaying = 0
base.localAvatar.stopSleepSwimTest()
def __swim(self, task):
speed = base.mouseInterfaceNode.getSpeed()
if speed == 0 and self.swimSoundPlaying:
self.swimSoundPlaying = 0
self.swimSound.stop()
elif speed > 0 and self.swimSoundPlaying == 0 and self.IsSwimSoundAudible:
self.swimSoundPlaying = 1
base.playSfx(self.swimSound, looping=1)
return Task.cont
def enterSlowWalking(self):
self.accept(base.localAvatar.uniqueName('positiveHP'), self.__handlePositiveHP)
base.localAvatar.startTrackAnimToSpeed()
base.localAvatar.setWalkSpeedSlow()
def __handlePositiveHP(self):
self.fsm.request('walking')
def exitSlowWalking(self):
base.localAvatar.stopTrackAnimToSpeed()
self.ignore(base.localAvatar.uniqueName('positiveHP'))
|
"""String interpolation routines, i.e. the splitting up a given text into some
parts that are literal strings, and others that are Python expressions.
"""
from itertools import chain
import os
import re
from tokenize import PseudoToken
from genshi.core import TEXT
from genshi.template.base import TemplateSyntaxError, EXPR
from genshi.template.eval import Expression
__all__ = ['interpolate']
__docformat__ = 'restructuredtext en'
NAMESTART = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
NAMECHARS = NAMESTART + '.0123456789'
PREFIX = '$'
token_re = re.compile('%s|%s(?s)' % (
r'[uU]?[rR]?("""|\'\'\')((?<!\\)\\\1|.)*?\1',
PseudoToken
))
def interpolate(text, filepath=None, lineno=-1, offset=0, lookup='strict'):
"""Parse the given string and extract expressions.
This function is a generator that yields `TEXT` events for literal strings,
and `EXPR` events for expressions, depending on the results of parsing the
string.
>>> for kind, data, pos in interpolate("hey ${foo}bar"):
... print(('%s %r' % (kind, data)))
TEXT 'hey '
EXPR Expression('foo')
TEXT 'bar'
:param text: the text to parse
:param filepath: absolute path to the file in which the text was found
(optional)
:param lineno: the line number at which the text was found (optional)
:param offset: the column number at which the text starts in the source
(optional)
:param lookup: the variable lookup mechanism; either "lenient" (the
default), "strict", or a custom lookup class
:return: a list of `TEXT` and `EXPR` events
:raise TemplateSyntaxError: when a syntax error in an expression is
encountered
"""
pos = [filepath, lineno, offset]
textbuf = []
textpos = None
for is_expr, chunk in chain(lex(text, pos, filepath), [(True, '')]):
if is_expr:
if textbuf:
yield TEXT, ''.join(textbuf), textpos
del textbuf[:]
textpos = None
if chunk:
try:
expr = Expression(chunk.strip(), pos[0], pos[1],
lookup=lookup)
yield EXPR, expr, tuple(pos)
except SyntaxError as err:
raise TemplateSyntaxError(err, filepath, pos[1],
pos[2] + (err.offset or 0))
else:
textbuf.append(chunk)
if textpos is None:
textpos = tuple(pos)
if '\n' in chunk:
lines = chunk.splitlines()
pos[1] += len(lines) - 1
pos[2] += len(lines[-1])
else:
pos[2] += len(chunk)
def lex(text, textpos, filepath):
offset = pos = 0
end = len(text)
escaped = False
while 1:
if escaped:
offset = text.find(PREFIX, offset + 2)
escaped = False
else:
offset = text.find(PREFIX, pos)
if offset < 0 or offset == end - 1:
break
next = text[offset + 1]
if next == '{':
if offset > pos:
yield False, text[pos:offset]
pos = offset + 2
level = 1
while level:
match = token_re.match(text, pos)
if match is None or not match.group():
# if there isn't a match or the match is the empty
# string, we're not going to match up braces ever
raise TemplateSyntaxError('invalid syntax', filepath,
*textpos[1:])
pos = match.end()
tstart, tend = match.regs[3]
token = text[tstart:tend]
if token == '{':
level += 1
elif token == '}':
level -= 1
yield True, text[offset + 2:pos - 1]
elif next in NAMESTART:
if offset > pos:
yield False, text[pos:offset]
pos = offset
pos += 1
while pos < end:
char = text[pos]
if char not in NAMECHARS:
break
pos += 1
yield True, text[offset + 1:pos].strip()
elif not escaped and next == PREFIX:
if offset > pos:
yield False, text[pos:offset]
escaped = True
pos = offset + 1
else:
yield False, text[pos:offset + 1]
pos = offset + 1
if pos < end:
yield False, text[pos:]
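
# Illustrative usage sketch (not part of the original module): a doubled
# prefix escapes interpolation, so "$$" yields a literal "$" in the text.
if __name__ == '__main__':
    for kind, data, pos in interpolate('cost: $$5 for ${1 + 1} items'):
        print('%s %r' % (kind, data))
    # expected output:
    #   TEXT 'cost: $5 for '
    #   EXPR Expression('1 + 1')
    #   TEXT ' items'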
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forms', '0007_merge_20170614_1019'),
]
operations = [
migrations.RemoveField(
model_name='questionnaire',
name='description',
),
migrations.RemoveField(
model_name='questionnaire',
name='status',
),
]
|
import abc
import sys
if sys.version_info < (3, 0):
    import httplib as HttpStatusCode
else:
    import http.client as HttpStatusCode
import requests
from nose.tools import (assert_equal,
assert_in,
assert_is_instance,
assert_true)
__all__ = ['Base', 'RootBase']
class RootBase(metaclass=abc.ABCMeta):
api_version = 'v1'
root_url = 'http://127.0.0.1:5000/' + api_version + '/'
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
@property
@abc.abstractmethod
def endpoint_name(self):
pass
@property
def url(self):
url = self.root_url
if self.endpoint_name.lower().strip() != 'root':
url += self.endpoint_name
if not url.endswith('/'):
url += '/'
return url
    def test_get_collection_returns_200_status_code(self):
        response = requests.head(url=self.url, headers=self.headers)
        assert_equal(response.status_code, HttpStatusCode.OK)
def test_get_body_has_data(self):
response = requests.get(url=self.url, headers=self.headers)
assert_in('data', response.json())
def test_get_body_has_urls(self):
response = requests.get(url=self.url, headers=self.headers)
assert_in('urls', response.json())
def test_get_body_has_self_url(self):
response = requests.get(url=self.url, headers=self.headers)
assert_equal(self.url, response.json()['urls']['self'])
class Base(RootBase):
def __init__(self):
# Check this attribute to determine if a teardown is necessary.
self.self_url = ''
@property
@abc.abstractmethod
def data(self):
pass
def test_get_subresources_are_discoverable(self):
response = requests.get(url=self.root_url, headers=self.headers)
discovered_url = response.json()['data']['subresources'].get(
self.endpoint_name,
'')
assert_equal(discovered_url, self.url)
def test_get_id_attribute_is_of_type_string(self):
self.self_url = requests.post(url=self.url,
headers=self.headers,
json=self.data).json()['urls']['self']
response = requests.get(url=self.self_url, headers=self.headers)
assert_is_instance(response.json()['data']['id'], str)
def test_get_nonexistent_resource_returns_404_status_code(self):
response = requests.get(url=self.url + 'foo', headers=self.headers)
assert_equal(response.status_code, HttpStatusCode.NOT_FOUND)
def test_post_returns_201_status_code(self):
response = requests.post(url=self.url,
headers=self.headers,
json=self.data)
self.self_url = response.json()['urls']['self']
assert_equal(response.status_code, HttpStatusCode.CREATED)
def test_post_returns_location_header(self):
response = requests.post(url=self.url,
headers=self.headers,
json=self.data)
self.self_url = response.json()['urls']['self']
assert_equal(response.headers['Location'], self.self_url)
def test_post_body_has_data_not_null(self):
response = requests.post(url=self.url,
headers=self.headers,
json=self.data)
self.self_url = response.json()['urls']['self']
assert_true(response.json()['data'])
def test_post_body_has_self_url(self):
response = requests.post(url=self.url,
headers=self.headers,
json=self.data)
self.self_url = response.json()['urls']['self']
assert_true(response.json()['urls']['self'])
def test_delete_nonexistent_resource_returns_404_status_code(self):
response = requests.delete(url=self.url + 'foo', headers=self.headers)
assert_equal(response.status_code, HttpStatusCode.NOT_FOUND)
def teardown(self):
if self.self_url:
requests.delete(url=self.self_url, headers=self.headers)
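
# Illustrative sketch (not part of the original module): a concrete test
# class only needs to supply `endpoint_name` (and, for `Base`, the `data`
# payload to POST). The endpoint and payload below are placeholders.
#
# class TestWidgets(Base):
#     @property
#     def endpoint_name(self):
#         return 'widgets'
#
#     @property
#     def data(self):
#         return {'name': 'example widget'}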
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('social', '0004_auto_20161105_1920'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='social.Comment'),
),
migrations.AlterField(
model_name='comment',
name='target_object_owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments_by_others', to=settings.AUTH_USER_MODEL),
),
]
|
"""
PyMF Simplex Volume Maximization [1]
SIVM_SGREEDY: class for greedy-search SiVM
[1] C. Thurau, K. Kersting, and C. Bauckhage. Yes We Can - Simplex Volume
Maximization for Descriptive Web-Scale Matrix Factorization. In Proc. Int.
Conf. on Information and Knowledge Management. ACM. 2010.
"""
import numpy as np
import time
from .dist import *
from .vol import *
from .sivm_search import SIVM_SEARCH
__all__ = ["SIVM_SGREEDY"]
class SIVM_SGREEDY(SIVM_SEARCH):
"""
    SIVM_SGREEDY(data, num_bases=4, niter=100, show_progress=False, compW=True)
Simplex Volume Maximization. Factorize a data matrix into two matrices s.t.
F = | data - W*H | is minimal. H is restricted to convexity. W is iteratively
found by maximizing the volume of the resulting simplex (see [1]). A solution
is found by employing a simple greedy max-vol strategy.
Parameters
----------
data : array_like
the input data
num_bases: int, optional
Number of bases to compute (column rank of W and row rank of H).
4 (default)
niter: int, optional
Number of iterations of the alternating optimization.
100 (default)
show_progress: bool, optional
Print some extra information
False (default)
compW: bool, optional
Compute W (True) or only H (False). Useful for using basis vectors
from another convexity constrained matrix factorization function
(e.g. svmnmf) (if set to "True" niter can be set to "1")
    compH: bool, optional
        Compute H (True) or only W (False). Useful for using precomputed
basis vectors.
dist_measure: string, optional
The distance measure for finding the next best candidate that
maximizes the simplex volume ['l2','l1','cosine','sparse_graph_l2']
'l2' (default)
optimize_lower_bound: bool, optional
Use the alternative selection criterion that optimizes the lower
bound (see [1])
False (default)
Attributes
----------
W : "data_dimension x num_bases" matrix of basis vectors
H : "num bases x num_samples" matrix of coefficients
    ferr : Frobenius norm (after applying .factorize())
Example
-------
Applying SIVM to some rather stupid data set:
>>> import numpy as np
>>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])
>>> sivm_mdl = SIVM_SGREEDY(data, num_bases=2, niter=10)
>>> sivm_mdl.initialization()
>>> sivm_mdl.factorize()
The basis vectors are now stored in sivm_mdl.W, the coefficients in sivm_mdl.H.
To compute coefficients for an existing set of basis vectors simply copy W
to sivm_mdl.W, and set compW to False:
>>> data = np.array([[1.5, 1.3], [1.2, 0.3]])
>>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
>>> sivm_mdl = SIVM_SGREEDY(data, num_bases=2, niter=1, compW=False)
>>> sivm_mdl.initialization()
>>> sivm_mdl.W = W
>>> sivm_mdl.factorize()
The result is a set of coefficients sivm_mdl.H, s.t. data = W * sivm_mdl.H.
"""
def update_w(self):
        # compute distance matrix -> required for the volume computation
self.init_sivm()
next_sel = list([self.select[0]])
self.select = []
self._v = []
self._t = []
stime = time.time()
        for it in range(self._num_bases-1):
# add new selections to openset
next_sel = list(np.sort(next_sel))
D = pdist(self.data[:, next_sel], self.data[:, next_sel])
V = np.zeros(self.data.shape[1])
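            # Build an augmented distance matrix below: D holds the pairwise
            # distances of the current selection; the extra row/column is
            # filled with each candidate's distances, and cmdet() scores the
            # resulting simplex (presumably via the Cayley-Menger
            # determinant, as the name suggests).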
d = np.zeros((D.shape[0]+1,D.shape[1]+1))
d[:D.shape[0], :D.shape[1]] = D[:,:]
for i in range(self.data.shape[1]):
# create a temp selection
dtmp = l2_distance(self.data[:,next_sel], self.data[:,i:i+1])
d[:-1,-1] = dtmp
d[-1,:-1] = dtmp
# compute volume for temp selection
V[i] = cmdet(d)
next_index = np.argmax(V)
next_sel.append(next_index)
self._v.append(np.max(V))
            self._logger.info('Iter:' + str(it))
self._logger.info('Current selection:' + str(next_sel))
self._logger.info('Current volume:' + str(self._v[-1]))
self._t.append(time.time() - stime)
# update some values ...
self.select = list(next_sel)
self.W = self.data[:, self.select]
if __name__ == "__main__":
import doctest
doctest.testmod()
|
import datetime
from markupupdowndown import config
from markupupdowndown.generators import render_theme_skel
def init_subparser(subparsers):
help_msg = 'create new theme'
parser_content = subparsers.add_parser('theme', help=help_msg)
parser_content.set_defaults(func=command_theme)
parser_content.add_argument('slug', action='store',
help="path name for content")
def command_theme(args):
if not config.in_themes_dir():
print('Error: not in markupupdowndown theme directory')
else:
render_theme_skel(args.slug)
|
def check_types(oktypes, o):
if not isinstance(o, oktypes):
raise TypeError(f"Wrong element type: object {o}, type {type(o)}")
|
from ferrox.model.db import BaseTable
from sqlalchemy import Column, types
from sqlalchemy.orm import object_mapper, relation
from sqlalchemy.ext.declarative import DeclarativeMeta
class Discussion(BaseTable):
__tablename__ = 'discussions'
id = Column(types.Integer, primary_key=True)
comment_count = Column(types.Integer, nullable=False, default=0)
def get_parent_post(self):
"""Returns this discussion's associated news/journal/submission."""
if not hasattr(self, '_parent_post'):
self._parent_post = (self.news
or self.journal_entry
or self.submission)[0]
return self._parent_post
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_daclif_gallamby_q3_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_corl_n","daclif_gallamby_q3_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
import os
import types
import sys
import codecs
import tempfile
import tkFileDialog
import tkMessageBox
import re
from Tkinter import *
from SimpleDialog import SimpleDialog
from configHandler import idleConf
try:
from codecs import BOM_UTF8
except ImportError:
# only available since Python 2.3
BOM_UTF8 = '\xef\xbb\xbf'
try:
import locale
locale.setlocale(locale.LC_CTYPE, "")
except (ImportError, locale.Error):
pass
filesystemencoding = sys.getfilesystemencoding()
encoding = "ascii"
if sys.platform == 'win32':
# On Windows, we could use "mbcs". However, to give the user
# a portable encoding name, we need to find the code page
try:
encoding = locale.getdefaultlocale()[1]
codecs.lookup(encoding)
except LookupError:
pass
else:
try:
# Different things can fail here: the locale module may not be
# loaded, it may not offer nl_langinfo, or CODESET, or the
# resulting codeset may be unknown to Python. We ignore all
# these problems, falling back to ASCII
encoding = locale.nl_langinfo(locale.CODESET)
        if encoding is None or encoding == '':
# situation occurs on Mac OS X
encoding = 'ascii'
codecs.lookup(encoding)
except (NameError, AttributeError, LookupError):
        # Try getdefaultlocale as well: it parses environment variables,
# which may give a clue. Unfortunately, getdefaultlocale has
# bugs that can cause ValueError.
try:
encoding = locale.getdefaultlocale()[1]
            if encoding is None or encoding == '':
# situation occurs on Mac OS X
encoding = 'ascii'
codecs.lookup(encoding)
except (ValueError, LookupError):
pass
encoding = encoding.lower()
coding_re = re.compile(r"coding[:=]\s*([-\w_.]+)")
class EncodingMessage(SimpleDialog):
"Inform user that an encoding declaration is needed."
def __init__(self, master, enc):
self.should_edit = False
self.root = top = Toplevel(master)
top.bind("<Return>", self.return_event)
top.bind("<Escape>", self.do_ok)
top.protocol("WM_DELETE_WINDOW", self.wm_delete_window)
top.wm_title("I/O Warning")
top.wm_iconname("I/O Warning")
self.top = top
l1 = Label(top,
text="Non-ASCII found, yet no encoding declared. Add a line like")
l1.pack(side=TOP, anchor=W)
l2 = Entry(top, font="courier")
l2.insert(0, "# -*- coding: %s -*-" % enc)
# For some reason, the text is not selectable anymore if the
# widget is disabled.
# l2['state'] = DISABLED
l2.pack(side=TOP, anchor = W, fill=X)
l3 = Label(top, text="to your file\n"
"Choose OK to save this file as %s\n"
"Edit your general options to silence this warning" % enc)
l3.pack(side=TOP, anchor = W)
buttons = Frame(top)
buttons.pack(side=TOP, fill=X)
# Both return and cancel mean the same thing: do nothing
self.default = self.cancel = 0
b1 = Button(buttons, text="Ok", default="active",
command=self.do_ok)
b1.pack(side=LEFT, fill=BOTH, expand=1)
b2 = Button(buttons, text="Edit my file",
command=self.do_edit)
b2.pack(side=LEFT, fill=BOTH, expand=1)
self._set_transient(master)
def do_ok(self):
self.done(0)
def do_edit(self):
self.done(1)
def coding_spec(str):
"""Return the encoding declaration according to PEP 263.
Raise LookupError if the encoding is declared but unknown.
"""
# Only consider the first two lines
str = str.split("\n")[:2]
str = "\n".join(str)
match = coding_re.search(str)
if not match:
return None
name = match.group(1)
# Check whether the encoding is known
import codecs
try:
codecs.lookup(name)
except LookupError:
# The standard encoding error does not indicate the encoding
raise LookupError, "Unknown encoding "+name
return name
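
# Illustrative examples (not part of the original module):
#
#   coding_spec("# -*- coding: utf-8 -*-\nprint 'hi'\n")   -> 'utf-8'
#   coding_spec("print 'hi'\n")                            -> None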
class IOBinding:
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.__id_open = self.text.bind("<<open-window-from-file>>", self.open)
self.__id_save = self.text.bind("<<save-window>>", self.save)
self.__id_saveas = self.text.bind("<<save-window-as-file>>",
self.save_as)
self.__id_savecopy = self.text.bind("<<save-copy-of-window-as-file>>",
self.save_a_copy)
self.fileencoding = None
self.__id_print = self.text.bind("<<print-window>>", self.print_window)
def close(self):
# Undo command bindings
self.text.unbind("<<open-window-from-file>>", self.__id_open)
self.text.unbind("<<save-window>>", self.__id_save)
self.text.unbind("<<save-window-as-file>>",self.__id_saveas)
self.text.unbind("<<save-copy-of-window-as-file>>", self.__id_savecopy)
self.text.unbind("<<print-window>>", self.__id_print)
# Break cycles
self.editwin = None
self.text = None
self.filename_change_hook = None
def get_saved(self):
return self.editwin.get_saved()
def set_saved(self, flag):
self.editwin.set_saved(flag)
def reset_undo(self):
self.editwin.reset_undo()
filename_change_hook = None
def set_filename_change_hook(self, hook):
self.filename_change_hook = hook
filename = None
dirname = None
def set_filename(self, filename):
if filename and os.path.isdir(filename):
self.filename = None
self.dirname = filename
else:
self.filename = filename
self.dirname = None
self.set_saved(1)
if self.filename_change_hook:
self.filename_change_hook()
def open(self, event=None, editFile=None):
if self.editwin.flist:
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
# If the current window has no filename and hasn't been
# modified, we replace its contents (no loss). Otherwise
# we open a new window. But we won't replace the
# shell window (which has an interp(reter) attribute), which
# gets set to "not modified" at every new prompt.
try:
interp = self.editwin.interp
except:
interp = None
if not self.filename and self.get_saved() and not interp:
self.editwin.flist.open(filename, self.loadfile)
else:
self.editwin.flist.open(filename)
else:
self.text.focus_set()
return "break"
#
# Code for use outside IDLE:
if self.get_saved():
reply = self.maybesave()
if reply == "cancel":
self.text.focus_set()
return "break"
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
self.loadfile(filename)
else:
self.text.focus_set()
return "break"
eol = r"(\r\n)|\n|\r" # \r\n (Windows), \n (UNIX), or \r (Mac)
eol_re = re.compile(eol)
eol_convention = os.linesep # Default
def loadfile(self, filename):
try:
# open the file in binary mode so that we can handle
# end-of-line convention ourselves.
f = open(filename,'rb')
chars = f.read()
f.close()
except IOError, msg:
tkMessageBox.showerror("I/O Error", str(msg), master=self.text)
return False
chars = self.decode(chars)
# We now convert all end-of-lines to '\n's
firsteol = self.eol_re.search(chars)
if firsteol:
self.eol_convention = firsteol.group(0)
if isinstance(self.eol_convention, unicode):
# Make sure it is an ASCII string
self.eol_convention = self.eol_convention.encode("ascii")
chars = self.eol_re.sub(r"\n", chars)
self.text.delete("1.0", "end")
self.set_filename(None)
self.text.insert("1.0", chars)
self.reset_undo()
self.set_filename(filename)
self.text.mark_set("insert", "1.0")
self.text.see("insert")
self.updaterecentfileslist(filename)
return True
def decode(self, chars):
"""Create a Unicode string
If that fails, let Tcl try its best
"""
# Check presence of a UTF-8 signature first
if chars.startswith(BOM_UTF8):
try:
chars = chars[3:].decode("utf-8")
except UnicodeError:
# has UTF-8 signature, but fails to decode...
return chars
else:
# Indicates that this file originally had a BOM
self.fileencoding = BOM_UTF8
return chars
# Next look for coding specification
try:
enc = coding_spec(chars)
except LookupError, name:
tkMessageBox.showerror(
title="Error loading the file",
message="The encoding '%s' is not known to this Python "\
"installation. The file may not display correctly" % name,
master = self.text)
enc = None
if enc:
try:
return unicode(chars, enc)
except UnicodeError:
pass
# If it is ASCII, we need not to record anything
try:
return unicode(chars, 'ascii')
except UnicodeError:
pass
# Finally, try the locale's encoding. This is deprecated;
# the user should declare a non-ASCII encoding
try:
chars = unicode(chars, encoding)
self.fileencoding = encoding
except UnicodeError:
pass
return chars
def maybesave(self):
if self.get_saved():
return "yes"
message = "Do you want to save %s before closing?" % (
self.filename or "this untitled document")
m = tkMessageBox.Message(
title="Save On Close",
message=message,
icon=tkMessageBox.QUESTION,
type=tkMessageBox.YESNOCANCEL,
master=self.text)
reply = m.show()
if reply == "yes":
self.save(None)
if not self.get_saved():
reply = "cancel"
self.text.focus_set()
return reply
def save(self, event):
if not self.filename:
self.save_as(event)
else:
if self.writefile(self.filename):
self.set_saved(1)
try:
self.editwin.store_file_breaks()
except AttributeError: # may be a PyShell
pass
self.text.focus_set()
return "break"
def save_as(self, event):
filename = self.asksavefile()
if filename:
if self.writefile(filename):
self.set_filename(filename)
self.set_saved(1)
try:
self.editwin.store_file_breaks()
except AttributeError:
pass
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def save_a_copy(self, event):
filename = self.asksavefile()
if filename:
self.writefile(filename)
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def writefile(self, filename):
self.fixlastline()
chars = self.encode(self.text.get("1.0", "end-1c"))
if self.eol_convention != "\n":
chars = chars.replace("\n", self.eol_convention)
try:
f = open(filename, "wb")
f.write(chars)
f.flush()
f.close()
return True
except IOError, msg:
tkMessageBox.showerror("I/O Error", str(msg),
master=self.text)
return False
def encode(self, chars):
if isinstance(chars, types.StringType):
# This is either plain ASCII, or Tk was returning mixed-encoding
# text to us. Don't try to guess further.
return chars
# See whether there is anything non-ASCII in it.
# If not, no need to figure out the encoding.
try:
return chars.encode('ascii')
except UnicodeError:
pass
# If there is an encoding declared, try this first.
try:
enc = coding_spec(chars)
failed = None
except LookupError, msg:
failed = msg
enc = None
if enc:
try:
return chars.encode(enc)
except UnicodeError:
failed = "Invalid encoding '%s'" % enc
if failed:
tkMessageBox.showerror(
"I/O Error",
"%s. Saving as UTF-8" % failed,
master = self.text)
# If there was a UTF-8 signature, use that. This should not fail
if self.fileencoding == BOM_UTF8 or failed:
return BOM_UTF8 + chars.encode("utf-8")
# Try the original file encoding next, if any
if self.fileencoding:
try:
return chars.encode(self.fileencoding)
except UnicodeError:
tkMessageBox.showerror(
"I/O Error",
"Cannot save this as '%s' anymore. Saving as UTF-8" \
% self.fileencoding,
master = self.text)
return BOM_UTF8 + chars.encode("utf-8")
# Nothing was declared, and we had not determined an encoding
# on loading. Recommend an encoding line.
config_encoding = idleConf.GetOption("main","EditorWindow",
"encoding")
if config_encoding == 'utf-8':
# User has requested that we save files as UTF-8
return BOM_UTF8 + chars.encode("utf-8")
ask_user = True
try:
chars = chars.encode(encoding)
enc = encoding
if config_encoding == 'locale':
ask_user = False
except UnicodeError:
chars = BOM_UTF8 + chars.encode("utf-8")
enc = "utf-8"
if not ask_user:
return chars
dialog = EncodingMessage(self.editwin.top, enc)
dialog.go()
if dialog.num == 1:
# User asked us to edit the file
encline = "# -*- coding: %s -*-\n" % enc
firstline = self.text.get("1.0", "2.0")
if firstline.startswith("#!"):
# Insert encoding after #! line
self.text.insert("2.0", encline)
else:
self.text.insert("1.0", encline)
return self.encode(self.text.get("1.0", "end-1c"))
return chars
def fixlastline(self):
c = self.text.get("end-2c")
if c != '\n':
self.text.insert("end-1c", "\n")
def print_window(self, event):
tempfilename = None
saved = self.get_saved()
if saved:
filename = self.filename
# shell undo is reset after every prompt, looks saved, probably isn't
if not saved or filename is None:
# XXX KBK 08Jun03 Wouldn't it be better to ask the user to save?
(tfd, tempfilename) = tempfile.mkstemp(prefix='IDLE_tmp_')
filename = tempfilename
os.close(tfd)
if not self.writefile(tempfilename):
os.unlink(tempfilename)
return "break"
platform=os.name
printPlatform=1
if platform == 'posix': #posix platform
command = idleConf.GetOption('main','General',
'print-command-posix')
command = command + " 2>&1"
elif platform == 'nt': #win32 platform
command = idleConf.GetOption('main','General','print-command-win')
else: #no printing for this platform
printPlatform=0
if printPlatform: #we can try to print for this platform
command = command % filename
pipe = os.popen(command, "r")
# things can get ugly on NT if there is no printer available.
output = pipe.read().strip()
status = pipe.close()
if status:
output = "Printing failed (exit status 0x%x)\n" % \
status + output
if output:
output = "Printing command: %s\n" % repr(command) + output
tkMessageBox.showerror("Print status", output, master=self.text)
else: #no printing for this platform
message="Printing is not enabled for this platform: %s" % platform
tkMessageBox.showinfo("Print status", message, master=self.text)
if tempfilename:
os.unlink(tempfilename)
return "break"
opendialog = None
savedialog = None
filetypes = [
("Python and text files", "*.py *.pyw *.txt", "TEXT"),
("All text files", "*", "TEXT"),
("All files", "*"),
]
def askopenfile(self):
dir, base = self.defaultfilename("open")
if not self.opendialog:
self.opendialog = tkFileDialog.Open(master=self.text,
filetypes=self.filetypes)
filename = self.opendialog.show(initialdir=dir, initialfile=base)
if isinstance(filename, unicode):
filename = filename.encode(filesystemencoding)
return filename
def defaultfilename(self, mode="open"):
if self.filename:
return os.path.split(self.filename)
elif self.dirname:
return self.dirname, ""
else:
try:
pwd = os.getcwd()
except os.error:
pwd = ""
return pwd, ""
def asksavefile(self):
dir, base = self.defaultfilename("save")
if not self.savedialog:
self.savedialog = tkFileDialog.SaveAs(master=self.text,
filetypes=self.filetypes)
filename = self.savedialog.show(initialdir=dir, initialfile=base)
if isinstance(filename, unicode):
filename = filename.encode(filesystemencoding)
return filename
def updaterecentfileslist(self,filename):
"Update recent file list on all editor windows"
self.editwin.update_recent_files_list(filename)
def test():
root = Tk()
class MyEditWin:
def __init__(self, text):
self.text = text
self.flist = None
self.text.bind("<Control-o>", self.open)
self.text.bind("<Control-s>", self.save)
self.text.bind("<Alt-s>", self.save_as)
self.text.bind("<Alt-z>", self.save_a_copy)
def get_saved(self): return 0
def set_saved(self, flag): pass
def reset_undo(self): pass
def open(self, event):
self.text.event_generate("<<open-window-from-file>>")
def save(self, event):
self.text.event_generate("<<save-window>>")
def save_as(self, event):
self.text.event_generate("<<save-window-as-file>>")
def save_a_copy(self, event):
self.text.event_generate("<<save-copy-of-window-as-file>>")
text = Text(root)
text.pack()
text.focus_set()
editwin = MyEditWin(text)
io = IOBinding(editwin)
root.mainloop()
if __name__ == "__main__":
test()
|
from celery import chain
from waldur_core.core import executors as core_executors
from waldur_core.core import tasks as core_tasks
from . import tasks
class IssueCreateExecutor(core_executors.CreateExecutor):
@classmethod
def get_task_signature(cls, issue, serialized_issue, **kwargs):
return chain(
core_tasks.StateTransitionTask().si(
serialized_issue,
state_transition='begin_creating',
action='',
action_details={},
),
tasks.create_issue.si(serialized_issue),
tasks.create_confirmation_comment.si(
serialized_issue, kwargs.get('comment_tmpl')
),
)
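# Note (illustrative): the signature above is a Celery canvas -- a chain of
# immutable signatures (.si), so each task ignores its predecessor's return
# value and is passed only the serialized issue explicitly.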
class IssueDeleteExecutor(
core_executors.DeleteExecutorMixin, core_executors.BaseExecutor
):
@classmethod
def get_task_signature(cls, issue, serialized_issue, **kwargs):
return (core_tasks.BackendMethodTask().si(serialized_issue, 'delete_issue'),)
class FeedbackExecutor(core_executors.CreateExecutor):
@classmethod
def get_task_signature(cls, feedback, serialized_feedback, **kwargs):
return tasks.sync_feedback.si(serialized_feedback)
|
"""
Blackjack.py - Yevheniy Chuba - Spring 2014
Implementation of Blackjack. Enjoy: http://www.codeskulptor.org/#user31_R8PVRLqskziSghE.py
Although we used the class-specific CodeSkulptor platform for graphics, most of
the methods and the rest of the concepts are similar, if not the same, in other
Python libraries, such as Pygame.
Learned to:
- play Blackjack
- nice intro to OOP
- track/flowchart complex logic
"""
import simplegui
import random
CARD_SIZE = (73, 98)
CARD_CENTER = (36.5, 49)
card_images = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/cards.jfitz.png")
CARD_BACK_SIZE = (71, 96)
CARD_BACK_CENTER = (35.5, 48)
card_back = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/card_back.png")
all_cards = []
in_play = False
win_loose_mess = ""
hit_stand_mess = "Hit or Stand?"
score = 0
player_hand = []
dealer_hand = []
SUITS = ['C', 'S', 'H', 'D']
RANKS = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
class Card:
def __init__(self, suit, rank):
if (suit in SUITS) and (rank in RANKS):
self.suit = suit
self.rank = rank
else:
self.suit = None
self.rank = None
print "Invalid card: ", self.suit, self.rank
def __str__(self):
return self.suit + self.rank
def get_suit(self):
return self.suit
def get_rank(self):
return self.rank
def draw(self, canvas, pos):
card_loc = (CARD_SIZE[0] * (0.5 + RANKS.index(self.rank)), CARD_SIZE[1] * (0.5 + SUITS.index(self.suit)))
canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_SIZE[0] / 2, pos[1] + CARD_SIZE[1] / 2], CARD_SIZE)
class Hand:
def __init__(self, deck_cards, player_hand, dealer_hand):
self.deck_cards = deck_cards
self.player_hand = player_hand
self.dealer_hand = dealer_hand
def __str__(self):
pass # replace with your code
def add_card(self):
card = self.deck_cards[4]
self.player_hand += [card]
return self.player_hand
def add_card_dealer(self):
card = self.deck_cards[4]
self.dealer_hand += [card]
return self.dealer_hand
    # NOTE: aces always count as 1 here (the "add 10 to the hand value if it
    # doesn't bust" ace logic was never implemented); a busted hand is
    # flagged by returning 101
def get_value(self, player_hand):
final_value = []
self.player_hand = player_hand
for card in self.player_hand:
final_value.append(VALUES[card[1]])
if sum(final_value) > 21:
final_value = [100,1]
return sum(final_value)
def get_value_dealer(self, dealer_hand):
final_value = []
self.dealer_hand = dealer_hand
for card in self.dealer_hand:
final_value.append(VALUES[card[1]])
if sum(final_value) > 21:
final_value = [100,1]
return sum(final_value)
def busted(self):
in_play = False
pass # replace with your code
def draw(self, canvas, p):
pass # replace with your code
class Deck:
def __init__(self):
self.RANKS = RANKS
self.SUITS = SUITS
self.deck_cards = []
# add cards back to deck and shuffle
def shuffle_cards(self):
for rank in self.RANKS:
card_c = [self.SUITS[0], rank]
card_h = [self.SUITS[1], rank]
card_d = [self.SUITS[2], rank]
card_s = [self.SUITS[3], rank]
self.deck_cards.append(card_c)
self.deck_cards.append(card_h)
self.deck_cards.append(card_d)
self.deck_cards.append(card_s)
random.shuffle(self.deck_cards)
return self.deck_cards
def deal_card(self):
card = self.deck_cards[4]
self.deck_cards.pop(4)
return card
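    # Convention in this implementation: the first four entries of the
    # shuffled deck go to the initial dealer/player hands, and later hits
    # index directly into the remaining cards (see add_card/add_card_dealer
    # and draw()) instead of popping them one at a time.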
def deal():
global win_loose_mess, hit_stand_mess, in_play, score, deck, all_cards
global dealers_card_1, dealers_card_2, players_card_1, players_card_2, player_hand, dealer_hand
if in_play:
player_hand = []
dealer_hand = []
all_cards = []
deck = Deck() # initiate the game
all_cards = deck.shuffle_cards()
#Initiate the cards
dealers_card_1 = Card(all_cards[0][0], all_cards[0][1])
dealers_card_2 = Card(all_cards[1][0], all_cards[1][1])
dealer_hand = [[dealers_card_1.get_suit(), dealers_card_1.get_rank()],
[dealers_card_2.get_suit(), dealers_card_2.get_rank()]]
players_card_1 = Card(all_cards[2][0], all_cards[2][1])
players_card_2 = Card(all_cards[3][0], all_cards[3][1])
player_hand = [[players_card_1.get_suit(),players_card_1.get_rank()],
[players_card_2.get_suit(), players_card_2.get_rank()]]
score = score
win_loose_mess = ""
hit_stand_mess = "Hit or Stand?"
else: # if the game has not been played yet
deck = Deck() # initiate the game
all_cards = deck.shuffle_cards()
in_play = True
def hit():
global player_hand, dealer_hand, player_final_value, score, win_loose_mess, hit_stand_mess, in_play
# if the hand is in play, hit the player
if in_play:
playing_hand = Hand(all_cards, player_hand, dealer_hand)
player_hand = playing_hand.add_card()
player_hand_value = playing_hand.get_value(player_hand)
player_final_value = player_hand_value
print player_hand_value
if player_hand_value == 101:
score -= 1
win_loose_mess = "You Lost - over 21"
hit_stand_mess = "New Deal?"
    # if busted, assign a message to the outcome and update in_play and score
def stand():
global player_hand, dealer_hand, player_final_value, dealer_final_value, score, win_loose_mess, hit_stand_mess, in_play
dealing_hand = Hand(all_cards, player_hand, dealer_hand)
dealer_hand_value = dealing_hand.get_value_dealer(dealer_hand)
playing_hand = Hand(all_cards, player_hand, dealer_hand)
player_final_value = playing_hand.get_value(player_hand)
if dealer_hand_value == 101:
score += 1
win_loose_mess = "You Win!"
hit_stand_mess = "New Deal?"
elif dealer_hand_value < 17:
dealer_hand = dealing_hand.add_card_dealer()
dealer_hand_value = dealing_hand.get_value_dealer(dealer_hand)
if dealer_hand_value == 101:
score += 1
win_loose_mess = "You Win!"
hit_stand_mess = "New Deal?"
elif dealer_hand_value > player_final_value:
score -= 1
win_loose_mess = "You Lost"
hit_stand_mess = "New Deal?"
elif dealer_hand_value < player_final_value:
score += 1
win_loose_mess = "You Win!"
hit_stand_mess = "New Deal?"
elif dealer_hand_value == player_final_value:
score -= 1
win_loose_mess = "You Lost"
hit_stand_mess = "New Deal?"
    elif dealer_hand_value >= 17:  # dealer stands on 17 or more
if dealer_hand_value > player_final_value:
score -= 1
win_loose_mess = "You Lost"
hit_stand_mess = "New Deal?"
elif dealer_hand_value < player_final_value:
score += 1
win_loose_mess = "You Win!"
hit_stand_mess = "New Deal?"
elif dealer_hand_value == player_final_value:
score -= 1
win_loose_mess = "You Lost"
hit_stand_mess = "New Deal?"
def draw(canvas):
global all_cards, win_loose_mess, hit_stand_mess, score, player_hand, dealer_hand
# draw static text: blackjack, dealer, player
canvas.draw_text('Blackjack', (5, 50), 50, 'Blue', 'serif')
canvas.draw_text('Dealer', (20, 150), 40, 'Black', 'serif')
canvas.draw_text('Player', (20, 350), 40, 'Black', 'serif')
# draw dynamic text: Score, "Hit or Stand" or "New Deal"
canvas.draw_text('Score: '+str(score), (400, 50), 40, 'Black', 'serif')
canvas.draw_text(hit_stand_mess, (230, 350), 40, 'Black', 'serif')
canvas.draw_text(win_loose_mess, (230, 150), 40, 'Black', 'serif')
dealers_card_1.draw(canvas, [60, 170])
dealers_card_2.draw(canvas, [170, 170])
players_card_1.draw(canvas, [60, 400])
players_card_2.draw(canvas, [170, 400])
if len(dealer_hand) == 3:
dealers_card_3 = Card(all_cards[4][0], all_cards[4][1])
dealers_card_3.draw(canvas, [290, 170])
dealer_hand = [[dealers_card_1.get_suit(),dealers_card_1.get_rank()],
[dealers_card_2.get_suit(), dealers_card_2.get_rank()],
[dealers_card_3.get_suit(), dealers_card_3.get_rank()]]
if len(player_hand) == 3:
players_card_3 = Card(all_cards[5][0], all_cards[5][1])
players_card_3.draw(canvas, [290, 400])
player_hand = [[players_card_1.get_suit(),players_card_1.get_rank()],
[players_card_2.get_suit(), players_card_2.get_rank()],
[players_card_3.get_suit(), players_card_3.get_rank()]]
if len(dealer_hand) == 4:
dealers_card_3 = Card(all_cards[4][0], all_cards[4][1])
dealers_card_3.draw(canvas, [290, 170])
dealers_card_4 = Card(all_cards[6][0], all_cards[6][1])
dealers_card_4.draw(canvas, [400, 170])
dealer_hand = [[dealers_card_1.get_suit(),dealers_card_1.get_rank()],
[dealers_card_2.get_suit(), dealers_card_2.get_rank()],
[dealers_card_3.get_suit(), dealers_card_3.get_rank()],
[dealers_card_4.get_suit(), dealers_card_4.get_rank()]]
if len(player_hand) == 4:
players_card_3 = Card(all_cards[5][0], all_cards[5][1])
players_card_3.draw(canvas, [290, 400])
players_card_4 = Card(all_cards[7][0], all_cards[7][1])
players_card_4.draw(canvas, [400, 400])
player_hand = [[players_card_1.get_suit(),players_card_1.get_rank()],
[players_card_2.get_suit(), players_card_2.get_rank()],
[players_card_3.get_suit(), players_card_3.get_rank()],
[players_card_4.get_suit(), players_card_4.get_rank()]]
frame = simplegui.create_frame("Blackjack", 600, 600)
frame.set_canvas_background("Green")
frame.add_button("Deal", deal, 200)
frame.add_button("Hit", hit, 200)
frame.add_button("Stand", stand, 200)
frame.set_draw_handler(draw)
deal()
dealers_card_1 = Card(all_cards[0][0], all_cards[0][1])
dealers_card_2 = Card(all_cards[1][0], all_cards[1][1])
dealer_hand = [[dealers_card_1.get_suit(), dealers_card_1.get_rank()],
[dealers_card_2.get_suit(), dealers_card_2.get_rank()]]
players_card_1 = Card(all_cards[2][0], all_cards[2][1])
players_card_2 = Card(all_cards[3][0], all_cards[3][1])
player_hand = [[players_card_1.get_suit(),players_card_1.get_rank()],
[players_card_2.get_suit(), players_card_2.get_rank()]]
frame.start()
|
from backbone import Backbone
from datetime import datetime
import re
import json
import glob
import os
def extractYouTubeIDFromLink(url):
    # e.g. youtu.be/qFIUHACQ-gM?a
    pattern = r'youtu\.be/(.*)\?a$'
p = re.compile(pattern,re.M | re.I)
matches = p.findall(url)
if len(matches) > 0:
return matches[0]
else:
return -1
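
# Example (illustrative): extractYouTubeIDFromLink('youtu.be/qFIUHACQ-gM?a')
# returns 'qFIUHACQ-gM'; links without the trailing "?a" marker yield -1.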
def extractDataFromTweet(tweet):
user = ''
item = ''
timestamp = ''
#user
user = tweet['user']['id']
#timestamp
timestamp = tweet['created_at']
the_time = datetime.strptime(timestamp.replace(' +0000',''), '%a %b %d %H:%M:%S %Y')
timestamp = (the_time-datetime(1970,1,1)).total_seconds()
timestamp = int(timestamp)
#item
url = tweet['entities']['urls'][0]['display_url']
    item = extractYouTubeIDFromLink(url)
return user, item, timestamp
def extractDataset(tweets):
dataset = list()
for tweet in tweets:
try:
user, item, timestamp = extractDataFromTweet(tweet)
if user == -1 or item == -1 or timestamp == -1:
continue
except:
continue
dataset.append((user, item, timestamp))
return dataset
def writeDataset(dataset, filename):
lines = list()
for (user,item,timestamp) in dataset:
line = str(user) + '::' + str(item) + '::' + str(timestamp) + '\n'
lines.append(line)
    with open(filename, 'a') as outfile:
outfile.writelines(lines)
def writeTweets(tweets, filename):
line = json.dumps(tweets, ensure_ascii = False).encode('UTF-8')
    with open(filename, 'w') as outfile:
outfile.writelines(line)
def get_since_id(path):
since_id = 0
for infile in glob.glob( os.path.join(path, 'tweets_*.json') ):
        pattern = r'tweets_([0-9]*)\.json'
p = re.compile(pattern,re.M | re.I)
matches = p.findall(infile)
id = int(matches[0])
#keep maximum id
since_id = max(id, since_id)
return since_id
if __name__ == "__main__":
b = Backbone()
datasetpath = 'datasets/YouTube'
since_id = get_since_id(datasetpath)
tweets, new_since_id = b.searchTweets('I liked a @YouTube video', since_id)
dataset = extractDataset(tweets)
writeDataset(dataset, datasetpath + '/likes.dat')
writeTweets(tweets,datasetpath + '/tweets_' + str(new_since_id) + '.json')
|
import mido
import random
import math
from PIL import Image
random.seed()
class X:
def eval(self, x, y):
return x
def __str__(self):
return "x"
class Y:
def eval(self, x, y):
return y
def __str__(self):
return "y"
class SinPi:
def __init__(self, prob):
self.arg = buildExpr(prob * prob)
def __str__(self):
return "sin(pi*" + str(self.arg) + ")"
def eval(self, x, y):
return math.sin(math.pi * self.arg.eval(x, y))
class CosPi:
def __init__(self, prob):
self.arg = buildExpr(prob * prob)
def __str__(self):
return "cos(pi*" + str(self.arg) + ")"
def eval(self, x, y):
return math.cos(math.pi * self.arg.eval(x, y))
class Times:
def __init__(self, prob):
self.lhs = buildExpr(prob * prob)
self.rhs = buildExpr(prob * prob)
def __str__(self):
return str(self.lhs) + "*" + str(self.rhs)
def eval(self, x, y):
return self.lhs.eval(x, y) * self.rhs.eval(x, y)
def buildExpr(prob=0.99):
if random.random() < prob:
return random.choice([SinPi, CosPi, Times])(prob)
else:
return random.choice([X, Y])()
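
# buildExpr grows a random expression tree: with probability `prob` it picks
# a composite node (SinPi, CosPi, Times) whose children are built with the
# squared -- hence smaller -- probability, so for prob < 1 the recursion
# terminates almost surely; otherwise it returns a leaf (X or Y).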
def plotIntensity(exp, pixelsPerUnit=150):
canvasWidth = 2 * pixelsPerUnit + 1
canvas = Image.new("L", (canvasWidth, canvasWidth))
for py in range(canvasWidth):
for px in range(canvasWidth):
# Convert pixel location to [-1,1] coordinates
x = float(px - pixelsPerUnit) / pixelsPerUnit
y = -float(py - pixelsPerUnit) / pixelsPerUnit
z = exp.eval(x, y)
# Scale [-1,1] result to [0,255].
intensity = int(z * 127.5 + 127.5)
canvas.putpixel((px, py), intensity)
return canvas
def plotColor(redExp, greenExp, blueExp, pixelsPerUnit=150):
redPlane = plotIntensity(redExp, pixelsPerUnit)
greenPlane = plotIntensity(greenExp, pixelsPerUnit)
bluePlane = plotIntensity(blueExp, pixelsPerUnit)
return Image.merge("RGB", (redPlane, greenPlane, bluePlane))
def makeImage(numPics=20):
i = 0
with mido.open_input('EWI-USB MIDI 1') as inport:
with open("eqns.txt", 'w') as eqnsFile:
for msg in inport:
print("Building exprs")
ctrltype = msg.bytes()[0]
note = msg.bytes()[1]
velocity = msg.bytes()[2]
print(ctrltype, note, velocity)
                # MIDI data bytes are 0-127 (status bytes up to 255); scale
                # them into [0, 1) so buildExpr's branch probability stays
                # below 1 and the recursive expression trees terminate.
                redExp = buildExpr(ctrltype / 256.0)
                greenExp = buildExpr(note / 128.0)
                blueExp = buildExpr(velocity / 128.0)
print("Writing log")
eqnsFile.write("img" + str(i) + ":\n")
eqnsFile.write("red = " + str(redExp) + "\n")
eqnsFile.write("green = " + str(greenExp) + "\n")
eqnsFile.write("blue = " + str(blueExp) + "\n\n")
print("Generatong images")
image = plotColor(redExp, greenExp, blueExp)
image.save("img" + str(i) + ".png", "PNG")
i += 1
makeImage(20)
|
"""This module defines a base Exporter class. For Jinja template-based export,
see templateexporter.py.
"""
from __future__ import print_function, absolute_import
import io
import os
import copy
import collections
import datetime
from IPython.config.configurable import LoggingConfigurable
from IPython.config import Config
from IPython import nbformat
from IPython.utils.traitlets import MetaHasTraits, Unicode, List, TraitError
from IPython.utils.importstring import import_item
from IPython.utils import text, py3compat
class ResourcesDict(collections.defaultdict):
def __missing__(self, key):
return ''
class FilenameExtension(Unicode):
"""A trait for filename extensions."""
default_value = u''
info_text = 'a filename extension, beginning with a dot'
def validate(self, obj, value):
# cast to proper unicode
value = super(FilenameExtension, self).validate(obj, value)
# check that it starts with a dot
if value and not value.startswith('.'):
msg = "FileExtension trait '{}' does not begin with a dot: {!r}"
raise TraitError(msg.format(self.name, value))
return value
class Exporter(LoggingConfigurable):
"""
Class containing methods that sequentially run a list of preprocessors on a
NotebookNode object and then return the modified NotebookNode object and
accompanying resources dict.
"""
file_extension = FilenameExtension(
'.txt', config=True,
help="Extension of the file that should be written to disk"
)
# MIME type of the result file, for HTTP response headers.
# This is *not* a traitlet, because we want to be able to access it from
# the class, not just on instances.
output_mimetype = ''
#Configurability, allows the user to easily add filters and preprocessors.
preprocessors = List(config=True,
help="""List of preprocessors, by name or namespace, to enable.""")
_preprocessors = List()
default_preprocessors = List([
'IPython.nbconvert.preprocessors.ClearOutputPreprocessor',
'IPython.nbconvert.preprocessors.ExecutePreprocessor',
'IPython.nbconvert.preprocessors.coalesce_streams',
'IPython.nbconvert.preprocessors.SVG2PDFPreprocessor',
'IPython.nbconvert.preprocessors.CSSHTMLHeaderPreprocessor',
'IPython.nbconvert.preprocessors.RevealHelpPreprocessor',
'IPython.nbconvert.preprocessors.LatexPreprocessor',
'IPython.nbconvert.preprocessors.HighlightMagicsPreprocessor',
'IPython.nbconvert.preprocessors.ExtractOutputPreprocessor',
],
config=True,
help="""List of preprocessors available by default, by name, namespace,
instance, or type.""")
def __init__(self, config=None, **kw):
"""
Public constructor
Parameters
----------
config : config
User configuration instance.
"""
with_default_config = self.default_config
if config:
with_default_config.merge(config)
super(Exporter, self).__init__(config=with_default_config, **kw)
self._init_preprocessors()
@property
def default_config(self):
return Config()
def from_notebook_node(self, nb, resources=None, **kw):
"""
Convert a notebook from a notebook node instance.
Parameters
----------
nb : :class:`~IPython.nbformat.NotebookNode`
Notebook node (dict-like with attr-access)
resources : dict
Additional resources that can be accessed read/write by
preprocessors and filters.
        **kw
            Ignored by this base class.
"""
nb_copy = copy.deepcopy(nb)
resources = self._init_resources(resources)
if 'language' in nb['metadata']:
resources['language'] = nb['metadata']['language'].lower()
# Preprocess
nb_copy, resources = self._preprocess(nb_copy, resources)
return nb_copy, resources
def from_filename(self, filename, resources=None, **kw):
"""
Convert a notebook from a notebook file.
Parameters
----------
filename : str
Full filename of the notebook file to open and convert.
"""
# Pull the metadata from the filesystem.
if resources is None:
resources = ResourcesDict()
        if 'metadata' not in resources or resources['metadata'] == '':
resources['metadata'] = ResourcesDict()
path, basename = os.path.split(filename)
notebook_name = basename[:basename.rfind('.')]
resources['metadata']['name'] = notebook_name
resources['metadata']['path'] = path
modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
resources['metadata']['modified_date'] = modified_date.strftime(text.date_format)
with io.open(filename, encoding='utf-8') as f:
return self.from_notebook_node(nbformat.read(f, as_version=4), resources=resources, **kw)
def from_file(self, file_stream, resources=None, **kw):
"""
Convert a notebook from a notebook file.
Parameters
----------
file_stream : file-like object
Notebook file-like object to convert.
"""
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
def register_preprocessor(self, preprocessor, enabled=False):
"""
Register a preprocessor.
Preprocessors are classes that act upon the notebook before it is
passed into the Jinja templating engine. preprocessors are also
capable of passing additional information to the Jinja
templating engine.
Parameters
----------
preprocessor : preprocessor
"""
if preprocessor is None:
raise TypeError('preprocessor')
isclass = isinstance(preprocessor, type)
constructed = not isclass
        # Handle the preprocessor's registration based on its type
if constructed and isinstance(preprocessor, py3compat.string_types):
# Preprocessor is a string, import the namespace and recursively call
# this register_preprocessor method
preprocessor_cls = import_item(preprocessor)
return self.register_preprocessor(preprocessor_cls, enabled)
if constructed and hasattr(preprocessor, '__call__'):
# Preprocessor is a function, no need to construct it.
# Register and return the preprocessor.
if enabled:
preprocessor.enabled = True
self._preprocessors.append(preprocessor)
return preprocessor
elif isclass and isinstance(preprocessor, MetaHasTraits):
# Preprocessor is configurable. Make sure to pass in new default for
# the enabled flag if one was specified.
self.register_preprocessor(preprocessor(parent=self), enabled)
elif isclass:
# Preprocessor is not configurable, construct it
self.register_preprocessor(preprocessor(), enabled)
else:
# Preprocessor is an instance of something without a __call__
# attribute.
raise TypeError('preprocessor')
def _init_preprocessors(self):
"""
Register all of the preprocessors needed for this exporter, disabled
unless specified explicitly.
"""
self._preprocessors = []
        # Load default preprocessors (not necessarily enabled by default).
for preprocessor in self.default_preprocessors:
self.register_preprocessor(preprocessor)
# Load user-specified preprocessors. Enable by default.
for preprocessor in self.preprocessors:
self.register_preprocessor(preprocessor, enabled=True)
def _init_resources(self, resources):
#Make sure the resources dict is of ResourcesDict type.
if resources is None:
resources = ResourcesDict()
if not isinstance(resources, ResourcesDict):
new_resources = ResourcesDict()
new_resources.update(resources)
resources = new_resources
#Make sure the metadata extension exists in resources
if 'metadata' in resources:
if not isinstance(resources['metadata'], ResourcesDict):
new_metadata = ResourcesDict()
new_metadata.update(resources['metadata'])
resources['metadata'] = new_metadata
else:
resources['metadata'] = ResourcesDict()
if not resources['metadata']['name']:
resources['metadata']['name'] = 'Notebook'
#Set the output extension
resources['output_extension'] = self.file_extension
return resources
def _preprocess(self, nb, resources):
"""
Preprocess the notebook before passing it into the Jinja engine.
        To preprocess the notebook is to successively apply all of the
        registered preprocessors to it.
Parameters
----------
nb : notebook node
notebook that is being exported.
resources : a dict of additional resources that
can be accessed read/write by preprocessors
"""
# Do a copy.deepcopy first,
# we are never safe enough with what the preprocessors could do.
nbc = copy.deepcopy(nb)
resc = copy.deepcopy(resources)
#Run each preprocessor on the notebook. Carry the output along
#to each preprocessor
for preprocessor in self._preprocessors:
nbc, resc = preprocessor(nbc, resc)
return nbc, resc
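
# Illustrative sketch (not part of the original module): a minimal concrete
# exporter only needs to set the file extension and MIME type; the names
# below are placeholders.
#
# class PlainTextExporter(Exporter):
#     file_extension = FilenameExtension('.log', config=True)
#     output_mimetype = 'text/plain'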
|
import sys
import os
import socket
import unittest
import time
import logging
from plumbum import RemotePath, SshMachine, ProcessExecutionError, local, ProcessTimedOut, NOHUP
from plumbum import CommandNotFound
from plumbum.lib import six
from plumbum._testtools import skipIf, skip_without_chown, skip_on_windows
TEST_HOST = "127.0.0.1"
if TEST_HOST not in ("::1", "127.0.0.1", "localhost"):
import plumbum
plumbum.local.env.path.append("c:\\Program Files\\Git\\bin")
@skip_on_windows
class RemotePathTest(unittest.TestCase):
def _connect(self):
return SshMachine(TEST_HOST)
def test_name(self):
name = RemotePath(self._connect(), "/some/long/path/to/file.txt").name
self.assertTrue(isinstance(name, six.string_types))
self.assertEqual("file.txt", str(name))
def test_dirname(self):
name = RemotePath(self._connect(), "/some/long/path/to/file.txt").dirname
self.assertTrue(isinstance(name, RemotePath))
self.assertEqual("/some/long/path/to", str(name))
def test_uri(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
self.assertEqual("ftp://", p1.as_uri('ftp')[:6])
self.assertEqual("ssh://", p1.as_uri('ssh')[:6])
self.assertEqual("/some/long/path/to/file.txt", p1.as_uri()[-27:])
def test_stem(self):
p = RemotePath(self._connect(), "/some/long/path/to/file.txt")
self.assertEqual(p.stem, "file")
p = RemotePath(self._connect(), "/some/long/path/")
self.assertEqual(p.stem, "path")
def test_suffix(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = RemotePath(self._connect(), "file.tar.gz")
strcmp = lambda a,b: self.assertEqual(str(a),str(b))
self.assertEqual(p1.suffix, ".txt")
self.assertEqual(p1.suffixes, [".txt"])
self.assertEqual(p2.suffix, ".gz")
self.assertEqual(p2.suffixes, [".tar",".gz"])
strcmp(p1.with_suffix(".tar.gz"), RemotePath(self._connect(), "/some/long/path/to/file.tar.gz"))
strcmp(p2.with_suffix(".other"), RemotePath(self._connect(), "file.tar.other"))
strcmp(p2.with_suffix(".other", 2), RemotePath(self._connect(), "file.other"))
strcmp(p2.with_suffix(".other", 0), RemotePath(self._connect(), "file.tar.gz.other"))
strcmp(p2.with_suffix(".other", None), RemotePath(self._connect(), "file.other"))
def test_newname(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = RemotePath(self._connect(), "file.tar.gz")
strcmp = lambda a,b: self.assertEqual(str(a),str(b))
strcmp(p1.with_name("something.tar"), RemotePath(self._connect(), "/some/long/path/to/something.tar"))
strcmp(p2.with_name("something.tar"), RemotePath(self._connect(), "something.tar"))
@skip_without_chown
def test_chown(self):
with self._connect() as rem:
with rem.tempdir() as dir:
p = dir / "foo.txt"
p.write(six.b("hello"))
# because we're connected to localhost, we expect UID and GID to be the same
self.assertEqual(p.uid, os.getuid())
self.assertEqual(p.gid, os.getgid())
p.chown(p.uid.name)
self.assertEqual(p.uid, os.getuid())
@skip_on_windows
class BaseRemoteMachineTest(object):
TUNNEL_PROG = r"""import sys, socket
s = socket.socket()
s.bind(("", 0))
s.listen(1)
sys.stdout.write("{0}\n".format( s.getsockname()[1]))
sys.stdout.flush()
s2, _ = s.accept()
data = s2.recv(100)
s2.send(b"hello " + data)
s2.close()
s.close()
"""
def test_basic(self):
with self._connect() as rem:
r_ssh = rem["ssh"]
r_ls = rem["ls"]
r_grep = rem["grep"]
lines = r_ls("-a").splitlines()
self.assertTrue(".bashrc" in lines or ".bash_profile" in lines)
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
cmd = r_ssh["localhost", "cd", rem.cwd, "&&", r_ls, "|", r_grep["\\.py"]]
self.assertTrue("'|'" in str(cmd))
self.assertTrue("test_remote.py" in cmd())
self.assertTrue("test_remote.py" in [f.name for f in rem.cwd // "*.py"])
def test_glob(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
filenames = [f.name for f in rem.cwd // ("*.py", "*.bash")]
self.assertTrue("test_remote.py" in filenames)
self.assertTrue("slow_process.bash" in filenames)
def test_download_upload(self):
with self._connect() as rem:
rem.upload("test_remote.py", "/tmp")
r_ls = rem["ls"]
r_rm = rem["rm"]
self.assertTrue("test_remote.py" in r_ls("/tmp").splitlines())
rem.download("/tmp/test_remote.py", "/tmp/test_download.txt")
r_rm("/tmp/test_remote.py")
r_rm("/tmp/test_download.txt")
def test_session(self):
with self._connect() as rem:
sh = rem.session()
for _ in range(4):
_, out, _ = sh.run("ls -a")
self.assertTrue(".bashrc" in out or ".bash_profile" in out)
def test_env(self):
with self._connect() as rem:
self.assertRaises(ProcessExecutionError, rem.python, "-c",
"import os;os.environ['FOOBAR72']")
with rem.env(FOOBAR72 = "lala"):
with rem.env(FOOBAR72 = "baba"):
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
self.assertEqual(out.strip(), "baba")
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
self.assertEqual(out.strip(), "lala")
# path manipulation
self.assertRaises(CommandNotFound, rem.which, "dummy-executable")
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
rem.env.path.insert(0, rem.cwd / "not-in-path")
p = rem.which("dummy-executable")
self.assertEqual(p, rem.cwd / "not-in-path" / "dummy-executable")
def test_read_write(self):
with self._connect() as rem:
with rem.tempdir() as dir:
self.assertTrue(dir.is_dir())
data = six.b("hello world")
(dir / "foo.txt").write(data)
self.assertEqual((dir / "foo.txt").read(), data)
self.assertFalse(dir.exists())
def test_contains(self):
with self._connect() as rem:
self.assertTrue("ls" in rem, "Expected to find `ls`")
def test_iter_lines_timeout(self):
with self._connect() as rem:
try:
for i, (out, err) in enumerate(rem["ping"]["-i", 0.5, "127.0.0.1"].popen().iter_lines(timeout=2)):
print("out:", out)
print("err:", err)
except NotImplementedError:
try:
self.skipTest(sys.exc_info()[1])
except AttributeError:
return
except ProcessTimedOut:
self.assertTrue(i > 3)
else:
self.fail("Expected a timeout")
def test_iter_lines_error(self):
with self._connect() as rem:
try:
for i, lines in enumerate(rem["ls"]["--bla"].popen()):
pass
self.assertEqual(i, 1)
except ProcessExecutionError:
ex = sys.exc_info()[1]
self.assertTrue(ex.stderr.startswith("/bin/ls: "))
else:
self.fail("Expected an execution error")
@skip_on_windows
class RemoteMachineTest(unittest.TestCase, BaseRemoteMachineTest):
def _connect(self):
return SshMachine(TEST_HOST)
def test_tunnel(self):
with self._connect() as rem:
p = (rem.python["-u"] << self.TUNNEL_PROG).popen()
try:
port = int(p.stdout.readline().decode("ascii").strip())
except ValueError:
print(p.communicate())
raise
with rem.tunnel(12222, port) as tun:
s = socket.socket()
s.connect(("localhost", 12222))
s.send(six.b("world"))
data = s.recv(100)
s.close()
print(p.communicate())
self.assertEqual(data, b"hello world")
def test_get(self):
with self._connect() as rem:
self.assertEqual(str(rem['ls']),str(rem.get('ls')))
self.assertEqual(str(rem['ls']),str(rem.get('not_a_valid_process_234','ls')))
self.assertTrue('ls' in rem)
self.assertFalse('not_a_valid_process_234' in rem)
def test_list_processes(self):
with self._connect() as rem:
self.assertTrue(list(rem.list_processes()))
def test_pgrep(self):
with self._connect() as rem:
self.assertTrue(list(rem.pgrep("ssh")))
def test_nohup(self):
with self._connect() as rem:
sleep = rem["sleep"]
sleep["5.793817"] & NOHUP(stdout = None, append=False)
time.sleep(.5)
print(rem["ps"]("aux"))
self.assertTrue(list(rem.pgrep("5.793817")))
time.sleep(6)
self.assertFalse(list(rem.pgrep("5.793817")))
def test_bound_env(self):
with self._connect() as rem:
printenv = rem["printenv"]
with rem.env(FOO = "hello"):
self.assertEqual(printenv.with_env(BAR = "world")("FOO"), "hello\n")
self.assertEqual(printenv.with_env(BAR = "world")("BAR"), "world\n")
self.assertEqual(printenv.with_env(FOO = "sea", BAR = "world")("FOO"), "sea\n")
self.assertEqual(printenv.with_env(FOO = "sea", BAR = "world")("BAR"), "world\n")
@skipIf('useradd' not in local, "System does not have useradd (Mac?)")
def test_sshpass(self):
with local.as_root():
local["useradd"]("-m", "-b", "/tmp", "testuser")
try:
with local.as_root():
try:
(local["passwd"] << "123456")("--stdin", "testuser")
except ProcessExecutionError:
# some versions of passwd don't support --stdin, nothing to do in this case
                    logging.warning("passwd failed")
return
with SshMachine("localhost", user = "testuser", password = "123456") as rem:
self.assertEqual(rem["pwd"]().strip(), "/tmp/testuser")
finally:
with local.as_root():
local["userdel"]("-r", "testuser")
try:
import paramiko
except ImportError:
print("Paramiko not avilable")
else:
from plumbum.machines.paramiko_machine import ParamikoMachine
@skip_on_windows
class TestParamikoMachine(unittest.TestCase, BaseRemoteMachineTest):
def _connect(self):
return ParamikoMachine(TEST_HOST, missing_host_policy = paramiko.AutoAddPolicy())
def test_tunnel(self):
with self._connect() as rem:
p = rem.python["-c", self.TUNNEL_PROG].popen()
try:
port = int(p.stdout.readline().strip())
except ValueError:
print(p.communicate())
raise
s = rem.connect_sock(port)
s.send(b"world")
data = s.recv(100)
s.close()
print(p.communicate())
self.assertEqual(data, b"hello world")
def test_piping(self):
with self._connect() as rem:
try:
cmd = rem["ls"] | rem["cat"]
except NotImplementedError:
pass
else:
assert False, "Should not pipe"
if __name__ == "__main__":
unittest.main()
|
from rospy import init_node, Subscriber, Publisher, get_param
from rospy import Rate, is_shutdown, ROSInterruptException, spin, on_shutdown
from barc.msg import ECU
from numpy import pi
import rospy
import time
motor_pwm = 1500
servo_pwm = 1580
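# The control loop below applies an open-loop PWM schedule relative to the
# node start time: roughly 1620 (forward) for the first 5 s, then 1440
# (the 5-7 s window overlap means the later assignment wins, and the value
# persists through the 12-17 s gap), and finally neutral (1500) once 17 s
# have elapsed, after which the loop exits.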
def arduino_interface():
global ecu_pub, motor_pwm, servo_pwm
init_node('arduino_interface')
# set node rate
loop_rate = 50
dt = 1.0 / loop_rate
rate = rospy.Rate(loop_rate)
time_prev = time.time()
ecu_pub = Publisher('ecu_pwm', ECU, queue_size = 10)
while not rospy.is_shutdown():
if time.time() >= time_prev and time.time() < time_prev + 7:
motor_pwm = 1620
if time.time() >= time_prev + 5 and time.time() < time_prev + 12:
motor_pwm = 1440 #1465
#if time.time() >= time_prev + 5 and time.time() < time_prev + 10:
# motor_pwm = 84.0
#if time.time() >= time_prev + 12 and time.time() < time_prev + 17:
# motor_pwm = 86.0
if time.time() >= time_prev + 17:
motor_pwm = 1500
ecu_cmd = ECU(motor_pwm, servo_pwm)
ecu_pub.publish(ecu_cmd)
break
ecu_cmd = ECU(motor_pwm, servo_pwm)
ecu_pub.publish(ecu_cmd)
# wait
rate.sleep()
if __name__ == '__main__':
try:
arduino_interface()
except ROSInterruptException:
pass
|
from __future__ import unicode_literals
DATE_FORMAT = r'\N\gà\y d \t\há\n\g n \nă\m Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'H:i \N\gà\y d \t\há\n\g n \nă\m Y'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'H:i d-m-Y'
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
|
import json
import logging
import requests
from requests.auth import HTTPBasicAuth
log = logging.getLogger(__name__)
class NoGithubCredentials(Exception):
pass
class BasicAuthRequester(object):
"""
Object used for issuing authenticated API calls.
"""
def __init__(self, username, password):
self.username = username
self.password = password
def get_auth(self):
return HTTPBasicAuth(self.username, self.password)
def get(self, url):
log.debug("Fetching %s", url)
response = requests.get(url, auth=self.get_auth())
        if response.status_code >= 400:
log.warning("Error on GET to %s. Response: %s", url,
response.content)
return response
def delete(self, url):
log.debug("Deleting %s", url)
return requests.delete(url, auth=self.get_auth())
def post(self, url, payload):
log.debug("Posting %s to %s", payload, url)
response = requests.post(url, data=json.dumps(payload),
auth=self.get_auth())
        if response.status_code >= 400:
log.warning("Error on POST to %s. Response: %s", url,
response.content)
return response
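# A minimal usage sketch, not part of the original module; the username,
# token, and URL below are placeholders, not real credentials or endpoints
# (a live request is attempted if this is actually run).
if __name__ == '__main__':
    requester = BasicAuthRequester('octocat', 'placeholder-token')
    resp = requester.get('https://api.github.com/user/repos')
    print(resp.status_code)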
|
__author__ = "mozman <mozman@gmx.at>"
def normalize_dxf_chunk(dxfstr):
def round_floats_but_not_ints(tag, places=7):
try:
return int(tag)
except ValueError:
pass
try:
value = float(tag)
return round(value, places)
except ValueError:
return tag
return [round_floats_but_not_ints(tag) for tag in dxfstr.split('\n')]
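# Illustrative sketch, not part of the original module: floats are rounded
# to 7 places and ints pass through unchanged, so two chunks that differ
# only in float noise normalize to the same list.
if __name__ == '__main__':
    a = normalize_dxf_chunk('10\n1.00000004\nTEXT')
    b = normalize_dxf_chunk('10\n1.00000001\nTEXT')
    assert a == b == [10, 1.0, 'TEXT']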
|
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import abel
import os
import bz2
import matplotlib.pylab as plt
imagefile = bz2.BZ2File('data/O2-ANU1024.txt.bz2')
IM = np.loadtxt(imagefile)
if os.environ.get('READTHEDOCS', None) == 'True':
IM = IM[::2,::2]
un = [0, 2] # spherical harmonic orders
proj_angles = np.arange(0, 2*np.pi, np.pi/20) # projection angles
smoothing = 0.9 # smoothing Gaussian 1/e width
threshold = 0.01 # exclude small amplitude Newton spheres
radial_step = 1
clip = 0
LIM = abel.Transform(IM, method="linbasex", origin="convolution",
center_options=dict(square=True),
transform_options=dict(basis_dir=None, return_Beta=True,
legendre_orders=un,
proj_angles=proj_angles,
smoothing=smoothing,
radial_step=radial_step, clip=clip,
threshold=threshold))
radial = LIM.radial
speed = LIM.Beta[0]
anisotropy = LIM.Beta[1]
speed /= speed[200:].max() # exclude transform noise near centerline of image
fig = plt.figure(figsize=(11, 5))
ax1 = plt.subplot2grid((1, 2), (0, 0))
ax2 = plt.subplot2grid((1, 2), (0, 1))
inv_IM = LIM.transform
cols = inv_IM.shape[1]
c2 = cols//2
vmax = IM[:, :c2-100].max()
inv_IM *= vmax/inv_IM[:, c2+100:].max()
JIM = np.concatenate((IM[:, :c2], inv_IM[:, c2:]), axis=1)
im1 = ax1.imshow(JIM, origin='upper', aspect='auto', vmin=0, vmax=vmax)
ax1.set_xlabel('column (pixels)')
ax1.set_ylabel('row (pixels)')
ax1.set_title('VMI, inverse Abel: {:d}x{:d}'.format(*inv_IM.shape),
fontsize='small')
ax2.plot(radial, speed, label='speed')
ax2.plot(radial, speed*anisotropy, label=r'anisotropy $\times$ speed')
ax2.set_xlabel('radial pixel')
row, cols = IM.shape
ax2.axis(xmin=100*cols/1024, xmax=500*cols/1024, ymin=-1.5, ymax=1.8)
ax2.set_title("speed, anisotropy parameter", fontsize='small')
ax2.set_ylabel('intensity')
ax2.set_xlabel('radial coordinate (pixels)')
plt.legend(loc='best', frameon=False, labelspacing=0.1, fontsize='small')
plt.suptitle(
r'linbasex inverse Abel transform of O$_{2}{}^{-}$ electron velocity-map image',
fontsize='larger')
plt.savefig("plot_example_linbasex.png", dpi=100)
plt.show()
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/quenker/shared_lair_quenker_grassland.iff"
result.attribute_template_id = -1
result.stfName("lair_n","quenker_grassland")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from msrest.serialization import Model
class Sku(Model):
"""The SKU of the cognitive services account.
Variables are only populated by the server, and will be ignored when
sending a request.
:param name: Gets or sets the sku name. Required for account creation,
optional for update. Possible values include: 'F0', 'P0', 'P1', 'P2',
'S0', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6'
:type name: str or ~azure.mgmt.cognitiveservices.models.SkuName
:ivar tier: Gets the sku tier. This is based on the SKU name. Possible
values include: 'Free', 'Standard', 'Premium'
:vartype tier: str or ~azure.mgmt.cognitiveservices.models.SkuTier
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'SkuTier'},
}
def __init__(self, name):
self.name = name
self.tier = None
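# Hedged usage sketch (Python 3): 'S0' is one of the SkuName values listed in
# the docstring above; `tier` is read-only and remains None until populated
# from a server response.
if __name__ == '__main__':
    sku = Sku(name='S0')
    print(sku.name, sku.tier)  # -> S0 None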
|
""" Functions dealing with execute commands. """
import argparse
def execute_module_command(module, command='', command_line=[], *args, **kwargs):
"""
Executes the given command line in the given module.
module the module command to execute
command_line list of strings to be parsed as a command line
interactive whether the program is running in interactive mode
"""
module_commands = getattr(module, 'commands', [])
module_description = getattr(module, 'description', '')
module_arguments = getattr(module, 'arguments', [])
module_execute = getattr(module, 'execute', None)
interactive = kwargs.get('interactive', False)
# If running in interactive mode, ignore the dashed commands (e.g. -h)
if interactive:
module_commands = list(filter(lambda c: not c.startswith('-'), module_commands))
# If the command can be handled by the module, parse the command line
# and execute the command in the context of the parsed arguments.
if module_execute and command in module_commands:
parser = argparse.ArgumentParser(prog=module_commands[0], description=module_description)
for arg in module_arguments:
parser.add_argument(*arg[0], **arg[1])
parsed_args = parser.parse_args(command_line)
return module_execute(parsed_args=parsed_args, *args, **kwargs)
return None
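# Hedged usage sketch (assumes Python 3): `greeter` is a hypothetical
# module-like object wired the way execute_module_command expects, exposing
# `commands`, `description`, `arguments`, and an `execute` callable.
if __name__ == '__main__':
    import types
    greeter = types.SimpleNamespace(
        commands=['greet'],
        description='Print a greeting.',
        arguments=[(('name',), {'help': 'who to greet'})],
        execute=lambda parsed_args, **kwargs: 'Hello, %s' % parsed_args.name,
    )
    # ['World'] is parsed against the module's argparse arguments, then
    # execute() runs with the parsed namespace.
    print(execute_module_command(greeter, command='greet', command_line=['World']))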
|
from __future__ import absolute_import
import sys
import decimal
import datetime
import codecs
import re
import collections
import contextlib
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from . import mcc
odict = collections
try:
from bs4 import BeautifulSoup
def soup_maker(fh):
return BeautifulSoup(fh, 'html.parser')
except ImportError:
from BeautifulSoup import BeautifulStoneSoup
soup_maker = BeautifulStoneSoup
def try_decode(string, encoding):
if hasattr(string, 'decode'):
string = string.decode(encoding)
return string
def is_iterable(candidate):
if sys.version_info < (2, 6):
return hasattr(candidate, 'next')
return isinstance(candidate, Iterable)
@contextlib.contextmanager
def save_pos(fh):
"""
Save the position of the file handle, seek to the beginning, and
then restore the position.
"""
orig_pos = fh.tell()
fh.seek(0)
try:
yield fh
finally:
fh.seek(orig_pos)
class OfxFile(object):
def __init__(self, fh):
"""
fh should be a seekable file-like byte stream object
"""
self.headers = odict.OrderedDict()
self.fh = fh
if not is_iterable(self.fh):
return
if not hasattr(self.fh, "seek"):
return # fh is not a file object, we're doomed.
# If the file handler is text stream, convert to bytes one:
first = self.fh.read(1)
self.fh.seek(0)
if not isinstance(first, bytes):
self.fh = six.BytesIO(six.b(self.fh.read()))
with save_pos(self.fh):
self.read_headers()
self.handle_encoding()
self.replace_NONE_headers()
def read_headers(self):
head_data = self.fh.read(1024 * 10)
head_data = head_data[:head_data.find(six.b('<'))]
for line in head_data.splitlines():
# Newline?
if line.strip() == six.b(""):
break
header, value = line.split(six.b(":"))
header, value = header.strip().upper(), value.strip()
self.headers[header] = value
def handle_encoding(self):
"""
Decode the headers and wrap self.fh in a decoder such that it
subsequently returns only text.
"""
# decode the headers using ascii
ascii_headers = odict.OrderedDict(
(
key.decode('ascii', 'replace'),
value.decode('ascii', 'replace'),
)
for key, value in six.iteritems(self.headers)
)
enc_type = ascii_headers.get('ENCODING')
if not enc_type:
# no encoding specified, use the ascii-decoded headers
self.headers = ascii_headers
# decode the body as ascii as well
self.fh = codecs.lookup('ascii').streamreader(self.fh)
return
if enc_type == "USASCII":
cp = ascii_headers.get("CHARSET", "1252")
if cp == "8859-1":
encoding = "iso-8859-1"
else:
encoding = "cp%s" % (cp, )
elif enc_type in ("UNICODE", "UTF-8"):
encoding = "utf-8"
codec = codecs.lookup(encoding)
self.fh = codec.streamreader(self.fh)
# Decode the headers using the encoding
self.headers = odict.OrderedDict(
(key.decode(encoding), value.decode(encoding))
for key, value in six.iteritems(self.headers)
)
def replace_NONE_headers(self):
"""
Any headers that indicate 'none' should be replaced with Python
None values
"""
for header in self.headers:
if self.headers[header].upper() == 'NONE':
self.headers[header] = None
class OfxPreprocessedFile(OfxFile):
def __init__(self, fh):
super(OfxPreprocessedFile, self).__init__(fh)
if self.fh is None:
return
ofx_string = self.fh.read()
# find all closing tags as hints
closing_tags = [t.upper() for t in re.findall(r'(?i)</([a-z0-9_\.]+)>',
ofx_string)]
# close all tags that don't have closing tags and
# leave all other data intact
last_open_tag = None
tokens = re.split(r'(?i)(</?[a-z0-9_\.]+>)', ofx_string)
new_fh = StringIO()
for token in tokens:
is_closing_tag = token.startswith('</')
is_processing_tag = token.startswith('<?')
is_cdata = token.startswith('<!')
is_tag = token.startswith('<') and not is_cdata
is_open_tag = is_tag and not is_closing_tag \
and not is_processing_tag
if is_tag:
if last_open_tag is not None:
new_fh.write("</%s>" % last_open_tag)
last_open_tag = None
if is_open_tag:
tag_name = re.findall(r'(?i)<([a-z0-9_\.]+)>', token)[0]
if tag_name.upper() not in closing_tags:
last_open_tag = tag_name
new_fh.write(token)
new_fh.seek(0)
self.fh = new_fh
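# Illustration (comment only): OFX 1.x is SGML-like and permits unclosed data
# tags. The preprocessing pass above rewrites, for example,
#   <STMTTRN><TRNTYPE>DEBIT<TRNAMT>-10.00</STMTTRN>
# into well-formed markup,
#   <STMTTRN><TRNTYPE>DEBIT</TRNTYPE><TRNAMT>-10.00</TRNAMT></STMTTRN>
# so the soup parser used by OfxParser sees properly nested elements.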
class Ofx(object):
def __str__(self):
return ""
class AccountType(object):
(Unknown, Bank, CreditCard, Investment) = range(0, 4)
class Account(object):
def __init__(self):
self.curdef = None
self.statement = None
self.account_id = ''
self.routing_number = ''
self.branch_id = ''
self.account_type = ''
self.institution = None
self.type = AccountType.Unknown
# Used for error tracking
self.warnings = []
@property
def number(self):
# For backwards compatibility. Remove in version 1.0.
return self.account_id
class InvestmentAccount(Account):
def __init__(self):
super(InvestmentAccount, self).__init__()
self.brokerid = ''
class BrokerageBalance:
def __init__(self):
self.name = None
self.description = None
self.value = None # decimal
class Security:
def __init__(self, uniqueid, name, ticker, memo):
self.uniqueid = uniqueid
self.name = name
self.ticker = ticker
self.memo = memo
class Signon:
def __init__(self, keys):
self.code = keys['code']
self.severity = keys['severity']
self.message = keys['message']
self.dtserver = keys['dtserver']
self.language = keys['language']
self.dtprofup = keys['dtprofup']
self.fi_org = keys['org']
self.fi_fid = keys['fid']
self.intu_bid = keys['intu.bid']
if int(self.code) == 0:
self.success = True
else:
self.success = False
def __str__(self):
ret = "\t<SIGNONMSGSRSV1>\r\n" + "\t\t<SONRS>\r\n" + \
"\t\t\t<STATUS>\r\n"
ret += "\t\t\t\t<CODE>%s\r\n" % self.code
ret += "\t\t\t\t<SEVERITY>%s\r\n" % self.severity
if self.message:
ret += "\t\t\t\t<MESSAGE>%s\r\n" % self.message
ret += "\t\t\t</STATUS>\r\n"
if self.dtserver is not None:
ret += "\t\t\t<DTSERVER>" + self.dtserver + "\r\n"
if self.language is not None:
ret += "\t\t\t<LANGUAGE>" + self.language + "\r\n"
if self.dtprofup is not None:
ret += "\t\t\t<DTPROFUP>" + self.dtprofup + "\r\n"
if (self.fi_org is not None) or (self.fi_fid is not None):
ret += "\t\t\t<FI>\r\n"
if self.fi_org is not None:
ret += "\t\t\t\t<ORG>" + self.fi_org + "\r\n"
if self.fi_fid is not None:
ret += "\t\t\t\t<FID>" + self.fi_fid + "\r\n"
ret += "\t\t\t</FI>\r\n"
if self.intu_bid is not None:
ret += "\t\t\t<INTU.BID>" + self.intu_bid + "\r\n"
ret += "\t\t</SONRS>\r\n"
ret += "\t</SIGNONMSGSRSV1>\r\n"
return ret
class Statement(object):
def __init__(self):
self.start_date = ''
self.end_date = ''
self.currency = ''
self.transactions = []
# Error tracking:
self.discarded_entries = []
self.warnings = []
class InvestmentStatement(object):
def __init__(self):
self.positions = []
self.transactions = []
# Error tracking:
self.discarded_entries = []
self.warnings = []
class Transaction(object):
def __init__(self):
self.payee = ''
self.type = ''
self.date = None
self.user_date = None
self.amount = None
self.id = ''
self.memo = ''
self.sic = None
self.mcc = ''
self.checknum = ''
def __repr__(self):
return "<Transaction units=" + str(self.amount) + ">"
class InvestmentTransaction(object):
AGGREGATE_TYPES = ['buydebt', 'buymf', 'buyopt', 'buyother',
'buystock', 'closureopt', 'income',
'invexpense', 'jrnlfund', 'jrnlsec',
'margininterest', 'reinvest', 'retofcap',
'selldebt', 'sellmf', 'sellopt', 'sellother',
'sellstock', 'split', 'transfer']
def __init__(self, type):
self.type = type.lower()
self.tradeDate = None
self.settleDate = None
self.memo = ''
self.security = ''
self.income_type = ''
self.units = decimal.Decimal(0)
self.unit_price = decimal.Decimal(0)
self.commission = decimal.Decimal(0)
self.fees = decimal.Decimal(0)
self.total = decimal.Decimal(0)
self.tferaction = None
def __repr__(self):
return "<InvestmentTransaction type=" + str(self.type) + ", \
units=" + str(self.units) + ">"
class Position(object):
def __init__(self):
self.security = ''
self.units = decimal.Decimal(0)
self.unit_price = decimal.Decimal(0)
self.market_value = decimal.Decimal(0)
class Institution(object):
def __init__(self):
self.organization = ''
self.fid = ''
class OfxParserException(Exception):
pass
class OfxParser(object):
@classmethod
def parse(cls, file_handle, fail_fast=True, custom_date_format=None):
'''
        parse is the main entry point for an OfxParser. It takes a file
        handle and an optional fail_fast flag.
If fail_fast is True, the parser will fail on any errors.
If fail_fast is False, the parser will log poor statements in the
statement class and continue to run. Note: the library does not
guarantee that no exceptions will be raised to the caller, only
that statements will include bad transactions (which are marked).
'''
cls.fail_fast = fail_fast
cls.custom_date_format = custom_date_format
if not hasattr(file_handle, 'seek'):
            raise TypeError(six.u(
                'parse() accepts a seek-able file handle, not %s'
                % type(file_handle).__name__))
ofx_obj = Ofx()
# Store the headers
ofx_file = OfxPreprocessedFile(file_handle)
ofx_obj.headers = ofx_file.headers
ofx_obj.accounts = []
ofx_obj.signon = None
ofx = soup_maker(ofx_file.fh)
if ofx.find('ofx') is None:
raise OfxParserException('The ofx file is empty!')
sonrs_ofx = ofx.find('sonrs')
if sonrs_ofx:
ofx_obj.signon = cls.parseSonrs(sonrs_ofx)
stmttrnrs = ofx.find('stmttrnrs')
if stmttrnrs:
stmttrnrs_trnuid = stmttrnrs.find('trnuid')
if stmttrnrs_trnuid:
ofx_obj.trnuid = stmttrnrs_trnuid.contents[0].strip()
stmttrnrs_status = stmttrnrs.find('status')
if stmttrnrs_status:
ofx_obj.status = {}
ofx_obj.status['code'] = int(
stmttrnrs_status.find('code').contents[0].strip()
)
ofx_obj.status['severity'] = \
stmttrnrs_status.find('severity').contents[0].strip()
message = stmttrnrs_status.find('message')
ofx_obj.status['message'] = \
message.contents[0].strip() if message else None
ccstmttrnrs = ofx.find('ccstmttrnrs')
if ccstmttrnrs:
ccstmttrnrs_trnuid = ccstmttrnrs.find('trnuid')
if ccstmttrnrs_trnuid:
ofx_obj.trnuid = ccstmttrnrs_trnuid.contents[0].strip()
ccstmttrnrs_status = ccstmttrnrs.find('status')
if ccstmttrnrs_status:
ofx_obj.status = {}
ofx_obj.status['code'] = int(
ccstmttrnrs_status.find('code').contents[0].strip()
)
ofx_obj.status['severity'] = \
ccstmttrnrs_status.find('severity').contents[0].strip()
message = ccstmttrnrs_status.find('message')
ofx_obj.status['message'] = \
message.contents[0].strip() if message else None
stmtrs_ofx = ofx.findAll('stmtrs')
if stmtrs_ofx:
ofx_obj.accounts += cls.parseStmtrs(stmtrs_ofx, AccountType.Bank)
ccstmtrs_ofx = ofx.findAll('ccstmtrs')
if ccstmtrs_ofx:
ofx_obj.accounts += cls.parseStmtrs(
ccstmtrs_ofx, AccountType.CreditCard)
invstmtrs_ofx = ofx.findAll('invstmtrs')
if invstmtrs_ofx:
ofx_obj.accounts += cls.parseInvstmtrs(invstmtrs_ofx)
seclist_ofx = ofx.find('seclist')
if seclist_ofx:
ofx_obj.security_list = cls.parseSeclist(seclist_ofx)
else:
ofx_obj.security_list = None
acctinfors_ofx = ofx.find('acctinfors')
if acctinfors_ofx:
ofx_obj.accounts += cls.parseAcctinfors(acctinfors_ofx, ofx)
fi_ofx = ofx.find('fi')
if fi_ofx:
for account in ofx_obj.accounts:
account.institution = cls.parseOrg(fi_ofx)
if ofx_obj.accounts:
ofx_obj.account = ofx_obj.accounts[0]
return ofx_obj
@classmethod
def parseOfxDateTime(cls, ofxDateTime):
# dateAsString looks something like 20101106160000.00[-5:EST]
# for 6 Nov 2010 4pm UTC-5 aka EST
# Some places (e.g. Newfoundland) have non-integer offsets.
res = re.search(r"\[(?P<tz>[-+]?\d+\.?\d*)\:\w*\]$", ofxDateTime)
if res:
tz = float(res.group('tz'))
else:
tz = 0
timeZoneOffset = datetime.timedelta(hours=tz)
res = re.search(r"^[0-9]*\.([0-9]{0,5})", ofxDateTime)
if res:
msec = datetime.timedelta(seconds=float("0." + res.group(1)))
else:
msec = datetime.timedelta(seconds=0)
try:
local_date = datetime.datetime.strptime(ofxDateTime[:14], '%Y%m%d%H%M%S')
return local_date - timeZoneOffset + msec
except ValueError:
if ofxDateTime[:8] == "00000000":
return None
if not cls.custom_date_format:
return datetime.datetime.strptime(
ofxDateTime[:8], '%Y%m%d') - timeZoneOffset + msec
else:
return datetime.datetime.strptime(
ofxDateTime[:8], cls.custom_date_format) - timeZoneOffset + msec
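    # Example (comment only): '20101106160000.00[-5:EST]' parses as
    # 2010-11-06 16:00 with a -5 h offset, i.e. datetime(2010, 11, 6, 21, 0)
    # in UTC; an all-zero date such as '00000000000000' returns None.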
@classmethod
def parseAcctinfors(cls, acctinfors_ofx, ofx):
all_accounts = []
for i in acctinfors_ofx.findAll('acctinfo'):
accounts = []
if i.find('invacctinfo'):
accounts += cls.parseInvstmtrs([i])
elif i.find('ccacctinfo'):
accounts += cls.parseStmtrs([i], AccountType.CreditCard)
elif i.find('bankacctinfo'):
accounts += cls.parseStmtrs([i], AccountType.Bank)
else:
continue
fi_ofx = ofx.find('fi')
if fi_ofx:
for account in all_accounts:
account.institution = cls.parseOrg(fi_ofx)
desc = i.find('desc')
if hasattr(desc, 'contents'):
for account in accounts:
account.desc = desc.contents[0].strip()
all_accounts += accounts
return all_accounts
@classmethod
def parseInvstmtrs(cls, invstmtrs_list):
ret = []
for invstmtrs_ofx in invstmtrs_list:
account = InvestmentAccount()
acctid_tag = invstmtrs_ofx.find('acctid')
if hasattr(acctid_tag, 'contents'):
try:
account.account_id = acctid_tag.contents[0].strip()
except IndexError:
account.warnings.append(
six.u("Empty acctid tag for %s") % invstmtrs_ofx)
if cls.fail_fast:
raise
brokerid_tag = invstmtrs_ofx.find('brokerid')
if hasattr(brokerid_tag, 'contents'):
try:
account.brokerid = brokerid_tag.contents[0].strip()
except IndexError:
account.warnings.append(
six.u("Empty brokerid tag for %s") % invstmtrs_ofx)
if cls.fail_fast:
raise
account.type = AccountType.Investment
if invstmtrs_ofx:
account.statement = cls.parseInvestmentStatement(
invstmtrs_ofx)
ret.append(account)
return ret
@classmethod
def parseSeclist(cls, seclist_ofx):
securityList = []
for secinfo_ofx in seclist_ofx.findAll('secinfo'):
uniqueid_tag = secinfo_ofx.find('uniqueid')
name_tag = secinfo_ofx.find('secname')
ticker_tag = secinfo_ofx.find('ticker')
memo_tag = secinfo_ofx.find('memo')
if uniqueid_tag and name_tag:
try:
ticker = ticker_tag.contents[0].strip()
except AttributeError:
# ticker can be empty
ticker = None
try:
memo = memo_tag.contents[0].strip()
except AttributeError:
# memo can be empty
memo = None
securityList.append(
Security(uniqueid_tag.contents[0].strip(),
name_tag.contents[0].strip(),
ticker,
memo))
return securityList
@classmethod
def parseInvestmentPosition(cls, ofx):
position = Position()
tag = ofx.find('uniqueid')
if hasattr(tag, 'contents'):
position.security = tag.contents[0].strip()
tag = ofx.find('units')
if hasattr(tag, 'contents'):
position.units = cls.toDecimal(tag)
tag = ofx.find('unitprice')
if hasattr(tag, 'contents'):
position.unit_price = cls.toDecimal(tag)
tag = ofx.find('mktval')
if hasattr(tag, 'contents'):
position.market_value = cls.toDecimal(tag)
tag = ofx.find('dtpriceasof')
if hasattr(tag, 'contents'):
try:
position.date = cls.parseOfxDateTime(tag.contents[0].strip())
except ValueError:
raise
return position
@classmethod
def parseInvestmentTransaction(cls, ofx):
transaction = InvestmentTransaction(ofx.name)
tag = ofx.find('fitid')
if hasattr(tag, 'contents'):
transaction.id = tag.contents[0].strip()
tag = ofx.find('memo')
if hasattr(tag, 'contents'):
transaction.memo = tag.contents[0].strip()
tag = ofx.find('dttrade')
if hasattr(tag, 'contents'):
try:
transaction.tradeDate = cls.parseOfxDateTime(
tag.contents[0].strip())
except ValueError:
raise
tag = ofx.find('dtsettle')
if hasattr(tag, 'contents'):
try:
transaction.settleDate = cls.parseOfxDateTime(
tag.contents[0].strip())
except ValueError:
raise
tag = ofx.find('uniqueid')
if hasattr(tag, 'contents'):
transaction.security = tag.contents[0].strip()
tag = ofx.find('incometype')
if hasattr(tag, 'contents'):
transaction.income_type = tag.contents[0].strip()
tag = ofx.find('units')
if hasattr(tag, 'contents'):
transaction.units = cls.toDecimal(tag)
tag = ofx.find('unitprice')
if hasattr(tag, 'contents'):
transaction.unit_price = cls.toDecimal(tag)
tag = ofx.find('commission')
if hasattr(tag, 'contents'):
transaction.commission = cls.toDecimal(tag)
tag = ofx.find('fees')
if hasattr(tag, 'contents'):
transaction.fees = cls.toDecimal(tag)
tag = ofx.find('total')
if hasattr(tag, 'contents'):
transaction.total = cls.toDecimal(tag)
tag = ofx.find('inv401ksource')
if hasattr(tag, 'contents'):
transaction.inv401ksource = tag.contents[0].strip()
tag = ofx.find('tferaction')
if hasattr(tag, 'contents'):
transaction.tferaction = tag.contents[0].strip()
return transaction
@classmethod
def parseInvestmentStatement(cls, invstmtrs_ofx):
statement = InvestmentStatement()
currency_tag = invstmtrs_ofx.find('curdef')
if hasattr(currency_tag, "contents"):
statement.currency = currency_tag.contents[0].strip().lower()
invtranlist_ofx = invstmtrs_ofx.find('invtranlist')
if invtranlist_ofx is not None:
tag = invtranlist_ofx.find('dtstart')
if hasattr(tag, 'contents'):
try:
statement.start_date = cls.parseOfxDateTime(
tag.contents[0].strip())
except IndexError:
statement.warnings.append(six.u('Empty start date.'))
if cls.fail_fast:
raise
except ValueError:
e = sys.exc_info()[1]
                    statement.warnings.append(six.u('Invalid start date: %s') % e)
if cls.fail_fast:
raise
tag = invtranlist_ofx.find('dtend')
if hasattr(tag, 'contents'):
try:
statement.end_date = cls.parseOfxDateTime(
tag.contents[0].strip())
except IndexError:
statement.warnings.append(six.u('Empty end date.'))
except ValueError:
e = sys.exc_info()[1]
                    statement.warnings.append(six.u('Invalid end date: %s') % e)
if cls.fail_fast:
raise
for transaction_type in ['posmf', 'posstock', 'posopt', 'posother',
'posdebt']:
try:
for investment_ofx in invstmtrs_ofx.findAll(transaction_type):
statement.positions.append(
cls.parseInvestmentPosition(investment_ofx))
except (ValueError, IndexError, decimal.InvalidOperation,
TypeError):
e = sys.exc_info()[1]
if cls.fail_fast:
raise
statement.discarded_entries.append(
                    {six.u('error'): six.u("Error parsing positions: ") + str(e),
                     six.u('content'): investment_ofx}
)
for transaction_type in InvestmentTransaction.AGGREGATE_TYPES:
try:
for investment_ofx in invstmtrs_ofx.findAll(transaction_type):
statement.transactions.append(
cls.parseInvestmentTransaction(investment_ofx))
except (ValueError, IndexError, decimal.InvalidOperation):
e = sys.exc_info()[1]
if cls.fail_fast:
raise
statement.discarded_entries.append(
{six.u('error'): transaction_type + ": " + str(e),
six.u('content'): investment_ofx}
)
for transaction_ofx in invstmtrs_ofx.findAll('invbanktran'):
for stmt_ofx in transaction_ofx.findAll('stmttrn'):
try:
statement.transactions.append(
cls.parseTransaction(stmt_ofx))
except OfxParserException:
ofxError = sys.exc_info()[1]
statement.discarded_entries.append(
{'error': str(ofxError), 'content': transaction_ofx})
if cls.fail_fast:
raise
invbal_ofx = invstmtrs_ofx.find('invbal')
if invbal_ofx is not None:
# <AVAILCASH>18073.98<MARGINBALANCE>+00000000000.00<SHORTBALANCE>+00000000000.00<BUYPOWER>+00000000000.00
availcash_ofx = invbal_ofx.find('availcash')
if availcash_ofx is not None:
statement.available_cash = cls.toDecimal(availcash_ofx)
margin_balance_ofx = invbal_ofx.find('marginbalance')
if margin_balance_ofx is not None:
statement.margin_balance = cls.toDecimal(margin_balance_ofx)
short_balance_ofx = invbal_ofx.find('shortbalance')
if short_balance_ofx is not None:
statement.short_balance = cls.toDecimal(short_balance_ofx)
buy_power_ofx = invbal_ofx.find('buypower')
if buy_power_ofx is not None:
statement.buy_power = cls.toDecimal(buy_power_ofx)
ballist_ofx = invbal_ofx.find('ballist')
if ballist_ofx is not None:
statement.balance_list = []
for balance_ofx in ballist_ofx.findAll('bal'):
brokerage_balance = BrokerageBalance()
name_ofx = balance_ofx.find('name')
if name_ofx is not None:
brokerage_balance.name = name_ofx.contents[0].strip()
description_ofx = balance_ofx.find('desc')
if description_ofx is not None:
brokerage_balance.description = \
description_ofx.contents[0].strip()
value_ofx = balance_ofx.find('value')
if value_ofx is not None:
brokerage_balance.value = cls.toDecimal(value_ofx)
statement.balance_list.append(brokerage_balance)
return statement
@classmethod
def parseOrg(cls, fi_ofx):
institution = Institution()
org = fi_ofx.find('org')
if hasattr(org, 'contents'):
institution.organization = org.contents[0].strip()
fid = fi_ofx.find('fid')
if hasattr(fid, 'contents'):
institution.fid = fid.contents[0].strip()
return institution
@classmethod
def parseSonrs(cls, sonrs):
items = [
'code',
'severity',
'dtserver',
'language',
'dtprofup',
'org',
'fid',
'intu.bid',
'message'
]
idict = {}
for i in items:
try:
idict[i] = sonrs.find(i).contents[0].strip()
except Exception:
idict[i] = None
idict['code'] = int(idict['code'])
if idict['message'] is None:
idict['message'] = ''
return Signon(idict)
@classmethod
def parseStmtrs(cls, stmtrs_list, accountType):
        ''' Parse the <STMTRS> tags and return a list of Account objects. '''
ret = []
for stmtrs_ofx in stmtrs_list:
account = Account()
act_curdef = stmtrs_ofx.find('curdef')
if act_curdef and act_curdef.contents:
account.curdef = act_curdef.contents[0].strip()
acctid_tag = stmtrs_ofx.find('acctid')
if acctid_tag and acctid_tag.contents:
account.account_id = acctid_tag.contents[0].strip()
bankid_tag = stmtrs_ofx.find('bankid')
if bankid_tag and bankid_tag.contents:
account.routing_number = bankid_tag.contents[0].strip()
branchid_tag = stmtrs_ofx.find('branchid')
if branchid_tag and branchid_tag.contents:
account.branch_id = branchid_tag.contents[0].strip()
type_tag = stmtrs_ofx.find('accttype')
if type_tag and type_tag.contents:
account.account_type = type_tag.contents[0].strip()
account.type = accountType
if stmtrs_ofx:
account.statement = cls.parseStatement(stmtrs_ofx)
ret.append(account)
return ret
@classmethod
def parseBalance(cls, statement, stmt_ofx, bal_tag_name, bal_attr,
bal_date_attr, bal_type_string):
bal_tag = stmt_ofx.find(bal_tag_name)
if hasattr(bal_tag, "contents"):
balamt_tag = bal_tag.find('balamt')
dtasof_tag = bal_tag.find('dtasof')
if hasattr(balamt_tag, "contents"):
try:
setattr(statement, bal_attr, cls.toDecimal(balamt_tag))
except (IndexError, decimal.InvalidOperation):
statement.warnings.append(
six.u("%s balance amount was empty for \
%s") % (bal_type_string, stmt_ofx))
if cls.fail_fast:
raise OfxParserException("Empty %s balance\
" % bal_type_string)
if hasattr(dtasof_tag, "contents"):
try:
setattr(statement, bal_date_attr, cls.parseOfxDateTime(
dtasof_tag.contents[0].strip()))
except IndexError:
statement.warnings.append(
six.u("%s balance date was empty for %s\
") % (bal_type_string, stmt_ofx))
if cls.fail_fast:
raise
except ValueError:
statement.warnings.append(
six.u("%s balance date was not allowed for \
%s") % (bal_type_string, stmt_ofx))
if cls.fail_fast:
raise
@classmethod
def parseStatement(cls, stmt_ofx):
'''
Parse a statement in ofx-land and return a Statement object.
'''
statement = Statement()
dtstart_tag = stmt_ofx.find('dtstart')
if hasattr(dtstart_tag, "contents"):
try:
statement.start_date = cls.parseOfxDateTime(
dtstart_tag.contents[0].strip())
except IndexError:
statement.warnings.append(
six.u("Statement start date was empty for %s") % stmt_ofx)
if cls.fail_fast:
raise
except ValueError:
statement.warnings.append(
six.u("Statement start date was not allowed for \
%s") % stmt_ofx)
if cls.fail_fast:
raise
dtend_tag = stmt_ofx.find('dtend')
if hasattr(dtend_tag, "contents"):
try:
statement.end_date = cls.parseOfxDateTime(
dtend_tag.contents[0].strip())
except IndexError:
statement.warnings.append(
six.u("Statement start date was empty for %s") % stmt_ofx)
if cls.fail_fast:
raise
except ValueError:
msg = six.u("Statement start date was not formatted "
"correctly for %s")
statement.warnings.append(msg % stmt_ofx)
if cls.fail_fast:
raise
except TypeError:
statement.warnings.append(
six.u("Statement start date was not allowed for \
%s") % stmt_ofx)
if cls.fail_fast:
raise
currency_tag = stmt_ofx.find('curdef')
if hasattr(currency_tag, "contents"):
try:
statement.currency = currency_tag.contents[0].strip().lower()
except IndexError:
statement.warnings.append(
six.u("Currency definition was empty for %s") % stmt_ofx)
if cls.fail_fast:
raise
cls.parseBalance(statement, stmt_ofx, 'ledgerbal',
'balance', 'balance_date', 'ledger')
cls.parseBalance(statement, stmt_ofx, 'availbal', 'available_balance',
'available_balance_date', 'ledger')
for transaction_ofx in stmt_ofx.findAll('stmttrn'):
try:
statement.transactions.append(
cls.parseTransaction(transaction_ofx))
except OfxParserException:
ofxError = sys.exc_info()[1]
statement.discarded_entries.append(
{'error': str(ofxError), 'content': transaction_ofx})
if cls.fail_fast:
raise
return statement
@classmethod
def parseTransaction(cls, txn_ofx):
'''
Parse a transaction in ofx-land and return a Transaction object.
'''
transaction = Transaction()
type_tag = txn_ofx.find('trntype')
if hasattr(type_tag, 'contents'):
try:
transaction.type = type_tag.contents[0].lower().strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction type"))
except TypeError:
raise OfxParserException(
six.u("No Transaction type (a required field)"))
name_tag = txn_ofx.find('name')
if hasattr(name_tag, "contents"):
try:
transaction.payee = name_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction name"))
except TypeError:
raise OfxParserException(
six.u("No Transaction name (a required field)"))
memo_tag = txn_ofx.find('memo')
if hasattr(memo_tag, "contents"):
try:
transaction.memo = memo_tag.contents[0].strip()
except IndexError:
# Memo can be empty.
pass
except TypeError:
pass
amt_tag = txn_ofx.find('trnamt')
if hasattr(amt_tag, "contents"):
try:
transaction.amount = cls.toDecimal(amt_tag)
except IndexError:
raise OfxParserException("Invalid Transaction Date")
except decimal.InvalidOperation:
# Some banks use a null transaction for including interest
# rate changes on your statement.
if amt_tag.contents[0].strip() in ('null', '-null'):
transaction.amount = 0
else:
raise OfxParserException(
six.u("Invalid Transaction Amount: '%s'") % amt_tag.contents[0])
except TypeError:
raise OfxParserException(
six.u("No Transaction Amount (a required field)"))
else:
raise OfxParserException(
six.u("Missing Transaction Amount (a required field)"))
date_tag = txn_ofx.find('dtposted')
if hasattr(date_tag, "contents"):
try:
transaction.date = cls.parseOfxDateTime(
date_tag.contents[0].strip())
except IndexError:
raise OfxParserException("Invalid Transaction Date")
except ValueError:
ve = sys.exc_info()[1]
raise OfxParserException(str(ve))
except TypeError:
raise OfxParserException(
six.u("No Transaction Date (a required field)"))
else:
raise OfxParserException(
six.u("Missing Transaction Date (a required field)"))
user_date_tag = txn_ofx.find('dtuser')
if hasattr(user_date_tag, "contents"):
try:
transaction.user_date = cls.parseOfxDateTime(
user_date_tag.contents[0].strip())
except IndexError:
raise OfxParserException("Invalid Transaction User Date")
except ValueError:
ve = sys.exc_info()[1]
raise OfxParserException(str(ve))
except TypeError:
pass
id_tag = txn_ofx.find('fitid')
if hasattr(id_tag, "contents"):
try:
transaction.id = id_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty FIT id (a required \
field)"))
except TypeError:
raise OfxParserException(six.u("No FIT id (a required field)"))
else:
raise OfxParserException(six.u("Missing FIT id (a required \
field)"))
sic_tag = txn_ofx.find('sic')
if hasattr(sic_tag, 'contents'):
try:
transaction.sic = sic_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty transaction Standard \
Industry Code (SIC)"))
if transaction.sic is not None and transaction.sic in mcc.codes:
try:
                transaction.mcc = mcc.codes.get(
                    transaction.sic, '').get('combined description')
except IndexError:
raise OfxParserException(six.u("Empty transaction Merchant Category \
Code (MCC)"))
except AttributeError:
if cls.fail_fast:
raise
checknum_tag = txn_ofx.find('checknum')
if hasattr(checknum_tag, 'contents'):
try:
transaction.checknum = checknum_tag.contents[0].strip()
except IndexError:
raise OfxParserException(six.u("Empty Check (or other reference) \
number"))
return transaction
@classmethod
def toDecimal(cls, tag):
d = tag.contents[0].strip()
# Handle 10,000.50 formatted numbers
if re.search(r'.*\..*,', d):
d = d.replace('.', '')
# Handle 10.000,50 formatted numbers
if re.search(r'.*,.*\.', d):
d = d.replace(',', '')
# Handle 10000,50 formatted numbers
if '.' not in d and ',' in d:
d = d.replace(',', '.')
# Handle 1 025,53 formatted numbers
d = d.replace(' ', '')
# Handle +1058,53 formatted numbers
d = d.replace('+', '')
return decimal.Decimal(d)
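# Examples (comment only) of the locale normalization performed by toDecimal:
#   '10,000.50' -> Decimal('10000.50')   (comma thousands, dot decimal)
#   '10.000,50' -> Decimal('10000.50')   (dot thousands, comma decimal)
#   '10000,50'  -> Decimal('10000.50')   (bare comma decimal)
#   '1 025,53'  -> Decimal('1025.53')    (space thousands)
#   '+1058,53'  -> Decimal('1058.53')    (leading plus stripped)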
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/hair/sullustan/shared_sul_hair_s15_f.iff"
result.attribute_template_id = -1
result.stfName("hair_name","hair")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
import SuitDNA
from SuitLegList import *
import SuitTimings
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from pandac.PandaModules import *
from pandac.PandaModules import Point3
from toontown.battle import SuitBattleGlobals
from toontown.toonbase import TTLocalizer
TIME_BUFFER_PER_WPT = 0.25
TIME_DIVISOR = 100
DISTRIBUTE_TASK_CREATION = 0
class SuitBase:
notify = DirectNotifyGlobal.directNotify.newCategory('SuitBase')
def __init__(self):
self.dna = None
self.level = 0
self.maxHP = 10
self.currHP = 10
self.isSkelecog = 0
self.isWaiter = 0
return
def delete(self):
if hasattr(self, 'legList'):
del self.legList
def getStyleName(self):
if hasattr(self, 'dna') and self.dna:
return self.dna.name
else:
self.notify.error('called getStyleName() before dna was set!')
return 'unknown'
def getStyleDept(self):
if hasattr(self, 'dna') and self.dna:
return SuitDNA.getDeptFullname(self.dna.dept)
else:
self.notify.error('called getStyleDept() before dna was set!')
return 'unknown'
def getLevel(self):
return self.level
def setLevel(self, level):
self.level = level
nameWLevel = TTLocalizer.SuitBaseNameWithLevel % {'name': self.name,
'dept': self.getStyleDept(),
'level': self.getActualLevel()}
self.setDisplayName(nameWLevel)
attributes = SuitBattleGlobals.SuitAttributes[self.dna.name]
self.maxHP = attributes['hp'][self.level]
self.currHP = self.maxHP
def getSkelecog(self):
return self.isSkelecog
def setSkelecog(self, flag):
self.isSkelecog = flag
def setWaiter(self, flag):
self.isWaiter = flag
def getActualLevel(self):
if hasattr(self, 'dna'):
return SuitBattleGlobals.getActualFromRelativeLevel(self.getStyleName(), self.level) + 1
else:
self.notify.warning('called getActualLevel with no DNA, returning 1 for level')
return 1
def setPath(self, path):
self.path = path
self.pathLength = self.path.getNumPoints()
def getPath(self):
return self.path
def printPath(self):
print '%d points in path' % self.pathLength
for currPathPt in xrange(self.pathLength):
indexVal = self.path.getPointIndex(currPathPt)
print '\t', self.sp.dnaStore.getSuitPointWithIndex(indexVal)
def makeLegList(self):
self.legList = SuitLegList(self.path, self.sp.dnaStore)
|
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
generatedSpeciesConstraints(
maximumRadicalElectrons = 3,
)
species(
label='octane',
reactive=True,
structure=SMILES("C(CCCCC)CC"),
)
species(
label='oxygen',
reactive=True,
structure=SMILES("[O][O]"),
)
liquidReactor(
temperature=(500,'K'),
initialConcentrations={
"octane": (6.154e-3,'mol/cm^3'),
"oxygen": (4.953e-6,'mol/cm^3')
},
terminationTime=(5,'s'),
)
solvation(
solvent='octane'
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceKeepInEdge=1E-9,
toleranceMoveToCore=0.001,
toleranceInterruptSimulation=0.1,
maximumEdgeSpecies=100000
)
options(
units='si',
saveRestartPeriod=None,
drawMolecules=False,
generatePlots=False,
saveSimulationProfiles=True,
)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(blank=True, max_length=255),
),
]
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/inventory/shared_character_inventory.iff"
result.attribute_template_id = -1
result.stfName("item_n","inventory")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_xaan_talmaron_q3_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_dant_n","xaan_talmaron_q3_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
import sys
import logging
from elftools.elf.elffile import ELFFile
from elftools.elf.descriptions import describe_p_type
import capstone
import okita.code_coverage as code_coverage
import okita.binary_disassembler as binary_disassembler
def linear_sweep_disassemble(base_addr, code, arch, mode):
# Dumb strategy, disass until:
# - end of code reached OR
# - ret or hlt instruction is met
disassembler = capstone.Cs(arch, mode)
for current_instruction in disassembler.disasm(code, base_addr):
print("%s\t%s" % (current_instruction.mnemonic, current_instruction.op_str))
def create_start_proc_region(content, base_address):
region = code_coverage.CodeRegion("_start", 0, base_address)
disassembler = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
for instruction in disassembler.disasm(content, 0x00000000):
region._size += instruction.size
if instruction.mnemonic == "ret" or instruction.mnemonic == "hlt":
break
return region
if len(sys.argv) < 2:
print("Usage: %s <binary>" % (sys.argv[0]))
sys.exit(-1)
def get_elf_information(elf_file):
base_address = 0
start_offset = 0
interp_segment = None
for segment in elf_file.iter_segments():
if(segment['p_type'] == 'PT_INTERP'):
interp_segment = (segment['p_vaddr'], segment['p_filesz'])
if(segment['p_type'] == 'PT_LOAD' and segment['p_offset'] == 0):
base_address = segment['p_vaddr']
if(elf_file['e_entry'] >= segment['p_vaddr'] and elf_file['e_entry'] < (segment['p_vaddr'] + segment['p_filesz'])):
start_offset = segment['p_offset'] + (elf_file['e_entry'] - base_address)
return (base_address, start_offset, interp_segment)
def elf_gen_code_coverage(elf_file):
elf_base_address, start_offset, interp_segment = get_elf_information(elf_file)
disasm = binary_disassembler.NaiveBinaryDisassembler(sys.argv[1])
# Covering the elf header
regions = [
code_coverage.Elf32EhdrRegion("header", size=elf_file['e_ehsize'], base_address=elf_base_address)
]
# Covering the program headers
number_of_program_headers = elf_file['e_phnum']
elf_program_header_size = 32
i = 0
base_address = elf_base_address + elf_file['e_ehsize']
while i < number_of_program_headers:
regions.append(
code_coverage.Elf32PhdrRegion(
"program_header_%d" % (i),
size=elf_program_header_size,
base_address=base_address
)
)
base_address += elf_program_header_size
i += 1
if interp_segment and interp_segment[0] == base_address:
regions.append(
code_coverage.ElfInterpRegion(
"interp_segment",
size=interp_segment[1],
base_address = base_address
)
)
base_address += interp_segment[1]
regions.append(
code_coverage.UnknownRegion(
"before_start",
size=(elf_base_address+start_offset)-base_address,
base_address=base_address
)
)
# start code until ret/hlt
regions.append(create_start_proc_region(binary_content[start_offset:], elf_file['e_entry']))
# rest of the code
regions.append(code_coverage.UnknownRegion("after_start", len(binary_content) - (start_offset + regions[-1].size), elf_file['e_entry'] + (start_offset +regions[-1].size)))
cover = code_coverage.CodeCoverage(disasm, regions, base_address=elf_base_address)
return cover
with open(sys.argv[1], "rb") as file_handle:
binary_content = file_handle.read()
elf_file = ELFFile(file_handle)
print("Creating code coverage...")
cover = elf_gen_code_coverage(elf_file)
print("Disassembling the file...")
cover.disassemble(binary_content)
print("Done.")
file_handle.close()
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cabotapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EmailAlert',
fields=[
('alertplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cabotapp.AlertPlugin')),
],
options={
'abstract': False,
},
bases=('cabotapp.alertplugin',),
),
]
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/fishing/fish/shared_blowfish.iff"
result.attribute_template_id = -1
result.stfName("fish_n","blowfish")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from __future__ import absolute_import, print_function
import logging
import requests
from requests.exceptions import Timeout
from threading import Thread
from time import sleep
import six
import ssl
from tweepy.models import Status
from tweepy.api import API
from tweepy.error import TweepError
from tweepy.utils import import_simplejson, urlencode_noplus
json = import_simplejson()
STREAM_VERSION = '1.1'
class StreamListener(object):
def __init__(self, api=None):
self.api = api or API()
def on_connect(self):
"""Called once connected to streaming server.
This will be invoked once a successful response
is received from the server. Allows the listener
to perform some work prior to entering the read loop.
"""
pass
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
data = json.loads(raw_data)
if 'in_reply_to_status_id' in data:
status = Status.parse(self.api, data)
if self.on_status(status) is False:
return False
elif 'delete' in data:
delete = data['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'event' in data:
status = Status.parse(self.api, data)
if self.on_event(status) is False:
return False
elif 'direct_message' in data:
status = Status.parse(self.api, data)
if self.on_direct_message(status) is False:
return False
elif 'friends' in data:
if self.on_friends(data['friends']) is False:
return False
elif 'limit' in data:
if self.on_limit(data['limit']['track']) is False:
return False
elif 'disconnect' in data:
if self.on_disconnect(data['disconnect']) is False:
return False
elif 'warning' in data:
if self.on_warning(data['warning']) is False:
return False
else:
logging.error("Unknown message type: " + str(raw_data))
def on_status(self, status):
"""Called when a new status arrives"""
return
def on_exception(self, exception):
"""Called when an unhandled exception occurs."""
return
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
return
def on_event(self, status):
"""Called when a new event arrives"""
return
def on_direct_message(self, status):
"""Called when a new direct message arrives"""
return
def on_friends(self, friends):
"""Called when a friends list arrives.
friends is a list that contains user_id
"""
return
def on_limit(self, track):
"""Called when a limitation notice arrives"""
return
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
return False
def on_timeout(self):
"""Called when stream connection times out"""
return
def on_disconnect(self, notice):
"""Called when twitter sends a disconnect notice
Disconnect codes are listed here:
https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect
"""
return
def on_warning(self, notice):
"""Called when a disconnection warning message arrives"""
return
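# Hedged usage sketch (comment only): consumers typically subclass
# StreamListener and override only the callbacks they care about, e.g.
#
#   class PrintingListener(StreamListener):
#       def on_status(self, status):
#           print(status.text)
#       def on_error(self, status_code):
#           return False  # returning False stops the stream
#
# and then pass an instance to Stream(auth, PrintingListener()).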
class ReadBuffer(object):
"""Buffer data from the response in a smarter way than httplib/requests can.
Tweets are roughly in the 2-12kb range, averaging around 3kb.
Requests/urllib3/httplib/socket all use socket.read, which blocks
until enough data is returned. On some systems (eg google appengine), socket
reads are quite slow. To combat this latency we can read big chunks,
but the blocking part means we won't get results until enough tweets
have arrived. That may not be a big deal for high throughput systems.
    For low throughput systems we don't want to sacrifice latency, so we
use small chunks so it can read the length and the tweet in 2 read calls.
"""
def __init__(self, stream, chunk_size):
self._stream = stream
self._buffer = u""
self._chunk_size = chunk_size
def read_len(self, length):
while True:
if len(self._buffer) >= length:
return self._pop(length)
read_len = max(self._chunk_size, length - len(self._buffer))
self._buffer += self._stream.read(read_len).decode("ascii")
def read_line(self, sep='\n'):
start = 0
while True:
loc = self._buffer.find(sep, start)
if loc >= 0:
return self._pop(loc + len(sep))
else:
start = len(self._buffer)
self._buffer += self._stream.read(self._chunk_size).decode("ascii")
def _pop(self, length):
r = self._buffer[:length]
self._buffer = self._buffer[length:]
return r
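# Illustration (comment only): with the 'delimited=length' format used by the
# streaming endpoints below, each payload is preceded by its byte length on
# its own line, so a raw stream of b"5\nhello" is consumed as
# read_line() -> "5\n" followed by read_len(5) -> "hello" -- two small reads
# per tweet, as the class docstring describes.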
class Stream(object):
host = 'stream.twitter.com'
def __init__(self, auth, listener, **options):
self.auth = auth
self.listener = listener
self.running = False
self.timeout = options.get("timeout", 300.0)
self.retry_count = options.get("retry_count")
# values according to
# https://dev.twitter.com/docs/streaming-apis/connecting#Reconnecting
self.retry_time_start = options.get("retry_time", 5.0)
self.retry_420_start = options.get("retry_420", 60.0)
self.retry_time_cap = options.get("retry_time_cap", 320.0)
self.snooze_time_step = options.get("snooze_time", 0.25)
self.snooze_time_cap = options.get("snooze_time_cap", 16)
# The default socket.read size. Default to less than half the size of
# a tweet so that it reads tweets with the minimal latency of 2 reads
# per tweet. Values higher than ~1kb will increase latency by waiting
# for more data to arrive but may also increase throughput by doing
# fewer socket read calls.
self.chunk_size = options.get("chunk_size", 512)
self.verify = options.get("verify", True)
self.api = API()
self.session = requests.Session()
self.session.headers = options.get("headers") or {}
self.session.params = None
self.body = None
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
def _run(self):
# Authenticate
url = "https://%s%s" % (self.host, self.url)
# Connect and process the stream
error_counter = 0
resp = None
exception = None
while self.running:
if self.retry_count is not None:
if error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
auth = self.auth.apply_auth()
resp = self.session.request('POST',
url,
data=self.body,
timeout=self.timeout,
stream=True,
auth=auth,
verify=self.verify)
if resp.status_code != 200:
if self.listener.on_error(resp.status_code) is False:
break
error_counter += 1
if resp.status_code == 420:
self.retry_time = max(self.retry_420_start,
self.retry_time)
sleep(self.retry_time)
self.retry_time = min(self.retry_time * 2,
self.retry_time_cap)
else:
error_counter = 0
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
self.listener.on_connect()
self._read_loop(resp)
except (Timeout, ssl.SSLError) as exc:
# This is still necessary, as a SSLError can actually be
# thrown when using Requests
# If it's not time out treat it like any other exception
if isinstance(exc, ssl.SSLError):
if not (exc.args and 'timed out' in str(exc.args[0])):
exception = exc
break
if self.listener.on_timeout() is False:
break
if self.running is False:
break
sleep(self.snooze_time)
self.snooze_time = min(self.snooze_time + self.snooze_time_step,
self.snooze_time_cap)
except Exception as exc:
exception = exc
# any other exception is fatal, so kill loop
break
# cleanup
self.running = False
if resp:
resp.close()
self.session = requests.Session()
if exception:
# call a handler first so that the exception can be logged.
self.listener.on_exception(exception)
raise exception
def _data(self, data):
if self.listener.on_data(data) is False:
self.running = False
def _read_loop(self, resp):
buf = ReadBuffer(resp.raw, self.chunk_size)
while self.running:
length = 0
while True:
line = buf.read_line().strip()
if not line:
pass # keep-alive new lines are expected
elif line.isdigit():
length = int(line)
break
else:
raise TweepError('Expecting length, unexpected value found')
next_status_obj = buf.read_len(length)
if self.running:
self._data(next_status_obj)
# # Note: keep-alive newlines might be inserted before each length value.
# # read until we get a digit...
# c = b'\n'
# for c in resp.iter_content(decode_unicode=True):
# if c == b'\n':
# continue
# break
#
# delimited_string = c
#
# # read rest of delimiter length..
# d = b''
# for d in resp.iter_content(decode_unicode=True):
# if d != b'\n':
# delimited_string += d
# continue
# break
#
# # read the next twitter status object
# if delimited_string.decode('utf-8').strip().isdigit():
# status_id = int(delimited_string)
# next_status_obj = resp.raw.read(status_id)
# if self.running:
# self._data(next_status_obj.decode('utf-8'))
if resp.raw._fp.isclosed():
self.on_closed(resp)
def _start(self, async):
self.running = True
if async:
self._thread = Thread(target=self._run)
self._thread.start()
else:
self._run()
def on_closed(self, resp):
""" Called when the response has been closed by Twitter """
pass
def userstream(self,
stall_warnings=False,
_with=None,
replies=None,
track=None,
locations=None,
async=False,
encoding='utf8'):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/user.json' % STREAM_VERSION
self.host = 'userstream.twitter.com'
if stall_warnings:
self.session.params['stall_warnings'] = stall_warnings
if _with:
self.session.params['with'] = _with
if replies:
self.session.params['replies'] = replies
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.session.params['locations'] = ','.join(['%.2f' % l for l in locations])
if track:
self.session.params['track'] = u','.join(track).encode(encoding)
self._start(async)
def firehose(self, count=None, async=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/firehose.json' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(async)
def retweet(self, async=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/retweet.json' % STREAM_VERSION
self._start(async)
def sample(self, async=False, languages=None):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/sample.json' % STREAM_VERSION
if languages:
self.session.params['language'] = ','.join(map(str, languages))
self._start(async)
def filter(self, follow=None, track=None, async=False, locations=None,
stall_warnings=False, languages=None, encoding='utf8'):
self.session.params = {}
self.session.headers['Content-type'] = "application/x-www-form-urlencoded"
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/filter.json' % STREAM_VERSION
if follow:
self.session.params['follow'] = u','.join(follow).encode(encoding)
if track:
self.session.params['track'] = u','.join(track).encode(encoding)
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.session.params['locations'] = u','.join(['%.4f' % l for l in locations])
if stall_warnings:
self.session.params['stall_warnings'] = stall_warnings
if languages:
self.session.params['language'] = u','.join(map(str, languages))
self.body = urlencode_noplus(self.session.params)
self.session.params = {'delimited': 'length'}
self.host = 'stream.twitter.com'
self._start(async)
def sitestream(self, follow, stall_warnings=False,
with_='user', replies=False, async=False):
self.parameters = {}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/site.json' % STREAM_VERSION
self.parameters['follow'] = u','.join(map(six.text_type, follow))
self.parameters['delimited'] = 'length'
if stall_warnings:
self.parameters['stall_warnings'] = stall_warnings
if with_:
self.parameters['with'] = with_
if replies:
self.parameters['replies'] = replies
self.body = urlencode_noplus(self.parameters)
self._start(async)
def disconnect(self):
if self.running is False:
return
self.running = False
|
from ...features import Feature
class Infonoise(Feature):
def __init__(self, name, stopwords, stem_word, words_source):
self.stopwords = set(stopwords)
self.stem_word = stem_word
super().__init__(name, self.process, returns=float,
depends_on=[words_source])
def process(self, words):
non_stopwords = (w for w in words if w.lower() not in self.stopwords)
non_stopword_stems = (self.stem_word(w) for w in non_stopwords)
length_of_stemmed = sum(len(w) for w in non_stopword_stems)
if len(words) > 0:
length_of_words = sum(len(w) for w in words)
else:
length_of_words = 0
return length_of_stemmed / max(length_of_words, 1)
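# Hedged illustration (comment only): for words ['The', 'cats', 'ran'] with
# stopwords {'the'} and a stemmer that strips a trailing 's', the non-stopword
# stems contribute len('cat') + len('ran') = 6 characters against a raw total
# of 3 + 4 + 3 = 10, so process() returns 0.6 -- the fraction of
# content-bearing text that survives stopword removal and stemming.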
|