| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
import re
from rarfile import _next_newvol, _next_oldvol
from ..plugin import StreamerBase, ProcessorBase
class RarStreamer(StreamerBase):
plugin_name = 'rar'
def __init__(self, item, lazy=False):
self.item = item
self.lazy = lazy
def _find_all_first_files(self, item):
"""
Does not support the full range of ways a rar archive can be
split, as that would require reading the file to determine
which naming scheme is actually in use.
"""
for listed_item in item.list():
new_style = re.findall(r'(?i)\.part(\d+)\.rar$', listed_item.id)
if new_style:
if int(new_style[0]) == 1:
yield 'new', listed_item
elif listed_item.id.lower().endswith('.rar'):
yield 'old', listed_item
def _find_all_filesets(self, item):
items = {listed_item.id.lower(): listed_item for listed_item in item.list() if listed_item.is_readable}
filesets = []
for style, first_item in self._find_all_first_files(item):
fileset = []
fileset.append(first_item)
last_item_id = first_item.id.lower()
while True:
if style == 'old':
next_item_id = _next_oldvol(last_item_id)
elif style == 'new':
next_item_id = _next_newvol(last_item_id)
if next_item_id not in items:
break
fileset.append(items[next_item_id])
last_item_id = next_item_id
filesets.append(fileset)
return filesets
def _find_biggest_fileset(self, item):
filesets = self._find_all_filesets(item)
best_fileset_size, best_fileset = 0, None
for fileset in filesets:
fileset_size = sum(x['size'] for x in fileset)
if fileset_size > best_fileset_size:
best_fileset = fileset
best_fileset_size = fileset_size
return best_fileset_size, best_fileset
def evaluate(self, include_fileset=False):
if not self.item.is_listable:
return None
best_fileset_size, best_fileset = self._find_biggest_fileset(self.item)
# we would prefer the same file if it is extracted,
# so let's apply a small factor to take extraction
# overhead into consideration
evaluation = int(best_fileset_size * 0.95)
if include_fileset:
return evaluation, best_fileset
else:
return evaluation
def stream(self):
best_fileset_size, best_fileset = self._find_biggest_fileset(self.item)
rar_processor_cls = ProcessorBase.find_plugin('rar')
return rar_processor_cls(self.item, best_fileset[0], lazy=self.lazy)
|
JohnDoee/thomas
|
thomas/streamers/rar.py
|
Python
|
mit
| 2,781
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#     ||          ____  _ __
#  +------+      / __ )(_) /_______________ _____  ___
#  | 0xBC |     / __  / / __/ ___/ ___/ __ `/_  / / _ \
#  +------+    / /_/ / / /_/ /__/ /  / /_/ / / /_/  __/
#   ||  ||    /_____/_/\__/\___/_/   \__,_/ /___/\___/
#
# Copyright (C) 2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Singleton class.
"""
__author__ = 'Bitcraze AB'
__all__ = ['Singleton']
class Singleton(type):
"""Class for making singletons"""
_instances = {}
def __call__(cls, *args, **kwargs):
"""Called when creating new class"""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
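# A minimal usage sketch (hypothetical class, Python 2 metaclass syntax
# to match this module):
#
#     class Config(object):
#         __metaclass__ = Singleton
#
#     assert Config() is Config()  # repeated calls share one instance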
|
WSCU/crazyflie_ros
|
src/cfclient/utils/singleton.py
|
Python
|
gpl-2.0
| 1,499
|
icons = {True : 'fa fa-toggle-on', False : 'fa fa-toggle-off'}
info = {'name' : 'basic', 'number' : '20', 'types' : False}
fields = {'type' : "bool", 'readonly' : True, 'name' : 'Basic', 'web_field' :
False}
def icon(value):
return icons[bool(value)]
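# Worked example (hypothetical payload): event({'value': '14'}) parses the
# hex string to 20, so 'state' is True and 'icon' is 'fa fa-toggle-on'.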
def event(payload):
data = {'commandclass' : info, 'commandclasstype' : None,
'fields' : fields}
data['value'] = int(payload['value'], 16)
data['state'] = bool(data['value'])  # non-zero parsed value means "on"
data['icon'] = icons[data['state']]
return data
|
CTSNE/NodeDefender
|
NodeDefender/icpe/zwave/commandclass/basic/__init__.py
|
Python
|
mit
| 540
|
import markdown
from markdown import etree
DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
DEFAULT_CREATOR = "Yuri Takhteyev"
DEFAULT_TITLE = "Markdown in Python"
GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"
month_map = { "Jan" : "01",
"Feb" : "02",
"March" : "03",
"April" : "04",
"May" : "05",
"June" : "06",
"July" : "07",
"August" : "08",
"September" : "09",
"October" : "10",
"November" : "11",
"December" : "12" }
def get_time(heading):
heading = heading.split("-")[0]
heading = heading.strip().replace(",", " ").replace(".", " ")
month, date, year = heading.split()
month = month_map[month]
return rdftime(" ".join((month, date, year, "12:00:00 AM")))
def rdftime(time):
time = time.replace(":", " ")
time = time.replace("/", " ")
time = time.split()
return "%s-%s-%sT%s:%s:%s-08:00" % (time[0], time[1], time[2],
time[3], time[4], time[5])
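# Worked example (hypothetical heading "March 27, 2008 - ..."):
# get_time() maps it to rdftime("03 27 2008 12:00:00 AM"), which
# returns "03-27-2008T12:00:00-08:00".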
def get_date(text):
return "date"
class RssExtension (markdown.Extension):
def extendMarkdown(self, md, md_globals):
self.config = { 'URL' : [DEFAULT_URL, "Main URL"],
'CREATOR' : [DEFAULT_CREATOR, "Feed creator's name"],
'TITLE' : [DEFAULT_TITLE, "Feed title"] }
md.xml_mode = True
# Insert a tree-processor that would actually add the title tag
treeprocessor = RssTreeProcessor(md)
treeprocessor.ext = self
md.treeprocessors['rss'] = treeprocessor
md.stripTopLevelTags = 0
md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
class RssTreeProcessor(markdown.treeprocessors.Treeprocessor):
def run (self, root):
rss = etree.Element("rss")
rss.set("version", "2.0")
channel = etree.SubElement(rss, "channel")
for tag, text in (("title", self.ext.getConfig("TITLE")),
("link", self.ext.getConfig("URL")),
("description", None)):
element = etree.SubElement(channel, tag)
element.text = text
for child in root:
if child.tag in ["h1", "h2", "h3", "h4", "h5"]:
heading = child.text.strip()
item = etree.SubElement(channel, "item")
link = etree.SubElement(item, "link")
link.text = self.ext.getConfig("URL")
title = etree.SubElement(item, "title")
title.text = heading
guid = ''.join([x for x in heading if x.isalnum()])
guidElem = etree.SubElement(item, "guid")
guidElem.text = guid
guidElem.set("isPermaLink", "false")
elif child.tag in ["p"]:
try:
description = etree.SubElement(item, "description")
except UnboundLocalError:
# Item not defined - moving on
pass
else:
if len(child):
content = "\n".join([etree.tostring(node)
for node in child])
else:
content = child.text
pholder = self.markdown.htmlStash.store(
"<![CDATA[ %s]]>" % content)
description.text = pholder
return rss
def makeExtension(configs):
return RssExtension(configs)
|
lepture/Vealous
|
vealous/markdown/extensions/rss.py
|
Python
|
bsd-3-clause
| 3,693
|
from django.db import models
from django.conf import settings
from django.core.validators import MinValueValidator, MaxValueValidator
from ekratia.topics.models import Topic
class Delegate(models.Model):
"""
Delegate Model stores the delegations made by user in the system.
"""
#: User delegating
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="user_set")
#: User to delegate
delegate = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name="delegate_set")
topic = models.ForeignKey(Topic, null=True, blank=True)
value = models.FloatField(validators=[
MinValueValidator(0.0),
MaxValueValidator(1.0)], default=0.0)
def __unicode__(self):
return "%s delegates to %s" % (self.user, self.delegate)
|
andresgz/ekratia
|
ekratia/delegates/models.py
|
Python
|
bsd-3-clause
| 857
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.auth.models import AnonymousUser
try:
from django.conf.urls import url, patterns, include, handler404, handler500
except ImportError:
from django.conf.urls.defaults import url, patterns, include, handler404, handler500 # pyflakes:ignore
__all__ = [
'User',
'Group',
'Permission',
'AnonymousUser',
'get_user_model',
'user_model_label',
'url',
'patterns',
'include',
'handler404',
'handler500'
]
# Django 1.5 compatibility utilities, providing support for custom User models.
# Since get_user_model() causes a circular import if called when app models are
# being loaded, the user_model_label should be used when possible, with calls
# to get_user_model deferred to execution time
# user_model_label = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
user_model_label = 'accounts.Operator'
# try:
# from django.contrib.auth import get_user_model
# except ImportError:
# from django.contrib.auth.models import User
# get_user_model = lambda: User
from accounts.models import Operator
get_user_model = lambda: Operator
def get_user_model_path():
"""
Returns 'app_label.ModelName' for User model. Basically if
``AUTH_USER_MODEL`` is set at settings it would be returned, otherwise
``auth.User`` is returned.
"""
return getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def get_user_permission_full_codename(perm):
"""
Returns 'app_label.<perm>_<usermodulename>'. If standard ``auth.User`` is
used, for 'change' perm this would return ``auth.change_user`` and if
``myapp.CustomUser`` is used it would return ``myapp.change_customuser``.
"""
User = get_user_model()
return '%s.%s_%s' % (User._meta.app_label, perm, User._meta.module_name)
def get_user_permission_codename(perm):
"""
Returns '<perm>_<usermodulename>'. If standard ``auth.User`` is
used, for 'change' perm this would return ``change_user`` and if
``myapp.CustomUser`` is used it would return ``change_customuser``.
"""
return get_user_permission_full_codename(perm).split('.')[1]
# Python 3
try:
unicode = unicode # pyflakes:ignore
basestring = basestring # pyflakes:ignore
str = str # pyflakes:ignore
except NameError:
basestring = unicode = str = str
__all__ = ['User', 'Group', 'Permission', 'AnonymousUser']
|
khamaileon/django-guardian
|
guardian/compat.py
|
Python
|
bsd-2-clause
| 2,527
|
'''@file test.py
this file will test the ASR combined with an LM'''
import os
from six.moves import configparser
import tensorflow as tf
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from nabu.neuralnetworks.classifiers import asr_lm_classifier
from nabu.neuralnetworks.decoders import decoder_factory
from nabu.processing import feature_reader, target_coder
tf.app.flags.DEFINE_string('asr_expdir', 'expdir',
'The asr experiments directory')
tf.app.flags.DEFINE_string('lm_expdir', 'expdir',
'The lm experiments directory')
FLAGS = tf.app.flags.FLAGS
def main(_):
'''does everything for testing'''
decoder_cfg_file = None
#read the database config file
parsed_database_cfg = configparser.ConfigParser()
parsed_database_cfg.read(os.path.join(FLAGS.asr_expdir, 'database.cfg'))
database_cfg = dict(parsed_database_cfg.items('database'))
#read the features config file
parsed_feat_cfg = configparser.ConfigParser()
parsed_feat_cfg.read(
os.path.join(FLAGS.asr_expdir, 'model', 'features.cfg'))
feat_cfg = dict(parsed_feat_cfg.items('features'))
#read the asr config file
parsed_asr_cfg = configparser.ConfigParser()
parsed_asr_cfg.read(os.path.join(FLAGS.asr_expdir, 'model', 'asr.cfg'))
asr_cfg = dict(parsed_asr_cfg.items('asr'))
#read the lm config file
parsed_lm_cfg = configparser.ConfigParser()
parsed_lm_cfg.read(os.path.join(FLAGS.lm_expdir, 'model', 'lm.cfg'))
lm_cfg = dict(parsed_lm_cfg.items('lm'))
#read the asr-lm config file
parsed_asr_lm_cfg = configparser.ConfigParser()
parsed_asr_lm_cfg.read('config/asr_lm.cfg')
asr_lm_cfg = dict(parsed_asr_lm_cfg.items('asr-lm'))
#read the decoder config file
if decoder_cfg_file is None:
decoder_cfg_file = os.path.join(FLAGS.asr_expdir, 'model',
'decoder.cfg')
parsed_decoder_cfg = configparser.ConfigParser()
parsed_decoder_cfg.read(decoder_cfg_file)
decoder_cfg = dict(parsed_decoder_cfg.items('decoder'))
#create a feature reader
featdir = os.path.join(database_cfg['test_dir'], feat_cfg['name'])
with open(os.path.join(featdir, 'maxlength'), 'r') as fid:
max_length = int(fid.read())
reader = feature_reader.FeatureReader(
scpfile=os.path.join(featdir, 'feats.scp'),
cmvnfile=os.path.join(featdir, 'cmvn.scp'),
utt2spkfile=os.path.join(featdir, 'utt2spk'),
max_length=max_length)
#read the feature dimension
with open(
os.path.join(database_cfg['train_dir'], feat_cfg['name'],
'dim'),
'r') as fid:
input_dim = int(fid.read())
#create the coder
with open(os.path.join(database_cfg['train_dir'], 'alphabet')) as fid:
alphabet = fid.read().split(' ')
coder = target_coder.TargetCoder(alphabet)
#create the classifier
classifier = asr_lm_classifier.AsrLmClassifier(
conf=asr_lm_cfg,
asr_conf=asr_cfg,
lm_conf=lm_cfg,
output_dim=coder.num_labels)
#create a decoder
graph = tf.Graph()
with graph.as_default():
decoder = decoder_factory.factory(
conf=decoder_cfg,
classifier=classifier,
input_dim=input_dim,
max_input_length=reader.max_length,
coder=coder,
expdir=FLAGS.asr_expdir)
#create the lm saver
varnames = zip(*checkpoint_utils.list_variables(os.path.join(
FLAGS.lm_expdir, 'model', 'network.ckpt')))[0]
variables = [v for v in tf.all_variables()
if v.name.split(':')[0] in varnames]
lm_saver = tf.train.Saver(variables)
#create the asr saver
varnames = zip(*checkpoint_utils.list_variables(os.path.join(
FLAGS.asr_expdir, 'model', 'network.ckpt')))[0]
variables = [v for v in tf.all_variables()
if v.name.split(':')[0] in varnames]
asr_saver = tf.train.Saver(variables)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #pylint: disable=E1101
config.allow_soft_placement = True
with tf.Session(graph=graph, config=config) as sess:
#load the lm model
lm_saver.restore(
sess, os.path.join(FLAGS.lm_expdir, 'model', 'network.ckpt'))
#load the asr model
asr_saver.restore(
sess, os.path.join(FLAGS.asr_expdir, 'model', 'network.ckpt'))
#decode with the neural net
decoded = decoder.decode(reader, sess)
#the path to the text file
textfile = database_cfg['testtext']
#read all the reference transcriptions
with open(textfile) as fid:
lines = fid.readlines()
references = dict()
for line in lines:
splitline = line.strip().split(' ')
references[splitline[0]] = ' '.join(splitline[1:])
#compute the character error rate
score = decoder.score(decoded, references)
print 'score: %f' % score
if __name__ == '__main__':
tf.app.run()
|
JeroenBosmans/nabu
|
test.py
|
Python
|
mit
| 5,120
|
# regression test for SAX 2.0 -*- coding: iso-8859-1 -*-
# $Id: test_sax.py,v 1.1.1.1 2006/05/30 06:06:14 hhzhou Exp $
from xml.sax import make_parser, ContentHandler, \
SAXException, SAXReaderNotAvailable, SAXParseException
try:
make_parser()
except SAXReaderNotAvailable:
# don't try to test this module if we cannot create a parser
raise ImportError("no XML parsers available")
from xml.sax.saxutils import XMLGenerator, escape, unescape, quoteattr, \
XMLFilterBase
from xml.sax.expatreader import create_parser
from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
from cStringIO import StringIO
from test.test_support import verify, verbose, TestFailed, findfile
import os
# ===== Utilities
tests = 0
failures = []
def confirm(outcome, name):
global tests
tests = tests + 1
if outcome:
if verbose:
print "Passed", name
else:
failures.append(name)
def test_make_parser2():
try:
# Creating parsers several times in a row should succeed.
# Testing this because there have been failures of this kind
# before.
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
except:
return 0
else:
return p
# ===========================================================================
#
# saxutils tests
#
# ===========================================================================
# ===== escape
def test_escape_basic():
return escape("Donald Duck & Co") == "Donald Duck &amp; Co"
def test_escape_all():
return escape("<Donald Duck & Co>") == "&lt;Donald Duck &amp; Co&gt;"
def test_escape_extra():
return escape("Hei på deg", {"å" : "&aring;"}) == "Hei p&aring; deg"
# ===== unescape
def test_unescape_basic():
return unescape("Donald Duck &amp; Co") == "Donald Duck & Co"
def test_unescape_all():
return unescape("&lt;Donald Duck &amp; Co&gt;") == "<Donald Duck & Co>"
def test_unescape_extra():
return unescape("Hei p&aring; deg", {"&aring;" : "å"}) == "Hei på deg"
def test_unescape_amp_extra():
return unescape("&amp;foo;", {"&foo;": "splat"}) == "&foo;"
# ===== quoteattr
def test_quoteattr_basic():
return quoteattr("Donald Duck & Co") == '"Donald Duck & Co"'
def test_single_quoteattr():
return (quoteattr('Includes "double" quotes')
== '\'Includes "double" quotes\'')
def test_double_quoteattr():
return (quoteattr("Includes 'single' quotes")
== "\"Includes 'single' quotes\"")
def test_single_double_quoteattr():
return (quoteattr("Includes 'single' and \"double\" quotes")
== "\"Includes 'single' and "double" quotes\"")
# ===== make_parser
def test_make_parser():
try:
# Creating a parser should succeed - it should fall back
# to the expatreader
p = make_parser(['xml.parsers.no_such_parser'])
except:
return 0
else:
return p
# ===== XMLGenerator
start = '<?xml version="1.0" encoding="iso-8859-1"?>\n'
def test_xmlgen_basic():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc></doc>"
def test_xmlgen_content():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("huhei")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc>huhei</doc>"
def test_xmlgen_pi():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.processingInstruction("test", "data")
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<?test data?><doc></doc>"
def test_xmlgen_content_escape():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("<huhei&")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc><huhei&</doc>"
def test_xmlgen_attr_escape():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {"a": '"'})
gen.startElement("e", {"a": "'"})
gen.endElement("e")
gen.startElement("e", {"a": "'\""})
gen.endElement("e")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start \
+ "<doc a='\"'><e a=\"'\"></e><e a=\"'"\"></e></doc>"
def test_xmlgen_ignorable():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.ignorableWhitespace(" ")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc> </doc>"
ns_uri = "http://www.python.org/xml-ns/saxtest/"
def test_xmlgen_ns():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startPrefixMapping("ns1", ns_uri)
gen.startElementNS((ns_uri, "doc"), "ns1:doc", {})
# add an unqualified name
gen.startElementNS((None, "udoc"), None, {})
gen.endElementNS((None, "udoc"), None)
gen.endElementNS((ns_uri, "doc"), "ns1:doc")
gen.endPrefixMapping("ns1")
gen.endDocument()
return result.getvalue() == start + \
('<ns1:doc xmlns:ns1="%s"><udoc></udoc></ns1:doc>' %
ns_uri)
# ===== XMLFilterBase
def test_filter_basic():
result = StringIO()
gen = XMLGenerator(result)
filter = XMLFilterBase()
filter.setContentHandler(gen)
filter.startDocument()
filter.startElement("doc", {})
filter.characters("content")
filter.ignorableWhitespace(" ")
filter.endElement("doc")
filter.endDocument()
return result.getvalue() == start + "<doc>content </doc>"
# ===========================================================================
#
# expatreader tests
#
# ===========================================================================
# ===== XMLReader support
def test_expat_file():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(open(findfile("test"+os.extsep+"xml")))
return result.getvalue() == xml_test_out
# ===== DTDHandler support
class TestDTDHandler:
def __init__(self):
self._notations = []
self._entities = []
def notationDecl(self, name, publicId, systemId):
self._notations.append((name, publicId, systemId))
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._entities.append((name, publicId, systemId, ndata))
def test_expat_dtdhandler():
parser = create_parser()
handler = TestDTDHandler()
parser.setDTDHandler(handler)
parser.feed('<!DOCTYPE doc [\n')
parser.feed(' <!ENTITY img SYSTEM "expat.gif" NDATA GIF>\n')
parser.feed(' <!NOTATION GIF PUBLIC "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN">\n')
parser.feed(']>\n')
parser.feed('<doc></doc>')
parser.close()
return handler._notations == [("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)] and \
handler._entities == [("img", None, "expat.gif", "GIF")]
# ===== EntityResolver support
class TestEntityResolver:
def resolveEntity(self, publicId, systemId):
inpsrc = InputSource()
inpsrc.setByteStream(StringIO("<entity/>"))
return inpsrc
def test_expat_entityresolver():
parser = create_parser()
parser.setEntityResolver(TestEntityResolver())
result = StringIO()
parser.setContentHandler(XMLGenerator(result))
parser.feed('<!DOCTYPE doc [\n')
parser.feed(' <!ENTITY test SYSTEM "whatever">\n')
parser.feed(']>\n')
parser.feed('<doc>&test;</doc>')
parser.close()
return result.getvalue() == start + "<doc><entity></entity></doc>"
# ===== Attributes support
class AttrGatherer(ContentHandler):
def startElement(self, name, attrs):
self._attrs = attrs
def startElementNS(self, name, qname, attrs):
self._attrs = attrs
def test_expat_attrs_empty():
parser = create_parser()
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc/>")
parser.close()
return verify_empty_attrs(gather._attrs)
def test_expat_attrs_wattr():
parser = create_parser()
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc attr='val'/>")
parser.close()
return verify_attrs_wattr(gather._attrs)
def test_expat_nsattrs_empty():
parser = create_parser(1)
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc/>")
parser.close()
return verify_empty_nsattrs(gather._attrs)
def test_expat_nsattrs_wattr():
parser = create_parser(1)
gather = AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc xmlns:ns='%s' ns:attr='val'/>" % ns_uri)
parser.close()
attrs = gather._attrs
return attrs.getLength() == 1 and \
attrs.getNames() == [(ns_uri, "attr")] and \
(attrs.getQNames() == [] or attrs.getQNames() == ["ns:attr"]) and \
len(attrs) == 1 and \
attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [(ns_uri, "attr")] and \
attrs.get((ns_uri, "attr")) == "val" and \
attrs.get((ns_uri, "attr"), 25) == "val" and \
attrs.items() == [((ns_uri, "attr"), "val")] and \
attrs.values() == ["val"] and \
attrs.getValue((ns_uri, "attr")) == "val" and \
attrs[(ns_uri, "attr")] == "val"
# ===== InputSource support
xml_test_out = open(findfile("test"+os.extsep+"xml"+os.extsep+"out")).read()
def test_expat_inpsource_filename():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(findfile("test"+os.extsep+"xml"))
return result.getvalue() == xml_test_out
def test_expat_inpsource_sysid():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(InputSource(findfile("test"+os.extsep+"xml")))
return result.getvalue() == xml_test_out
def test_expat_inpsource_stream():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
inpsrc = InputSource()
inpsrc.setByteStream(open(findfile("test"+os.extsep+"xml")))
parser.parse(inpsrc)
return result.getvalue() == xml_test_out
# ===== IncrementalParser support
def test_expat_incremental():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
return result.getvalue() == start + "<doc></doc>"
def test_expat_incremental_reset():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("text")
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.reset()
parser.feed("<doc>")
parser.feed("text")
parser.feed("</doc>")
parser.close()
return result.getvalue() == start + "<doc>text</doc>"
# ===== Locator support
def test_expat_locator_noinfo():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
return parser.getSystemId() is None and \
parser.getPublicId() is None and \
parser.getLineNumber() == 1
def test_expat_locator_withinfo():
result = StringIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.parse(findfile("test.xml"))
return parser.getSystemId() == findfile("test.xml") and \
parser.getPublicId() is None
# ===========================================================================
#
# error reporting
#
# ===========================================================================
def test_expat_inpsource_location():
parser = create_parser()
parser.setContentHandler(ContentHandler()) # do nothing
source = InputSource()
source.setByteStream(StringIO("<foo bar foobar>")) #ill-formed
name = "a file name"
source.setSystemId(name)
try:
parser.parse(source)
except SAXException, e:
return e.getSystemId() == name
def test_expat_incomplete():
parser = create_parser()
parser.setContentHandler(ContentHandler()) # do nothing
try:
parser.parse(StringIO("<foo>"))
except SAXParseException:
return 1 # ok, error found
else:
return 0
def test_sax_parse_exception_str():
# pass various values from a locator to the SAXParseException to
# make sure that the __str__() doesn't fall apart when None is
# passed instead of an integer line and column number
#
# use "normal" values for the locator:
str(SAXParseException("message", None,
DummyLocator(1, 1)))
# use None for the line number:
str(SAXParseException("message", None,
DummyLocator(None, 1)))
# use None for the column number:
str(SAXParseException("message", None,
DummyLocator(1, None)))
# use None for both:
str(SAXParseException("message", None,
DummyLocator(None, None)))
return 1
class DummyLocator:
def __init__(self, lineno, colno):
self._lineno = lineno
self._colno = colno
def getPublicId(self):
return "pubid"
def getSystemId(self):
return "sysid"
def getLineNumber(self):
return self._lineno
def getColumnNumber(self):
return self._colno
# ===========================================================================
#
# xmlreader tests
#
# ===========================================================================
# ===== AttributesImpl
def verify_empty_attrs(attrs):
try:
attrs.getValue("attr")
gvk = 0
except KeyError:
gvk = 1
try:
attrs.getValueByQName("attr")
gvqk = 0
except KeyError:
gvqk = 1
try:
attrs.getNameByQName("attr")
gnqk = 0
except KeyError:
gnqk = 1
try:
attrs.getQNameByName("attr")
gqnk = 0
except KeyError:
gqnk = 1
try:
attrs["attr"]
gik = 0
except KeyError:
gik = 1
return attrs.getLength() == 0 and \
attrs.getNames() == [] and \
attrs.getQNames() == [] and \
len(attrs) == 0 and \
not attrs.has_key("attr") and \
attrs.keys() == [] and \
attrs.get("attrs") is None and \
attrs.get("attrs", 25) == 25 and \
attrs.items() == [] and \
attrs.values() == [] and \
gvk and gvqk and gnqk and gik and gqnk
def verify_attrs_wattr(attrs):
return attrs.getLength() == 1 and \
attrs.getNames() == ["attr"] and \
attrs.getQNames() == ["attr"] and \
len(attrs) == 1 and \
attrs.has_key("attr") and \
attrs.keys() == ["attr"] and \
attrs.get("attr") == "val" and \
attrs.get("attr", 25) == "val" and \
attrs.items() == [("attr", "val")] and \
attrs.values() == ["val"] and \
attrs.getValue("attr") == "val" and \
attrs.getValueByQName("attr") == "val" and \
attrs.getNameByQName("attr") == "attr" and \
attrs["attr"] == "val" and \
attrs.getQNameByName("attr") == "attr"
def test_attrs_empty():
return verify_empty_attrs(AttributesImpl({}))
def test_attrs_wattr():
return verify_attrs_wattr(AttributesImpl({"attr" : "val"}))
# ===== AttributesImpl
def verify_empty_nsattrs(attrs):
try:
attrs.getValue((ns_uri, "attr"))
gvk = 0
except KeyError:
gvk = 1
try:
attrs.getValueByQName("ns:attr")
gvqk = 0
except KeyError:
gvqk = 1
try:
attrs.getNameByQName("ns:attr")
gnqk = 0
except KeyError:
gnqk = 1
try:
attrs.getQNameByName((ns_uri, "attr"))
gqnk = 0
except KeyError:
gqnk = 1
try:
attrs[(ns_uri, "attr")]
gik = 0
except KeyError:
gik = 1
return attrs.getLength() == 0 and \
attrs.getNames() == [] and \
attrs.getQNames() == [] and \
len(attrs) == 0 and \
not attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [] and \
attrs.get((ns_uri, "attr")) is None and \
attrs.get((ns_uri, "attr"), 25) == 25 and \
attrs.items() == [] and \
attrs.values() == [] and \
gvk and gvqk and gnqk and gik and gqnk
def test_nsattrs_empty():
return verify_empty_nsattrs(AttributesNSImpl({}, {}))
def test_nsattrs_wattr():
attrs = AttributesNSImpl({(ns_uri, "attr") : "val"},
{(ns_uri, "attr") : "ns:attr"})
return attrs.getLength() == 1 and \
attrs.getNames() == [(ns_uri, "attr")] and \
attrs.getQNames() == ["ns:attr"] and \
len(attrs) == 1 and \
attrs.has_key((ns_uri, "attr")) and \
attrs.keys() == [(ns_uri, "attr")] and \
attrs.get((ns_uri, "attr")) == "val" and \
attrs.get((ns_uri, "attr"), 25) == "val" and \
attrs.items() == [((ns_uri, "attr"), "val")] and \
attrs.values() == ["val"] and \
attrs.getValue((ns_uri, "attr")) == "val" and \
attrs.getValueByQName("ns:attr") == "val" and \
attrs.getNameByQName("ns:attr") == (ns_uri, "attr") and \
attrs[(ns_uri, "attr")] == "val" and \
attrs.getQNameByName((ns_uri, "attr")) == "ns:attr"
# ===== Main program
def make_test_output():
parser = create_parser()
result = StringIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(findfile("test"+os.extsep+"xml"))
outf = open(findfile("test"+os.extsep+"xml"+os.extsep+"out"), "w")
outf.write(result.getvalue())
outf.close()
items = locals().items()
items.sort()
for (name, value) in items:
if name[ : 5] == "test_":
confirm(value(), name)
# We delete the items variable so that the assignment to items above
# doesn't pick up the old value of items (which messes with attempts
# to find reference leaks).
del items
if verbose:
print "%d tests, %d failures" % (tests, len(failures))
if failures:
raise TestFailed("%d of %d tests failed: %s"
% (len(failures), tests, ", ".join(failures)))
|
kontais/EFI-MIPS
|
ToolKit/cmds/python/Lib/test/skipped/test_sax.py
|
Python
|
bsd-3-clause
| 19,483
|
# Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import saml2
def create_conf(sp_host='sp.example.com', idp_hosts=['idp.example.com'],
metadata_file='remote_metadata.xml'):
try:
from saml2.sigver import get_xmlsec_binary
except ImportError:
get_xmlsec_binary = None
if get_xmlsec_binary:
xmlsec_path = get_xmlsec_binary(["/opt/local/bin"])
else:
xmlsec_path = '/usr/bin/xmlsec1'
BASEDIR = os.path.dirname(os.path.abspath(__file__))
config = {
'xmlsec_binary': xmlsec_path,
'entityid': 'http://%s/saml2/metadata/' % sp_host,
'attribute_map_dir': os.path.join(BASEDIR, 'attribute-maps'),
'service': {
'sp': {
'name': 'Test SP',
'name_id_format': saml2.saml.NAMEID_FORMAT_PERSISTENT,
'endpoints': {
'assertion_consumer_service': [
('http://%s/saml2/acs/' % sp_host,
saml2.BINDING_HTTP_POST),
],
'single_logout_service': [
('http://%s/saml2/ls/' % sp_host,
saml2.BINDING_HTTP_REDIRECT),
],
},
'required_attributes': ['uid'],
'optional_attributes': ['eduPersonAffiliation'],
'idp': {} # this is filled later
},
},
'metadata': {
'local': [os.path.join(BASEDIR, metadata_file)],
},
'debug': 1,
# certificates
'key_file': os.path.join(BASEDIR, 'mycert.key'),
'cert_file': os.path.join(BASEDIR, 'mycert.pem'),
# These fields are only used when generating the metadata
'contact_person': [
{'given_name': 'Technical givenname',
'sur_name': 'Technical surname',
'company': 'Example Inc.',
'email_address': 'technical@sp.example.com',
'contact_type': 'technical'},
{'given_name': 'Administrative givenname',
'sur_name': 'Administrative surname',
'company': 'Example Inc.',
'email_address': 'administrative@sp.example.com',
'contact_type': 'administrative'},
],
'organization': {
'name': [('Ejemplo S.A.', 'es'), ('Example Inc.', 'en')],
'display_name': [('Ejemplo', 'es'), ('Example', 'en')],
'url': [('http://www.example.es', 'es'),
('http://www.example.com', 'en')],
},
'valid_for': 24,
}
for idp in idp_hosts:
entity_id = 'https://%s/simplesaml/saml2/idp/metadata.php' % idp
config['service']['sp']['idp'][entity_id] = {
'single_sign_on_service': {
saml2.BINDING_HTTP_REDIRECT: 'https://%s/simplesaml/saml2/idp/SSOService.php' % idp,
},
'single_logout_service': {
saml2.BINDING_HTTP_REDIRECT: 'https://%s/simplesaml/saml2/idp/SingleLogoutService.php' % idp,
},
}
return config
|
WebSpider/djangosaml2
|
djangosaml2/tests/conf.py
|
Python
|
apache-2.0
| 3,818
|
import re
import os
class ACAdapter:
present = False
name = ''
def get_ac_adapters():
r = []
try:
lst = []
lst = os.listdir('/proc/acpi/ac_adapter')
except:
pass
for x in lst:
try:
a = ACAdapter()
a.name = x
ss = open('/proc/acpi/ac_adapter/%s/state' % x).read().split('\n')
for s in ss:
if s.startswith('state:'):
a.present = s.endswith('on-line')
r.append(a)
except:
pass
return r
class Battery:
present = False
name = ''
charge = 0
total = 0
current = 0
def get_batteries():
r = []
try:
lst = []
lst = os.listdir('/proc/acpi/battery')
except:
pass
for x in lst:
try:
b = Battery()
b.name = x
b.total = 10000
b.current = 10000
ss = open('/proc/acpi/battery/%s/state' % x).read().split('\n')
for s in ss:
if s.startswith('present:'):
b.present = s.endswith('yes')
if s.startswith('remaining capacity:'):
b.current = int(s.split()[2])
ss = open('/proc/acpi/battery/%s/info' % x).read().split('\n')
for s in ss:
if s.startswith('design capacity:'):
b.total = int(s.split()[2])
b.charge = int(b.current * 100 / b.total)
r.append(b)
except:
pass
return r
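# Worked example (hypothetical /proc/uptime of "93784.00 ..."):
# 93784 s is 1 day, 2 h, 3 min and 4 s, so get_uptime() returns
# "1 day, 2:03:04".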
def get_uptime():
minute = 60
hour = minute * 60
day = hour * 24
d = h = m = 0
try:
s = int(open('/proc/uptime').read().split('.')[0])
d = s / day
s -= d * day
h = s / hour
s -= h * hour
m = s / minute
s -= m * minute
except IOError:
# Try use 'uptime' command
up = os.popen('uptime').read()
if up:
uptime = re.search('up\s+(.*?),\s+[0-9]+ user',up).group(1)
return uptime
uptime = ""
if d > 1:
uptime = "%d days, "%d
elif d == 1:
uptime = "1 day, "
return uptime + "%d:%02d:%02d"%(h,m,s)
|
Shanto/ajenti
|
plugins/power/backend.py
|
Python
|
lgpl-3.0
| 2,240
|
from __future__ import absolute_import
from .CoolProp import HAProps, HAProps_Aux, cair_sat
|
CoolProp/CoolProp-museum
|
wrappers/Python/CoolProp/HumidAirProp.py
|
Python
|
mit
| 91
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Zenodo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Convenience module for importing utilities need in a shell."""
import os
from werkzeug.utils import secure_filename
from invenio.base.globals import cfg
from invenio.ext.cache import cache
from invenio.ext.login import UserInfo
from invenio.ext.sqlalchemy import db
from invenio.modules.accounts.models import User
from invenio.modules.deposit.models import Deposition, DepositionFile, \
DepositionStorage, DepositionType
from invenio.modules.formatter import format_record
from invenio.modules.pidstore.models import PersistentIdentifier
from invenio.modules.pidstore.tasks import datacite_delete, \
datacite_register, datacite_sync, datacite_update
from invenio.modules.records.api import get_record
from invenio.utils.serializers import ZlibPickle as Serializer
from zenodo.modules.deposit.workflows.upload import transfer_ownership
def ban_user(user_id):
"""Block user."""
u = User.query.get(user_id)
if u.note != '0':
u.note = '0'
db.session.commit()
remove_session(user_id)
def remove_session(user_id):
"""Remove session for a user."""
prefix = cache.cache.key_prefix + "session::"
for k in cache.cache._client.keys():
if k.startswith(prefix):
k = k[len(cache.cache.key_prefix):]
try:
data = Serializer.loads(cache.get(k))
if data['uid'] == user_id:
print k
cache.delete(k)
except TypeError:
pass
def deposition_users(depositions):
"""Iterate over deposition users."""
for d in depositions:
yield Deposition.get(d).user_id
def deposition_users_emails(depositions):
"""Get list of email addresses for depositions."""
for user_id in deposition_users(depositions):
yield User.query.get(user_id).email
def deposition_with_files(files, user_id=None, deposition_id=None):
"""Add very big files to a deposition."""
if deposition_id:
d = Deposition.get(deposition_id)
else:
d = Deposition.create(User.query.get(user_id))
for filepath in files:
with open(filepath, "rb") as fileobj:
filename = os.path.basename(filepath)
df = DepositionFile(backend=DepositionStorage(d.id))
df.save(fileobj, filename=filename)
d.add_file(df)
return d
|
otron/zenodo
|
zenodo/shell.py
|
Python
|
gpl-3.0
| 3,274
|
from django.template.base import Library
from static_precompiler.compilers import SASS
from static_precompiler.templatetags.base import BaseInlineNode
register = Library()
compiler = SASS()
class InlineSASSNode(BaseInlineNode):
compiler = compiler
#noinspection PyUnusedLocal
@register.tag(name="inlinesass")
def do_inlinesass(parser, token):
nodelist = parser.parse(("endinlinesass",))
parser.delete_first_token()
return InlineSASSNode(nodelist)
@register.simple_tag
def sass(path):
return compiler.compile(path)
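# Illustrative template usage (hypothetical paths):
#     {% load sass %}
#     {% inlinesass %} .foo { .bar { color: red; } } {% endinlinesass %}
#     {% sass "styles/main.scss" %}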
|
woodymit/millstone_accidental_source
|
celery_manager/static_precompiler/templatetags/sass.py
|
Python
|
mit
| 551
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
from keystone.common.sql import migration_helpers
def upgrade(migrate_engine):
try:
extension_version = migration_helpers.get_db_version(
extension='endpoint_filter',
engine=migrate_engine)
except Exception:
extension_version = 0
# This migration corresponds to endpoint_filter extension migration 2. Only
# update if it has not been run.
if extension_version >= 2:
return
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
EP_GROUP_ID = 'endpoint_group_id'
PROJECT_ID = 'project_id'
endpoint_filtering_table = sql.Table(
'project_endpoint',
meta,
sql.Column(
'endpoint_id',
sql.String(64),
primary_key=True,
nullable=False),
sql.Column(
'project_id',
sql.String(64),
primary_key=True,
nullable=False))
endpoint_filtering_table.create(migrate_engine, checkfirst=True)
endpoint_group_table = sql.Table(
'endpoint_group',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(255), nullable=False),
sql.Column('description', sql.Text, nullable=True),
sql.Column('filters', sql.Text(), nullable=False))
endpoint_group_table.create(migrate_engine, checkfirst=True)
project_endpoint_group_table = sql.Table(
'project_endpoint_group',
meta,
sql.Column(EP_GROUP_ID, sql.String(64),
sql.ForeignKey('endpoint_group.id'), nullable=False),
sql.Column(PROJECT_ID, sql.String(64), nullable=False),
sql.PrimaryKeyConstraint(EP_GROUP_ID, PROJECT_ID))
project_endpoint_group_table.create(migrate_engine, checkfirst=True)
|
cernops/keystone
|
keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
|
Python
|
apache-2.0
| 2,467
|
## Dates in timeseries models
from __future__ import print_function
import statsmodels.api as sm
import pandas as pd
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# ## Using Pandas
#
# Make a pandas Series or DataFrame with DatetimeIndex
endog = pd.Series(data.endog, index=dates)
# Instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# ## Using explicit dates
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# Note: This attribute only exists if predict has been called. It holds the dates associated with the last call to predict.
|
bert9bert/statsmodels
|
examples/python/tsa_dates.py
|
Python
|
bsd-3-clause
| 1,180
|
"""
NEPI, a framework to manage network experiments
Copyright (C) 2013 INRIA
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Author: Julien Tribino <julien.tribino@inria.fr>
Example :
- Testbed : iMinds
- Explanation :
Test the STDIN Message
(collapsed ASCII topology diagram: Node "Zotack" at
node0.nepi-robot.nepi.wilab2.ilabt.iminds.be, connected
to an Application running RobotCTRLComm.rb)
- Experiment:
- t0 : Deployment
- t1 : After the application started, send the message START_DRIVE
- t2 (t1 + 83s) : Open Left eye of robot 1
- t3 (t2 + 2s) : Open Left eye of robot 2
"""
from nepi.execution.resource import ResourceFactory, ResourceAction, ResourceState
from nepi.execution.ec import ExperimentController
import time
# Create the EC
ec = ExperimentController()
# Create and Configure the Node
node1 = ec.register_resource("omf::Node")
# If the hostname is not declared, Nepi will use SFA to provision one.
ec.set(node1, 'hostname', 'node0.nepi-robot.nepi.wilab2.ilabt.iminds.be')
# XMPP credentials
ec.set(node1, 'xmppServer', "default_slice_iminds")
ec.set(node1, 'xmppUser', "am.wilab2.ilabt.iminds.be")
ec.set(node1, 'xmppPort', "5222")
ec.set(node1, 'xmppPassword', "1234")
ec.set(node1, 'version', "5")
# Create and Configure the Application
app1 = ec.register_resource("omf::RobotApplication")
ec.set(app1, 'appid', "robot")
ec.set(app1, 'version', "5")
ec.set(app1, 'command', "/users/jtribino/RobotCTRLComm.rb /users/jtribino/coordinate.csv")
# /users/username/RobotCTRLComm.rb /users/username/coordinate.csv
ec.set(app1, 'env', " ")
ec.set(app1, 'sources', "/home/wlab18/Desktop/coordinate.csv") # local path
ec.set(app1, 'sshUser', "jtribino") # username
# Connection
ec.register_connection(app1, node1)
# The Application should run during 350sec
ec.register_condition(app1, ResourceAction.STOP, app1, ResourceState.STARTED , "350s")
# Deploy
ec.deploy()
ec.wait_started([app1])
ec.set(app1, 'stdin', "START_DRIVE")
time.sleep(83)
ec.set(app1, 'stdin', "1;openlefteye")
time.sleep(2)
ec.set(app1, 'stdin', "2;openlefteye")
ec.wait_finished([app1])
# Stop Experiment
ec.shutdown()
|
phiros/nepi
|
examples/omf/testing/nepi_omf5_iminds_stdin.py
|
Python
|
gpl-3.0
| 2,831
|
from __future__ import division # For Python 2 compatibility
"""Pareto smoothed importance sampling (PSIS)
This module implements Pareto smoothed importance sampling (PSIS) and PSIS
leave-one-out (LOO) cross-validation for Python (Numpy).
Included functions
------------------
psisloo
Pareto smoothed importance sampling leave-one-out log predictive densities.
psislw
Pareto smoothed importance sampling.
gpdfitnew
Estimate the parameters for the Generalized Pareto Distribution (GPD).
gpinv
Inverse Generalised Pareto distribution function.
sumlogs
Sum of vector where numbers are represented by their logarithms.
References
----------
Aki Vehtari, Andrew Gelman and Jonah Gabry (2016). Practical
Bayesian model evaluation using leave-one-out cross-validation
and WAIC. Statistics and Computing, doi:10.1007/s11222-016-9696-4.
Aki Vehtari, Andrew Gelman and Jonah Gabry (2016). Pareto
smoothed importance sampling. arXiv preprint arXiv:1507.02646v4.
"""
# 3-Clause BSD License
"""
Copyright 2017 Aki Vehtari, Tuomas Sivula
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """
import numpy as np
def psisloo(log_lik, **kwargs):
r"""PSIS leave-one-out log predictive densities.
Computes the log predictive densities given posterior samples of the log
likelihood terms :math:`p(y_i|\theta^s)` in input parameter `log_lik`.
Returns a sum of the leave-one-out log predictive densities `loo`,
individual leave-one-out log predictive density terms `loos` and an estimate
of Pareto tail indices `ks`. If tail index k > 0.5, variance of the raw
estimate does not exist and if tail index k > 1 the mean of the raw estimate
does not exist and the PSIS estimate is likely to have large variation and
some bias.
Parameters
----------
log_lik : ndarray
Array of size n x m containing n posterior samples of the log likelihood
terms p(y_i|theta^s).
Additional keyword arguments are passed to the psislw() function (see the
corresponding documentation).
Returns
-------
loo : scalar
sum of the leave-one-out log predictive densities
loos : ndarray
individual leave-one-out log predictive density terms
ks : ndarray
estimated Pareto tail indices
"""
# ensure overwrite flag in passed arguments
kwargs['overwrite_lw'] = True
# log raw weights from log_lik
lw = -log_lik
# compute Pareto smoothed log weights given raw log weights
lw, ks = psislw(lw, **kwargs)
# compute the leave-one-out log predictive densities
lw += log_lik
loos = sumlogs(lw, axis=0)
loo = loos.sum()
return loo, loos, ks
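# Usage sketch (hypothetical data): for an n x m array `log_lik` of
# log-likelihood terms (n posterior draws, m observations),
#     loo, loos, ks = psisloo(log_lik)
# gives the summed LOO log density, the m per-observation terms and the
# m Pareto tail indices; per the docstring, k > 0.5 signals that the
# variance of the raw estimate does not exist.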
def psislw(lw, wcpp=20, wtrunc=3/4, overwrite_lw=False):
"""Pareto smoothed importance sampling (PSIS).
Parameters
----------
lw : ndarray
Array of size n x m containing m sets of n log weights. It is also
possible to provide one dimensional array of length n.
wcpp : number
Percentage of samples used for GPD fit estimate (default is 20).
wtrunc : float
Positive parameter for truncating very large weights to ``n**wtrunc``.
Providing False or 0 disables truncation. Default value is 3/4.
overwrite_lw : bool, optional
If True, the input array `lw` is smoothed in-place. By default, a new
array is allocated.
Returns
-------
lw_out : ndarray
smoothed log weights
kss : ndarray
Pareto tail indices
"""
if lw.ndim == 2:
n, m = lw.shape
elif lw.ndim == 1:
n = len(lw)
m = 1
else:
raise ValueError("Argument `lw` must be 1 or 2 dimensional.")
if n <= 1:
raise ValueError("More than one log-weight needed.")
if overwrite_lw:
# in-place operation
lw_out = lw
else:
# allocate new array for output
lw_out = np.copy(lw, order='K')
# allocate output array for kss
kss = np.empty(m)
# precalculate constants
cutoffmin = np.log(np.finfo(float).tiny)
logn = np.log(n)
# loop over sets of log weights
for i, x in enumerate(lw_out.T if lw_out.ndim == 2 else lw_out[None,:]):
# improve numerical accuracy
x -= np.max(x)
# divide log weights into body and right tail
xcutoff = max(
np.percentile(x, 100 - wcpp),
cutoffmin
)
expxcutoff = np.exp(xcutoff)
tailinds, = np.where(x > xcutoff)
x2 = x[tailinds]
n2 = len(x2)
if n2 <= 4:
# not enough tail samples for gpdfitnew
k = np.inf
else:
# order of tail samples
x2si = np.argsort(x2)
# fit generalized Pareto distribution to the right tail samples
np.exp(x2, out=x2)
x2 -= expxcutoff
k, sigma = gpdfitnew(x2, sort=x2si)
# compute ordered statistic for the fit
sti = np.arange(0.5, n2)
sti /= n2
qq = gpinv(sti, k, sigma)
qq += expxcutoff
np.log(qq, out=qq)
# place the smoothed tail into the output array
x[tailinds[x2si]] = qq
if wtrunc > 0:
# truncate too large weights
lwtrunc = wtrunc * logn - logn + sumlogs(x)
x[x > lwtrunc] = lwtrunc
# renormalize weights
x -= sumlogs(x)
# store tail index k
kss[i] = k
# If the provided input array is one dimensional, return kss as scalar.
if lw_out.ndim == 1:
kss = kss[0]
return lw_out, kss
def gpdfitnew(x, sort=True, sort_in_place=False):
"""Estimate the paramaters for the Generalized Pareto Distribution (GPD)
Returns empirical Bayes estimate for the parameters of the two-parameter
generalized Parato distribution given the data.
Parameters
----------
x : ndarray
One dimensional data array
sort : bool or ndarray, optional
If known in advance, one can provide an array of indices that would
sort the input array `x`. If the input array is already sorted, provide
False. If True (default behaviour), the array is sorted internally.
sort_in_place : bool, optional
If `sort` is True and `sort_in_place` is True, the array is sorted
in-place (False by default).
Returns
-------
k, sigma : float
estimated parameter values
Notes
-----
This function returns a negative of Zhang and Stephens's k, because it is
a more common parameterisation.
"""
if x.ndim != 1 or len(x) <= 1:
raise ValueError("Invalid input array.")
# check if x should be sorted
if sort is True:
if sort_in_place:
x.sort()
xsorted = True
else:
sort = np.argsort(x)
xsorted = False
elif sort is False:
xsorted = True
else:
xsorted = False
n = len(x)
m = 80 + int(np.sqrt(n))
bs = np.arange(1, m + 1, dtype=float)
bs -= 0.5
np.divide(m, bs, out=bs)
np.sqrt(bs, out=bs)
np.subtract(1, bs, out=bs)
if xsorted:
bs /= 3 * x[int(n/4 + 0.5) - 1]
bs += 1 / x[-1]
else:
bs /= 3 * x[sort[int(n/4 + 0.5) - 1]]
bs += 1 / x[sort[-1]]
ks = np.negative(bs)
temp = ks[:,None] * x
np.log1p(temp, out=temp)
np.mean(temp, axis=1, out=ks)
L = bs / ks
np.negative(L, out=L)
np.log(L, out=L)
L -= ks
L -= 1
L *= n
temp = L - L[:,None]
np.exp(temp, out=temp)
w = np.sum(temp, axis=1)
np.divide(1, w, out=w)
# remove negligible weights
dii = w >= 10 * np.finfo(float).eps
if not np.all(dii):
w = w[dii]
bs = bs[dii]
# normalise w
w /= w.sum()
# posterior mean for b
b = np.sum(bs * w)
# Estimate for k, note that we return a negative of Zhang and
# Stephens's k, because it is a more common parameterisation.
temp = (-b) * x
np.log1p(temp, out=temp)
k = np.mean(temp)
# estimate for sigma
sigma = -k / b
return k, sigma
def gpinv(p, k, sigma):
"""Inverse Generalised Pareto distribution function."""
x = np.empty(p.shape)
x.fill(np.nan)
if sigma <= 0:
return x
ok = (p > 0) & (p < 1)
if np.all(ok):
if np.abs(k) < np.finfo(float).eps:
np.negative(p, out=x)
np.log1p(x, out=x)
np.negative(x, out=x)
else:
np.negative(p, out=x)
np.log1p(x, out=x)
x *= -k
np.expm1(x, out=x)
x /= k
x *= sigma
else:
if np.abs(k) < np.finfo(float).eps:
# x[ok] = - np.log1p(-p[ok])
temp = p[ok]
np.negative(temp, out=temp)
np.log1p(temp, out=temp)
np.negative(temp, out=temp)
x[ok] = temp
else:
# x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k
temp = p[ok]
np.negative(temp, out=temp)
np.log1p(temp, out=temp)
temp *= -k
np.expm1(temp, out=temp)
temp /= k
x[ok] = temp
x *= sigma
x[p == 0] = 0
if k >= 0:
x[p == 1] = np.inf
else:
x[p == 1] = -sigma / k
return x
def sumlogs(x, axis=None, out=None):
"""Sum of vector where numbers are represented by their logarithms.
Calculates ``np.log(np.sum(np.exp(x), axis=axis))`` in such a fashion that
it works even when elements have large magnitude.
"""
maxx = x.max(axis=axis, keepdims=True)
xnorm = x - maxx
np.exp(xnorm, out=xnorm)
out = np.sum(xnorm, axis=axis, out=out)
if isinstance(out, np.ndarray):
np.log(out, out=out)
else:
out = np.log(out)
out += np.squeeze(maxx)
return out
|
tsivula/BDA_py_demos
|
utilities_and_data/psis.py
|
Python
|
gpl-3.0
| 11,215
|
# Plaid stimulus
#
# Copyright (C) 2010-2013 Huang Xin
#
# See LICENSE.TXT that came with this file.
from VisionEgg.Gratings import SinGrating2D
from LightData import dictattr
from Core import Stimulus
class Plaid(Stimulus):
def __init__(self, params, **kwargs):
super(Plaid, self).__init__(**kwargs)
self.name = 'plaid'
self.parameters = dictattr()
self.set_parameters(self.parameters, params)
self.make_stimuli()
def make_stimuli(self):
size = self.viewport.get_size()
width = max(size)
self.grating1 = SinGrating2D(anchor='center',
position=(size[0]/2, size[1]/2),
size=(width, width),
pedestal=self.parameters.ml[0],
orientation = self.parameters.ori[0],
spatial_freq = self.viewport.cycDeg2cycPix(self.parameters.sfreqCycDeg[0]),
temporal_freq_hz = self.parameters.tfreqCycSec[0],
max_alpha=0.5,
ignore_time=True,
on=True)
self.grating2 = SinGrating2D(anchor='center',
position=(size[0]/2, size[1]/2),
size=(width, width),
pedestal=self.parameters.ml[1],
orientation = self.parameters.ori[1],
spatial_freq = self.viewport.cycDeg2cycPix(self.parameters.sfreqCycDeg[1]),
temporal_freq_hz = self.parameters.tfreqCycSec[1],
max_alpha=0.5,
ignore_time=True,
on=True)
self.stimuli = (self.grating1, self.grating2)
|
chrox/RealTimeElectrophy
|
StimControl/LightStim/Plaid.py
|
Python
|
bsd-2-clause
| 2,071
|
SUCCESS = 'success'
FAILURE = 'failure'
WARNING = 'warning'
class PyAem2Result():
def __init__(self, response):
self.response = response
self.status = None
self.message = None
def success(self, message):
self.status = SUCCESS
self.message = message
def failure(self, message):
self.status = FAILURE
self.message = message
def warning(self, message):
self.status = WARNING
self.message = message
def is_success(self):
return self.status == SUCCESS
def is_failure(self):
return self.status == FAILURE
def is_warning(self):
return self.status == WARNING
def debug(self):
data = {
'Request method': self.response['request']['method'],
'Request URL': self.response['request']['url'],
'Request parameters': self.response['request']['params'],
'Response code': self.response['http_code'],
'Response body': self.response['body'],
'Result status': self.status,
'Result message': self.message
}
debug = ''
for key in data:
debug += '{0}: {1}\n'.format(key, data[key])
return debug
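# Illustrative usage sketch (an addition, not part of the original module);
# the response dict below is an assumed example of the shape debug() expects.
# >>> response = {'request': {'method': 'GET', 'url': '/x', 'params': {}},
# ...             'http_code': 200, 'body': 'ok'}
# >>> result = PyAem2Result(response)
# >>> result.success('done')
# >>> result.is_success()
# True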
|
wildone/pyaem
|
pyaem2/result.py
|
Python
|
mit
| 1,262
|
#!/bin/python
# -*- coding: utf-8 -*-
"""Print."""
from .exceptions import InvalidUsage
from flask import request
def update_token(cursor, connection, token, new_token):
"""Validate Token"""
if request.endpoint == 'login':
args = request.args.to_dict()
user_name = args['user_name']
query = """
UPDATE `user` set token='{}' where user_name='{}'
""".format(new_token, user_name)
else:
query = """
UPDATE `user` set token='{}' where token = '{}'
""".format(new_token, token)
try:
cursor.execute(query)
except Exception as e:
raise InvalidUsage(
'Query Error check logs for details', status_code=400
)
try:
connection.commit()
except Warning as e:
raise e
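# Hedged side note (an addition, not part of the original module): building
# SQL with str.format, as above, is open to SQL injection. A sketch of the
# safer DB-API parameter binding offered by common MySQL drivers:
# query = "UPDATE `user` SET token=%s WHERE token=%s"
# cursor.execute(query, (new_token, token))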
|
shridarpatil/RestApiz
|
utils/update_token.py
|
Python
|
mit
| 840
|
from mock import (
call,
Mock,
patch,
)
import os
from unittest import TestCase
from upload_packages import (
get_args,
get_changes,
main,
upload_package,
upload_packages,
)
from utils import temp_dir
CHANGES_DATA = """\
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA1
Format: 1.8
Date: Mon, 10 Aug 2015 20:16:09 +0000
Source: juju-core
Binary: juju-core juju juju-local juju-local-kvm
Architecture: source
Version: 1.24.5-0ubuntu1~14.04.1~juju1
Distribution: trusty
"""
class UploadPackageTestCase(TestCase):
def test_get_args(self):
args = get_args(['-d', '-c', 'creds', 'ppa:team/archive', 'a', 'b'])
self.assertEqual('ppa:team/archive', args.ppa)
self.assertEqual(['a', 'b'], args.package_dirs)
self.assertEqual('creds', args.credentials)
self.assertTrue(args.dry_run)
# Cannot autospec staticmethods.
@patch('upload_packages.Launchpad.login_with')
@patch('upload_packages.upload_packages', autospec=True)
def test_main(self, up_mock, lw_mock):
lp = object()
lw_mock.return_value = lp
return_code = main(['-d', '-c', 'creds', 'ppa:team/archive', 'a', 'b'])
self.assertEqual(0, return_code)
lw_mock.assert_called_once_with(
'upload-packages', service_root='https://api.launchpad.net',
version='devel', credentials_file='creds')
up_mock.assert_called_once_with(
lp, 'ppa:team/archive', ['a', 'b'], dry_run=True)
def test_get_changes(self):
with temp_dir() as package_dir:
changes_path = os.path.join(package_dir, 'foo_source.changes')
with open(changes_path, 'w') as changes_file:
changes_file.write(CHANGES_DATA)
with open(os.path.join(package_dir, 'foo.dsc'), 'w') as other_file:
other_file.write('other_file')
source_name, version, file_name = get_changes(package_dir)
self.assertEqual('juju-core', source_name)
self.assertEqual('1.24.5-0ubuntu1~14.04.1~juju1', version)
self.assertEqual('foo_source.changes', file_name)
@patch('subprocess.check_call', autospec=True)
def test_upload_package_uploaded(self, cc_mock):
archive = Mock(getPublishedSources=Mock())
archive.getPublishedSources.return_value = [
Mock(source_package_version='1.24.5-0ubuntu1~14.04.1~juju1',
source_package_name='juju-core')]
with temp_dir() as package_dir:
changes_path = os.path.join(package_dir, 'foo_source.changes')
with open(changes_path, 'w') as changes_file:
changes_file.write(CHANGES_DATA)
result = upload_package(
'ppa:bar/baz', archive, package_dir, dry_run=False)
self.assertFalse(result)
self.assertEqual(0, cc_mock.call_count)
archive.getPublishedSources.assert_called_once_with(
source_name='juju-core', version='1.24.5-0ubuntu1~14.04.1~juju1')
@patch('subprocess.check_call', autospec=True)
@patch('upload_packages.get_changes', autospec=True)
def test_upload_package_uploading(self, gc_mock, cc_mock):
gc_mock.return_value = (
'juju-core', '1.24.5-0ubuntu1~14.04.1~juju1', 'foo_source.changes')
archive = Mock(getPublishedSources=Mock())
archive.getPublishedSources.return_value = []
with temp_dir() as package_dir:
result = upload_package(
'ppa:bar/baz', archive, package_dir, dry_run=False)
self.assertTrue(result)
gc_mock.assert_called_once_with(package_dir)
cc_mock.assert_called_once_with(
['dput', 'ppa:bar/baz', 'foo_source.changes'], cwd=package_dir)
archive.getPublishedSources.assert_called_once_with(
source_name='juju-core', version='1.24.5-0ubuntu1~14.04.1~juju1')
@patch('upload_packages.upload_package', autospec=True)
def test_upload_packages(self, up_mock):
# assigning a side_effect requires an iterable, unlike instantiation.
up_mock.side_effect = iter([False, True])
team = Mock(name='bar', getPPAByName=Mock())
team.getPPAByName.return_value = 'baz'
lp = Mock(people={'bar': team})
with temp_dir() as package_dir1:
with temp_dir() as package_dir2:
upload_packages(
lp, 'ppa:bar/baz', [package_dir1, package_dir2],
dry_run=False)
call1 = call('ppa:bar/baz', 'baz', package_dir1, dry_run=False)
call2 = call('ppa:bar/baz', 'baz', package_dir2, dry_run=False)
self.assertEqual([call1, call2], up_mock.mock_calls)
team.getPPAByName.assert_called_once_with(name='baz')
|
mjs/juju
|
releasetests/tests/test_upload_packages.py
|
Python
|
agpl-3.0
| 4,745
|
import os
import numpy as np
import pandas as pd
import datetime as dt
from datetime import timedelta
import folium
from shapely.geometry import Point
import geopandas as gpd
import pkg_resources as pkg
current_path = os.path.abspath(".") + '/tmp_data'
def read_wwln(file):
"""Read WWLN file"""
tmp = pd.read_csv(file, parse_dates=True, header=None,
names=['date', 'time', 'lat', 'lon', 'err', '#sta'])
# Generate a list of datetime objects with time to miliseconds
list_dts = []
for dvals, tvals, in zip(tmp['date'], tmp['time']):
list_dts.append(gen_datetime(dvals, tvals))
dtdf = pd.DataFrame(list_dts, columns=['datetime'])
result = pd.concat([dtdf, tmp], axis=1)
result = result.drop(['date', 'time'], axis=1)
return result
def wwln_to_geopandas(file):
"""Read data from Blitzorg first using pandas.read_csv for convienence, and
then convert lat, lon points to a shaleply geometry POINT type.
Finally put this gemoetry into a geopandas dataframe and return it."""
tmp = pd.read_csv(file, parse_dates=True, header=None,
names=['date', 'time', 'lat', 'lon', 'err', '#sta'])
list_dts = [gen_datetime(dvals, tvals)
for dvals, tvals in zip(tmp['date'], tmp['time'])]
points = [[Point(tmp.lat[row], tmp.lon[row]), dt]
for row, dt in zip(tmp.index, list_dts)]
df = gpd.GeoDataFrame(points, columns=['geometry', 'dt'])
return df
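# Illustrative usage sketch (an addition; the file name is an assumed example
# following the 'A<yyyymmdd>' naming used elsewhere in this module):
# >>> df = wwln_to_geopandas('A20160101.loc')
# >>> df.head()  # GeoDataFrame with 'geometry' (Point) and 'dt' columns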
def gen_listfiles(ext, data_path=current_path, start_date=None, end_date=None):
"""**Generate list of files in data directory**
    Using the specified data path and extension, generate a list of files in
    the data directory. If start_date and end_date aren't specified, all files
    in the data directory are selected.
    :parameter data_path: string
    :parameter ext: string
    :parameter start_date: time string in format mm-dd-yyyy (optional)
    :parameter end_date: time string in format mm-dd-yyyy (optional)
:Example:
>>> gen_listfiles(data_path='./data', ext='.loc', start_date='01-01-2016',
end_date='01-10-2016')
"""
# make list of all files in data directory with certain extension ext
listfiles = [fn for fn in os.listdir(data_path) if (fn.endswith(ext))]
# check if start_date & end_date are set
if (start_date is not None) or (end_date is not None):
all_dates = pd.date_range(start_date, end_date, freq='D')
# make list with files in selected range
files = []
for date in all_dates:
yyyy = str(date.year)
mm = "%02d" % (date.month,)
dd = "%02d" % (date.day,)
file = 'A'+yyyy+mm+dd+ext
files.append(file)
        # compare and return only the files actually present in the directory
        listfiles = set(listfiles).intersection(files)
        return listfiles
# if start and end dates aren't set use all files in data dir
else:
return listfiles
def count_lightning(datain, time_step):
"""**Count lightning strikes detected within a defined time_step**
Generate time intervals according to the time_step defined and count
lightning strikes in these intervals. Statistics are also calculated for
lightning detection errors and the number of stations and added to an
    output dataframe. Time stamps in the output dataframe correspond to the
    center of the time periods in which lightning strikes are counted.
    :parameter datain: dataframe (lightning data)
:parameter time_step: integer (time step in minutes)
:Example:
>>> count_lightning(LN_data, time_step)
"""
if(1440 % time_step == 0): # check if time_step is multiple of 1 day
i = 0
# run for loop for all time steps in one day
for time_interval in gen_time_intervals(extract_date(datain['datetime'].iloc[0]),
(extract_date(datain['datetime'].iloc[0])+timedelta(days=1)),
timedelta(minutes=time_step)):
# select data in given time_interval
tmp_LN_data = datain.loc[(datain['datetime'] >= time_interval) &
(datain['datetime'] < time_interval +
timedelta(minutes=time_step))]
# calculate stats
stats_err = gen_stats(tmp_LN_data['err'])
stats_sta = gen_stats(tmp_LN_data['#sta'])
d = {'count': stats_err['count'],
'err_mean': stats_err['mean'],
'err_std': stats_err['std'],
'err_min': stats_err['min'],
'err_max': stats_err['max'],
'#sta_mean': stats_sta['mean'],
'#sta_std': stats_sta['std'],
'#sta_min': stats_sta['min'],
'#sta_max': stats_sta['max']}
col_names = [k for k in d.keys()]
df_index = time_interval+timedelta(minutes=(time_step/2))
temp_LN_count = pd.DataFrame(d, index=[df_index],
columns=col_names)
# add data to existing df
if(i >= 1):
LN_count = LN_count.append(temp_LN_count)
else:
LN_count = temp_LN_count
i = i + 1
return LN_count
else:
print("time_step {0} multiple of 1 day (1400 min)".format(time_step))
def gen_stats(datain):
"""**Calculate lightning statitics and return a dictionary**
Using a raw data in certain time interval calculate mean, std, min, max value for detection
error or number of stations.
:paramter datain: vector with detection error or number of stations
:Example:
>>> gen_stats(lighning_data['#sta'])
"""
tmp_dic={}
tmp_dic['count'] = len(datain)
# if there is no lightning strikes set nan values for all stats parameters
if(tmp_dic['count'] == 0):
tmp_dic['mean'] = np.nan
tmp_dic['std'] = np.nan
tmp_dic['min'] = np.nan
tmp_dic['max'] = np.nan
else:
tmp_dic['mean'] = np.mean(datain)
tmp_dic['std'] = np.std(datain)
tmp_dic['min'] = min(datain)
tmp_dic['max'] = max(datain)
return tmp_dic
def gen_time_intervals(start, end, delta):
"""Create time intervals with timedelta periods using datetime for start
and end
"""
curr = start
while curr < end:
yield curr
curr += delta
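# Illustrative usage sketch (an addition, not part of the original module):
# >>> list(gen_time_intervals(dt.datetime(2016, 1, 1),
# ...                         dt.datetime(2016, 1, 2),
# ...                         timedelta(hours=6)))
# [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 1, 6, 0),
#  datetime.datetime(2016, 1, 1, 12, 0), datetime.datetime(2016, 1, 1, 18, 0)]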
def extract_date(value):
"""
    Convert timestamp to datetime and set everything to zero except the date
"""
dtime = value.to_datetime()
dtime = (dtime - timedelta(hours=dtime.hour) - timedelta(minutes=dtime.minute) -
timedelta(seconds=dtime.second) - timedelta(microseconds=dtime.microsecond))
return dtime
def gen_datetime(dvals, tvals):
dvals = [int(t) for t in dvals.split('/')]
year, month, day = dvals
hh, mm, sec_micro = tvals.split(':')
hh = int(hh)
mm = int(mm)
ss, mss = sec_micro.split('.')
ss = int(ss)
mss = int(mss)
return dt.datetime(year, month, day, hh, mm, ss, mss)
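# Illustrative usage sketch (an addition; the WWLLN-style strings are assumed):
# >>> gen_datetime('2016/01/05', '12:30:45.123')
# datetime.datetime(2016, 1, 5, 12, 30, 45, 123)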
|
jcalogovic/lightning
|
stormstats/misc.py
|
Python
|
mit
| 7,099
|
import os
from getgist import GetGistCommons
from tests.conftest import TEST_FILE_CONTENTS
def test_read_file_without_argument(local):
assert local.read() == TEST_FILE_CONTENTS
def test_read_file_with_argument(local):
assert local.read(local.file_path) == TEST_FILE_CONTENTS
def test_read_non_existent_file(mocker, local):
oops = mocker.patch.object(GetGistCommons, "oops")
assert not local.read(".no_gist")
oops.assert_called_once_with("Sorry, but .no_gist does not exist")
def test_read_directory(mocker, local):
oops = mocker.patch.object(GetGistCommons, "oops")
cwd = os.getcwd()
assert not local.read(cwd)
oops.assert_called_once_with("Sorry, but {} is not a file".format(cwd))
def test_read_file_inside_directory(temporary_file):
assert temporary_file.read() == TEST_FILE_CONTENTS
def test_simple_backup(local):
backup = "{}.bkp".format(local.file_path)
local.backup()
assert os.path.exists(backup)
assert local.read(backup) == TEST_FILE_CONTENTS
def test_two_backups(local):
backup = "{}.bkp".format(local.file_path)
with open(backup, "w") as fobj:
fobj.write("first backup")
new_backup = "{}1".format(backup)
local.backup()
assert os.path.exists(new_backup)
assert local.read(new_backup) == TEST_FILE_CONTENTS
def test_multi_backups(local):
for ext in ("", ".bkp", ".bkp1", ".bkp2", ".bkp3"):
backup = "{}{}".format(local.file_path, ext)
with open(backup, "w") as fobj:
fobj.write(ext or TEST_FILE_CONTENTS)
new_backup = "{}.bkp4".format(local.file_path)
local.backup()
assert os.path.exists(new_backup)
assert local.read(new_backup) == TEST_FILE_CONTENTS
def test_write_file(local):
os.remove(local.file_path)
assert not os.path.exists(local.file_path)
local.save(TEST_FILE_CONTENTS)
assert os.path.exists(local.file_path)
assert local.read() == TEST_FILE_CONTENTS
def test_write_file_overwrite(local, mocker):
confirm = mocker.patch("getgist.local.confirm")
confirm.return_value = True
with open(local.file_path, "w") as fobj:
fobj.write("nope")
assert os.path.exists(local.file_path)
assert local.read() == "nope"
local.save(TEST_FILE_CONTENTS)
assert os.path.exists(local.file_path)
assert local.read() == TEST_FILE_CONTENTS
assert not os.path.exists("{}.bkp1".format(local.file_path))
def test_write_file_with_backup(local, mocker):
confirm = mocker.patch("getgist.local.confirm")
confirm.return_value = False
local.save(TEST_FILE_CONTENTS)
assert os.path.exists(local.file_path)
assert local.read() == TEST_FILE_CONTENTS
backup = "{}.bkp1".format(local.file_path)
local.save("new contents")
assert os.path.exists(local.file_path)
assert os.path.exists(backup)
assert local.read() == "new contents"
assert local.read(backup) == TEST_FILE_CONTENTS
def test_write_file_with_multiple_backup(local, mocker):
for ext in ("", ".bkp", ".bkp1", ".bkp2", ".bkp3"):
backup = "{}{}".format(local.file_path, ext)
with open(backup, "w") as fobj:
fobj.write(ext)
confirm = mocker.patch("getgist.local.confirm")
confirm.return_value = False
local.save(TEST_FILE_CONTENTS)
assert local.read() == TEST_FILE_CONTENTS
for ext in ("", ".bkp", ".bkp1", ".bkp2", ".bkp3", ".bkp4"):
backup = "{}{}".format(local.file_path, ext)
assert os.path.exists(backup)
|
cuducos/getgist
|
tests/test_local_tools.py
|
Python
|
mit
| 3,495
|
import json
from tests import create_admin_authorization_header
def test_create_event(notify_api):
with notify_api.test_request_context():
with notify_api.test_client() as client:
data = {
'event_type': 'sucessful_login',
'data': {'something': 'random', 'in_fact': 'could be anything'}
}
path = '/events'
auth_header = create_admin_authorization_header()
headers = [('Content-Type', 'application/json'), auth_header]
response = client.post(
path,
data=json.dumps(data),
headers=headers)
assert response.status_code == 201
resp_json = json.loads(response.get_data(as_text=True))['data']
assert resp_json['event_type'] == data['event_type']
assert resp_json['data']['something'] == data['data']['something']
assert resp_json['data']['in_fact'] == data['data']['in_fact']
|
alphagov/notifications-api
|
tests/app/events/test_rest.py
|
Python
|
mit
| 996
|
#!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple wave robot WSGI application and forwarding middleware."""
import webob
import webob.exc
from api import robot_abstract
import logging
class RobotMiddleware(object):
"""WSGI middleware that routes /_wave/ requests to a robot wsgi app."""
def __init__(self, robot_app, main_app):
self._robot_app = robot_app
self._main_app = main_app
def __call__(self, environ, start_response):
path = environ['PATH_INFO']
if path.startswith('/_wave/'):
return self._robot_app(environ, start_response)
return self._main_app(environ, start_response)
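# Illustrative wiring sketch (an addition, not part of the original module);
# 'my_robot' and 'main_app' are assumed placeholders:
# application = RobotMiddleware(SimpleRobotApp(my_robot), main_app)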
class SimpleRobotApp(object):
"""WSGI application for serving an abstract robot.
This is just like the Robot class in the Wave api, but it uses the plain WebOb
request/response objects instead of the analogous AppEngine objects.
"""
def __init__(self, robot):
self._robot = robot
def capabilities(self):
xml = self._robot.GetCapabilitiesXml()
response = webob.Response(content_type='text/xml', body=xml)
response.cache_control = 'Private' # XXX
return response
def profile(self):
xml = self._robot.GetProfileJson()
response = webob.Response(content_type='application/json', body=xml)
response.cache_control = 'Private' # XXX
return response
def jsonrpc(self, req):
json_body = req.body
logging.info('Incoming: %s', json_body)
context, events = robot_abstract.ParseJSONBody(json_body)
for event in events:
self._robot.HandleEvent(event, context)
json_response = robot_abstract.SerializeContext(
context, self._robot.version)
logging.info('Outgoing: %s', json_response)
return webob.Response(content_type='application/json',
body=json_response)
def __call__(self, environ, start_response):
req = webob.Request(environ)
if req.path_info == '/_wave/capabilities.xml' and req.method == 'GET':
response = self.capabilities()
elif req.path_info == '/_wave/robot/profile' and req.method == 'GET':
response = self.profile()
elif req.path_info == '/_wave/robot/jsonrpc' and req.method == 'POST':
response = self.jsonrpc(req)
else:
response = webob.exc.HTTPNotFound()
return response(environ, start_response)
|
alexisvincent/downy
|
app.py
|
Python
|
apache-2.0
| 2,858
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
location: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class UsageOperations(object):
"""UsageOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
location: str,
**kwargs: Any
) -> Iterable["_models.ListUsagesResult"]:
"""Gets, for the specified location, the current compute resource usage information as well as the
limits for compute resources under the subscription.
:param location: The location for which resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListUsagesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_12_01.models.ListUsagesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListUsagesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ListUsagesResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/operations/_usage_operations.py
|
Python
|
mit
| 6,197
|
# Date manipulation tool
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2015 Alexandre Dulaunoy - a@foo.be
import calendar
import datetime
import os
import re
def getDateinMonth(year=None, month=None):
if year is None or month is None:
return False
dater = calendar.monthrange(year, month)
startdate = datetime.date(year, month, 1)
enddate = startdate + datetime.timedelta(days=dater[1])
delta = datetime.timedelta(days=1)
d = startdate
r = []
while d < enddate:
r.append(d.strftime("%Y%m%d"))
d += delta
return r
def dumppath(prefix=None, d=None, zero=True):
if d is None:
return False
if prefix is None:
prefix = ''
l = []
for v in d:
year = v[:4]
if not zero:
month = re.sub('^0', '', v[4:6])
day = re.sub('^0', '', v[6:8])
else:
month = v[4:6]
day = v[6:8]
l.append(os.path.join(prefix,year, month, day))
return l
if __name__ == "__main__":
print getDateinMonth(year=2015, month=1)
print dumppath(prefix='/var/logs/',d=getDateinMonth(year=2015, month=1), zero=False)
|
dragonresearchgroup/conficker-research-tools
|
lib/datetool.py
|
Python
|
bsd-3-clause
| 1,203
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
rebost/django
|
django/conf/locale/pl/formats.py
|
Python
|
bsd-3-clause
| 1,327
|
import os
import sys
from django.conf import settings
DIRNAME = os.path.dirname(__file__)
settings.configure(
DEBUG=True,
DATABASE_ENGINE='sqlite3',
DATABASE_NAME=os.path.join(DIRNAME, 'database.db'),
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3'
}
},
ROOT_URLCONF='/',
MIDDLEWARE_CLASSES=(),
TEMPLATE_CONTEXT_PROCESSORS=[
'django.template.context_processors.request'
],
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request'
],
},
},
],
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'tabbed_admin',
'tabbed_admin.tests'
)
)
try:
# Django < 1.8
from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)
except ImportError:
# Django >= 1.8
from django.test.runner import DiscoverRunner
test_runner = DiscoverRunner(verbosity=1)
try:
# Django < 1.7
from django.core.management import setup_environ
setup_environ(settings)
failures = test_runner.run_tests(['tabbed_admin'])
except:
# Django >= 1.7
import django
django.setup()
failures = test_runner.run_tests(['tabbed_admin'])
if failures:
sys.exit(failures)
|
jsoa/django-tabbed-admin
|
tabbed_admin/tests/runtests.py
|
Python
|
bsd-3-clause
| 1,608
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Using `from elasticsearch import *` would break elasticsearch mocking used in unit test.
import elasticsearch
import pendulum
from elasticsearch_dsl import Search
from airflow.utils import timezone
from airflow.utils.helpers import parse_template_string
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import LoggingMixin
class ElasticsearchTaskHandler(FileTaskHandler, LoggingMixin):
PAGE = 0
MAX_LINE_PER_PAGE = 1000
"""
ElasticsearchTaskHandler is a python log handler that
reads logs from Elasticsearch. Note logs are not directly
indexed into Elasticsearch. Instead, it flushes logs
into local files. Additional software setup is required
to index the log into Elasticsearch, such as using
Filebeat and Logstash.
To efficiently query and sort Elasticsearch results, we assume each
    log message has a field `log_id` consisting of the task instance's
    primary keys:
    `log_id = {dag_id}-{task_id}-{execution_date}-{try_number}`
    Log messages with a specific log_id are sorted based on `offset`,
    which is a unique integer indicating the log message's order.
    Timestamps here are unreliable because multiple log messages
    might have the same timestamp.
"""
def __init__(self, base_log_folder, filename_template,
log_id_template, end_of_log_mark,
host='localhost:9200'):
"""
:param base_log_folder: base folder to store logs locally
:param log_id_template: log id template
:param host: Elasticsearch host name
"""
super(ElasticsearchTaskHandler, self).__init__(
base_log_folder, filename_template)
self.closed = False
self.log_id_template, self.log_id_jinja_template = \
parse_template_string(log_id_template)
self.client = elasticsearch.Elasticsearch([host])
self.mark_end_on_close = True
self.end_of_log_mark = end_of_log_mark
def _render_log_id(self, ti, try_number):
if self.log_id_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return self.log_id_jinja_template.render(**jinja_context)
return self.log_id_template.format(dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti
.execution_date.isoformat(),
try_number=try_number)
def _read(self, ti, try_number, metadata=None):
"""
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
:param metadata: log metadata,
        can be used for streaming log reading and auto-tailing.
:return: a list of log documents and metadata.
"""
if not metadata:
metadata = {'offset': 0}
if 'offset' not in metadata:
metadata['offset'] = 0
offset = metadata['offset']
log_id = self._render_log_id(ti, try_number)
logs = self.es_read(log_id, offset)
next_offset = offset if not logs else logs[-1].offset
metadata['offset'] = next_offset
# end_of_log_mark may contain characters like '\n' which is needed to
# have the log uploaded but will not be stored in elasticsearch.
metadata['end_of_log'] = False if not logs \
else logs[-1].message == self.end_of_log_mark.strip()
cur_ts = pendulum.now()
# Assume end of log after not receiving new log for 5 min,
# as executor heartbeat is 1 min and there might be some
# delay before Elasticsearch makes the log available.
if 'last_log_timestamp' in metadata:
last_log_ts = timezone.parse(metadata['last_log_timestamp'])
if cur_ts.diff(last_log_ts).in_minutes() >= 5:
metadata['end_of_log'] = True
if offset != next_offset or 'last_log_timestamp' not in metadata:
metadata['last_log_timestamp'] = str(cur_ts)
message = '\n'.join([log.message for log in logs])
return message, metadata
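    # Illustrative metadata flow for _read (an addition; values assumed):
    #   first call:       metadata == {'offset': 0}
    #   subsequent calls: {'offset': 42, 'end_of_log': False,
    #                      'last_log_timestamp': '2019-01-01T00:05:00+00:00'}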
def es_read(self, log_id, offset):
"""
Returns the logs matching log_id in Elasticsearch and next offset.
        Returns an empty list if no log is found or there was an error.
:param log_id: the log_id of the log to read.
:type log_id: str
        :param offset: the offset to start reading the log from.
:type offset: str
"""
# Offset is the unique key for sorting logs given log_id.
s = Search(using=self.client) \
.query('match_phrase', log_id=log_id) \
.sort('offset')
s = s.filter('range', offset={'gt': offset})
logs = []
if s.count() != 0:
try:
logs = s[self.MAX_LINE_PER_PAGE * self.PAGE:self.MAX_LINE_PER_PAGE] \
.execute()
except Exception as e:
msg = 'Could not read log with log_id: {}, ' \
'error: {}'.format(log_id, str(e))
self.log.exception(msg)
return logs
def set_context(self, ti):
super(ElasticsearchTaskHandler, self).set_context(ti)
self.mark_end_on_close = not ti.raw
def close(self):
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
if not self.mark_end_on_close:
self.closed = True
return
# Case which context of the handler was not set.
if self.handler is None:
self.closed = True
return
# Reopen the file stream, because FileHandler.close() would be called
# first in logging.shutdown() and the stream in it would be set to None.
if self.handler.stream is None or self.handler.stream.closed:
self.handler.stream = self.handler._open()
# Mark the end of file using end of log mark,
# so we know where to stop while auto-tailing.
self.handler.stream.write(self.end_of_log_mark)
super(ElasticsearchTaskHandler, self).close()
self.closed = True
|
fenglu-g/incubator-airflow
|
airflow/utils/log/es_task_handler.py
|
Python
|
apache-2.0
| 7,309
|
import shutil
import tempfile
import subprocess
import os
import sys
import time
from opendm import log
from opendm import system
import locale
from string import Template
class GrassEngine:
def __init__(self):
self.grass_binary = system.which('grass7') or \
system.which('grass72') or \
system.which('grass74') or \
system.which('grass76') or \
shutil.which('grass78') or \
shutil.which('grass80')
if self.grass_binary is None:
log.ODM_WARNING("Could not find a GRASS 7 executable. GRASS scripts will not work.")
else:
log.ODM_INFO("Initializing GRASS engine using {}".format(self.grass_binary))
def create_context(self, serialized_context = {}):
if self.grass_binary is None: raise GrassEngineException("GRASS engine is unavailable")
return GrassContext(self.grass_binary, **serialized_context)
class GrassContext:
def __init__(self, grass_binary, tmpdir = None, template_args = {}, location = None, auto_cleanup=True):
self.grass_binary = grass_binary
if tmpdir is None:
tmpdir = tempfile.mkdtemp('_grass_engine')
self.tmpdir = tmpdir
self.template_args = template_args
self.location = location
self.auto_cleanup = auto_cleanup
def get_cwd(self):
return self.tmpdir
def add_file(self, filename, source, use_as_location=False):
param = os.path.splitext(filename)[0] # filename without extension
dst_path = os.path.abspath(os.path.join(self.get_cwd(), filename))
with open(dst_path, 'w') as f:
f.write(source)
self.template_args[param] = dst_path
if use_as_location:
self.set_location(self.template_args[param])
return dst_path
def add_param(self, param, value):
self.template_args[param] = value
def set_location(self, location):
"""
:param location: either a "epsg:XXXXX" string or a path to a geospatial file defining the location
"""
if not location.lower().startswith('epsg:'):
location = os.path.abspath(location)
self.location = location
def execute(self, script):
"""
:param script: path to .grass script
:return: script output
"""
if self.location is None: raise GrassEngineException("Location is not set")
script = os.path.abspath(script)
# Create grass script via template substitution
try:
with open(script) as f:
script_content = f.read()
except FileNotFoundError:
raise GrassEngineException("Script does not exist: {}".format(script))
tmpl = Template(script_content)
# Write script to disk
if not os.path.exists(self.get_cwd()):
os.mkdir(self.get_cwd())
with open(os.path.join(self.get_cwd(), 'script.sh'), 'w') as f:
f.write(tmpl.substitute(self.template_args))
# Execute it
log.ODM_INFO("Executing grass script from {}: {} --tmp-location {} --exec bash script.sh".format(self.get_cwd(), self.grass_binary, self.location))
env = os.environ.copy()
env["GRASS_ADDON_PATH"] = env.get("GRASS_ADDON_PATH", "") + os.path.abspath(os.path.join("opendm/grass/addons"))
env["LC_ALL"] = "C.UTF-8"
filename = os.path.join(self.get_cwd(), 'output.log')
with open(filename, 'wb') as writer, open(filename, 'rb', 1) as reader:
p = subprocess.Popen([self.grass_binary, '--tmp-location', self.location, '--exec', 'bash', 'script.sh'],
cwd=self.get_cwd(), stdout=subprocess.PIPE, stderr=writer, env=env)
while p.poll() is None:
sys.stdout.write(reader.read().decode('utf8'))
time.sleep(0.5)
# Read the remaining
sys.stdout.write(reader.read().decode('utf8'))
out, err = p.communicate()
out = out.decode('utf-8').strip()
if p.returncode == 0:
return out
else:
raise GrassEngineException("Could not execute GRASS script {} from {}: {}".format(script, self.get_cwd(), err))
def serialize(self):
return {
'tmpdir': self.tmpdir,
'template_args': self.template_args,
'location': self.location,
'auto_cleanup': self.auto_cleanup
}
def cleanup(self):
if os.path.exists(self.get_cwd()):
shutil.rmtree(self.get_cwd())
def __del__(self):
if self.auto_cleanup:
self.cleanup()
class GrassEngineException(Exception):
pass
def cleanup_grass_context(serialized_context):
ctx = grass.create_context(serialized_context)
ctx.cleanup()
grass = GrassEngine()
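# Illustrative usage sketch (an addition, not part of the original module);
# the script path and template parameter are assumed placeholders:
# ctx = grass.create_context()
# ctx.add_param('threshold', 5)
# ctx.set_location('epsg:4326')
# output = ctx.execute('path/to/script.grass')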
|
OpenDroneMap/OpenDroneMap
|
opendm/grass_engine.py
|
Python
|
gpl-3.0
| 4,970
|
#! /usr/bin/env python
'''
oscutils.py -- Open Sound Control builtins for MFP
Copyright (c) 2013 Bill Gribble <grib@billgribble.com>
'''
from ..processor import Processor
from ..mfp_app import MFPApp
from ..bang import Uninit
class OSCPacket(object):
def __init__(self, payload):
self.payload = payload
class OSCIn (Processor):
doc_tooltip_obj = "Open Sound Control message receive"
doc_tooltip_inlet = ["Config input"]
doc_tooltip_outlet = ["OSC data output"]
def __init__(self, init_type, init_args, patch, scope, name):
self.path = None
self.types = None
self.handler = None
Processor.__init__(self, 1, 1, init_type, init_args, patch, scope, name)
initargs, kwargs = self.parse_args(init_args)
if len(initargs) > 0:
self.path = initargs[0]
if len(initargs) > 1:
self.types = initargs[1]
def trigger(self):
need_update = False
if isinstance(self.inlets[0], OSCPacket):
self.outlets[0] = self.inlets[0].payload
self.inlets[0] = Uninit
elif isinstance(self.inlets[0], dict):
path = self.inlets[0].get("path")
if path:
self.path = path
need_update = True
types = self.inlets[0].get("types")
if types:
self.types = types
need_update = True
if need_update:
if self.handler is not None:
MFPApp().osc_mgr.del_method(self.handler, self.types)
self.handler = None
self.handler = MFPApp().osc_mgr.add_method(self.path, self.types, self._handler)
class OSCOut (Processor):
doc_tooltip_obj = "Open Sound Control message send"
doc_tooltip_inlet = ["Message data",
"Destination host:port (UDP) (default: initarg 0)",
"OSC path (default: initarg 1)" ]
def __init__(self, init_type, init_args, patch, scope, name):
self.host = None
self.port = None
self.path = None
Processor.__init__(self, 3, 0, init_type, init_args, patch, scope, name)
initargs, kwargs = self.parse_args(init_args)
if len(initargs) > 0:
parts = initargs[0].split(":")
self.host = parts[0]
if len(parts) > 1:
self.port = int(parts[1])
if len(initargs) > 1:
self.path = initargs[1]
def trigger(self):
if self.inlets[2] is not Uninit:
self.path = self.inlets[2]
self.inlets[2] = Uninit
if self.inlets[1] is not Uninit:
if isinstance(self.inlets[1], str):
parts = self.inlets[1].split(":")
self.host = parts[0]
if len(parts) > 1:
self.port = int(parts[1])
elif isinstance(self.inlets[1], (float, int)):
self.port = int(self.inlets[1])
self.inlets[1] = Uninit
MFPApp().osc_mgr.send((self.host, self.port), self.path, self.inlets[0])
self.inlets[0] = Uninit
def register():
MFPApp().register("osc_in", OSCIn)
MFPApp().register("osc_out", OSCOut)
|
bgribble/mfp
|
mfp/builtins/oscutils.py
|
Python
|
gpl-2.0
| 3,258
|
# Written by Jelle Roozenburg
# see LICENSE.txt for license information
import re, sys, os
from traceback import print_exc
from Tribler.__init__ import LIBRARYNAME
WORDS_REGEXP = re.compile('[a-zA-Z0-9]+')
DEBUG = False
class XXXFilter:
def __init__(self, install_dir):
termfilename = os.path.join(install_dir, LIBRARYNAME, 'Category','filter_terms.filter')
self.xxx_terms, self.xxx_searchterms = self.initTerms(termfilename)
def initTerms(self, filename):
terms = set()
searchterms = set()
try:
f = file(filename, 'r')
lines = f.read().lower().splitlines()
for line in lines:
if line.startswith('*'):
searchterms.add(line[1:])
else:
terms.add(line)
f.close()
except:
if DEBUG:
print_exc()
if DEBUG:
print 'Read %d XXX terms from file %s' % (len(terms)+len(searchterms), filename)
return terms, searchterms
def _getWords(self, string):
return [a.lower() for a in WORDS_REGEXP.findall(string)]
def isXXXTorrent(self, files_list, torrent_name, tracker, comment=None):
if tracker:
tracker = tracker.lower().replace('http://', '').replace('announce','')
else:
tracker = ''
terms = [a[0].lower() for a in files_list]
is_xxx = (len(filter(self.isXXX, terms)) > 0 or
self.isXXX(torrent_name, False) or
self.isXXX(tracker, False) or
(comment and self.isXXX(comment, False))
)
if DEBUG:
if is_xxx:
print 'Torrent is XXX: %s %s' % (torrent_name, tracker)
else:
print 'Torrent is NOT XXX: %s %s' % (torrent_name, tracker)
return is_xxx
def isXXX(self, s, isFilename=True):
s = s.lower()
if self.isXXXTerm(s): # We have also put some full titles in the filter file
return True
if not self.isAudio(s) and self.foundXXXTerm(s):
return True
words = self._getWords(s)
words2 = [' '.join(words[i:i+2]) for i in xrange(0, len(words)-1)]
num_xxx = len([w for w in words+words2 if self.isXXXTerm(w, s)])
if isFilename and self.isAudio(s):
return num_xxx > 2 # almost never classify mp3 as porn
else:
return num_xxx > 0
def foundXXXTerm(self, s):
for term in self.xxx_searchterms:
if term in s:
if DEBUG:
print 'XXXFilter: Found term "%s" in %s' % (term, s)
return True
return False
def isXXXTerm(self, s, title=None):
# check if term-(e)s is in xxx-terms
s = s.lower()
if s in self.xxx_terms:
if DEBUG:
print 'XXXFilter: "%s" is dirty%s' % (s, title and ' in %s' % title or '')
return True
if s.endswith('es'):
if s[:-2] in self.xxx_terms:
if DEBUG:
print 'XXXFilter: "%s" is dirty%s' % (s[:-2], title and ' in %s' % title or '')
return True
elif s.endswith('s') or s.endswith('n'):
if s[:-1] in self.xxx_terms:
if DEBUG:
print 'XXXFilter: "%s" is dirty%s' % (s[:-1], title and ' in %s' % title or '')
return True
return False
audio_extensions = ['cda', 'flac', 'm3u', 'mp2', 'mp3', 'md5', 'vorbis', 'wav', 'wma', 'ogg']
def isAudio(self, s):
return s[s.rfind('.')+1:] in self.audio_extensions
|
egbertbouman/tribler-g
|
Tribler/Category/FamilyFilter.py
|
Python
|
lgpl-2.1
| 3,896
|
#* PYTHON EMAIL PROJECT *#
#* tests.py *#
#* Fordham CSS September 25 *#
#* ------------------------------------ *#
#* Unit tests for PyDate program *#
from Email_Object import Email
from Get_Emails import Set_Priority
import unittest
# Unit Tests for Set_Priority function
class Test_Set_Priority(unittest.TestCase):
# Test domain check
def test_high_domain(self):
email = Email('test@fordham.edu', '', 'Blah', '', '')
EMAILS = [email]
hi,lo = Set_Priority(EMAILS)
self.assertEqual(len(hi),1)
self.assertFalse(lo)
# Test subject check
def test_high_subject(self):
email = Email('test@test.com', '', 'Important', '', '')
EMAILS = [email]
hi,lo = Set_Priority(EMAILS)
self.assertEqual(len(hi),1)
self.assertFalse(lo)
# Test default
def test_low_default(self):
email = Email('test@test.com', '', 'None', '', '')
EMAILS = [email]
hi,lo = Set_Priority(EMAILS)
self.assertEqual(len(lo),1)
self.assertFalse(hi)
# Test Multiple cases
def test_multi_cases(self):
#Important by sender
email_sender = Email('test@fordham.edu', '', 'Blah', '', '')
#Important by subject
email_subject = Email('test@test.com', '', 'Important', '', '')
#Not Important
email_lo = Email('test@test.com', '', 'None', '', '')
EMAILS = [email_sender, email_subject, email_lo]
hi,lo = Set_Priority(EMAILS)
self.assertEqual(len(hi),2)
self.assertEqual(len(lo),1)
if __name__ == '__main__':
unittest.main()
|
nickdibari/pydate
|
tests.py
|
Python
|
mit
| 1,632
|
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import time
import wx
from threading import Event
from win32gui import FindWindow
# Local imports
import eg
from eg import FolderItem, MacroItem
from eg.WinApi.Utils import BringHwndToFront
from eg.WinApi.Dynamic import GetForegroundWindow
from JumpIfElse import JumpIfElse
from NewJumpIf import NewJumpIf
from PythonScript import PythonScript
from ShowMessageBox import ShowMessageBox
from ShowOSD import ShowOSD
eg.RegisterPlugin(
name = "EventGhost",
author = "Bitmonster",
description = (
"Actions to control events, macro flow, and the configuration tree."
),
kind = "core",
version = "1.0.7",
guid = "{9D499A2C-72B6-40B0-8C8C-995831B10BB4}",
)
class EventGhost(eg.PluginBase):
def __init__(self):
self.AddAction(PythonCommand)
self.AddAction(PythonScript)
self.AddAction(AutoRepeat)
self.AddAction(FlushEvents)
self.AddAction(Comment)
self.AddAction(DisableItem)
self.AddAction(DumpResult)
self.AddAction(EnableItem)
self.AddAction(EnableExclusive)
self.AddAction(GetItemState)
self.AddAction(NewJumpIf)
self.AddAction(JumpIfElse)
self.AddAction(JumpIfDoubleEvent)
self.AddAction(JumpIfLongPress)
self.AddAction(OpenConfig)
self.AddAction(OpenEventGhost)
self.AddAction(ShowMessageBox)
self.AddAction(ShowOSD)
self.AddAction(StopProcessing)
self.AddAction(TriggerEvent)
self.AddAction(Wait)
class EnableItem(eg.ActionBase):
name = "Enable Item"
description = "Enables an item in the tree."
iconFile = 'icons/EnableItem'
class text:
label = "Enable: %s"
text1 = "Please select the item which should be enabled:"
cantSelect = (
"The selected item type can't change its enable state.\n\n"
"Please select another item."
)
def __call__(self, link):
if link:
node = link.target
if node:
node.SetEnable(True)
return node
def Configure(self, link=None):
panel = eg.ConfigPanel(resizable=True)
if link is not None:
searchItem = link.target
else:
searchItem = None
link = eg.TreeLink(panel.dialog.treeItem)
tree = eg.TreeItemBrowseCtrl(
panel,
self.FilterFunc,
#searchFunc,
selectItem=searchItem
)
tree.SetFocus()
panel.sizer.Add(panel.StaticText(self.text.text1), 0, wx.BOTTOM, 5)
panel.sizer.Add(tree, 1, wx.EXPAND)
while panel.Affirmed():
treeItem = tree.GetSelection()
if treeItem.IsOk():
obj = tree.GetPyData(treeItem)
if self.IsSelectableItem(obj):
link.SetTarget(obj)
panel.SetResult(link)
continue
eg.MessageBox(self.text.cantSelect, parent=panel)
def FilterFunc(self, dummyObj):
return True
def GetLabel(self, link):
obj = link.target
if obj:
return self.text.label % obj.GetLabel()
return self.text.label % ''
def IsSelectableItem(self, item):
return item.isDeactivatable
class AutoRepeat(eg.ActionBase):
name = "Auto-Repeat Macro"
description = "Makes the current macro auto-repeat."
iconFile = "icons/AutoRepeat"
class text:
seconds = "seconds"
text1 = "Start first repetition after"
text2 = "with one repetition every"
text3 = "Increase repetition the next"
text4 = "to one repetition every"
def __call__(
self,
firstDelay=0.6,
startDelay=0.3,
endDelay=0.01,
sweepTime=3.0
):
event = eg.event
if event.shouldEnd.isSet():
return
elapsed = time.clock() - event.time
if elapsed < firstDelay * 0.90:
delay = firstDelay
elif sweepTime > 0.0:
sweepDelay = (
(startDelay - endDelay) *
(sweepTime - (elapsed + firstDelay)) /
sweepTime
)
if sweepDelay < 0:
sweepDelay = 0
delay = sweepDelay + endDelay
else:
delay = endDelay
event.shouldEnd.wait(delay)
if not event.shouldEnd.isSet():
eg.programCounter = (eg.currentItem.parent.childs[0], 0)
def Configure(
self,
firstDelay=0.6,
startDelay=0.3,
endDelay=0.01,
sweepTime=3.0
):
text = self.text
panel = eg.ConfigPanel()
firstDelayCtrl = panel.SpinNumCtrl(firstDelay)
startDelayCtrl = panel.SpinNumCtrl(startDelay)
sweepTimeCtrl = panel.SpinNumCtrl(sweepTime)
endDelayCtrl = panel.SpinNumCtrl(endDelay)
panel.SetColumnFlags(0, wx.ALIGN_RIGHT)
panel.AddLine(text.text1, firstDelayCtrl, text.seconds)
panel.AddLine(text.text2, startDelayCtrl, text.seconds)
panel.AddLine()
panel.AddLine(text.text3, sweepTimeCtrl, text.seconds)
panel.AddLine(text.text4, endDelayCtrl, text.seconds)
while panel.Affirmed():
panel.SetResult(
firstDelayCtrl.GetValue(),
startDelayCtrl.GetValue(),
endDelayCtrl.GetValue(),
sweepTimeCtrl.GetValue()
)
class Comment(eg.ActionBase):
name = "Comment"
description = (
"Does nothing at all. Useful for commenting your configuration."
)
iconFile = 'icons/Comment'
def __call__(self):
pass
class DisableItem(EnableItem):
name = "Disable Item"
description = "Disables an item in the tree."
iconFile = 'icons/DisableItem'
class text:
label = "Disable: %s"
text1 = "Please select the item which should be disabled:"
cantSelect = (
"The selected item type can't change its enable state.\n\n"
"Please select another item."
)
def __call__(self, link):
if link:
node = link.target
if node and node.isDeactivatable:
node.SetEnable(False)
return node
class DumpResult(eg.ActionBase):
name = "Dump Result to Log"
description = (
"Outputs the most recent `eg.result` to your EventGhost log. Useful "
"for debugging."
)
def __call__(self):
result = eg.result
print str(result)
return result
class EnableExclusive(EnableItem):
name = "Enable Item Exclusively"
description = (
"Enables a specified folder or macro in your configuration, but "
"also disables all other folders and macros that are siblings on "
"the same level in that branch of the tree."
)
iconFile = "icons/EnableExclusive"
class text:
label = "Enable Exclusively: %s"
text1 = "Please select the folder/macro which should be enabled:"
cantSelect = (
"The selected item type can't change its enable state.\n\n"
"Please select another item."
)
def __call__(self, link):
if not link:
return
node = link.target
if not node:
return
def DoIt():
node.SetEnable(True)
for child in node.parent.childs:
if child is not node and child.isDeactivatable:
child.SetEnable(False)
eg.actionThread.Call(DoIt)
def FilterFunc(self, item):
return isinstance(item, (FolderItem, MacroItem))
def IsSelectableItem(self, item):
return item.isDeactivatable
class FlushEvents(eg.ActionBase):
name = "Clear Pending Events"
description = """<rst>
Clears all unprocessed events that are currently in the processing
queue.
    It is useful when a macro involves lengthy processing, during which
    events have queued up that should not be processed.
**Example:** You have a lengthy "start system" macro which takes about
90 seconds to process. The end user will not see anything until the
    projector lights up, which takes 60s. It is very likely that they press
    the remote button that starts the macro several times in a row,
causing all of the lengthy processing to start over and over again. If
you place a "Clear Pending Events" command at the end of your macro,
all the excessive remote key presses will be discarded.
"""
iconFile = "icons/Plugin"
def __call__(self):
eg.eventThread.ClearPendingEvents()
eg.actionThread.ClearPendingEvents()
class GetItemState(EnableItem):
name = "Get Item State"
description = "Gets an item's enable state (True when enabled)."
iconFile = 'icons/DisableItem'
class text:
label = "Get State: %s"
text1 = "Please select the item whose enable state should be detected:"
cantSelect = (
"The enable state of selected item can't be detected.\n\n"
"Please select another item."
)
def __call__(self, link):
if link:
node = link.target
if node and node.isDeactivatable:
return node.isEnabled
class JumpIfDoubleEvent(eg.ActionBase):
name = "Jump If Duplicate Event"
description = (
"Jumps to another macro, if the same event that has triggered this "
"macro, happens twice in a given time."
)
iconFile = "icons/LongPress"
class text:
label = "If event arrives twice, go to: %s"
text1 = "If event arrives twice within"
text2 = "seconds,"
text3 = "jump to:"
text4 = (
"Select the macro that should be executed if the event happens "
"twice..."
)
text5 = (
"Please select the macro, which should be triggered "
"if the event is a double click."
)
def __call__(self, interval, link):
firstEvent = eg.event
# wait for the first event to release
firstEvent.shouldEnd.wait(10.0)
waitEvent = Event()
waitEvent.wasSameEvent = False
def EventFilter(event):
if event.string == firstEvent.string:
waitEvent.wasSameEvent = True
waitEvent.secondEvent = event
waitEvent.set()
return True
else:
waitEvent.set()
eg.eventThread.AddFilter(firstEvent.source, EventFilter)
waitEvent.wait(interval)
eg.eventThread.RemoveFilter(firstEvent.source, EventFilter)
if waitEvent.isSet() and waitEvent.wasSameEvent:
nextItem = link.target
nextIndex = nextItem.parent.GetChildIndex(nextItem)
eg.programCounter = (nextItem, nextIndex)
eg.event = waitEvent.secondEvent
def Configure(self, interval=0.5, link=None):
panel = eg.ConfigPanel()
text = self.text
intervalCtrl = panel.SpinNumCtrl(interval)
macroCtrl = eg.MacroSelectButton(
panel,
eg.text.General.choose,
text.text4,
text.text5,
link
)
sizer1 = eg.HBoxSizer(
(panel.StaticText(text.text1), 0, wx.ALIGN_CENTER_VERTICAL),
(intervalCtrl, 0, wx.LEFT | wx.RIGHT, 5),
(panel.StaticText(text.text2), 0, wx.ALIGN_CENTER_VERTICAL),
)
mySizer = wx.FlexGridSizer(2, 3, 5, 5)
mySizer.AddGrowableCol(1, 1)
mySizer.Add(panel.StaticText(text.text3), 0, wx.ALIGN_CENTER_VERTICAL)
mySizer.Add(macroCtrl, 1, wx.EXPAND)
panel.sizer.AddMany(((sizer1), (mySizer, 1, wx.EXPAND | wx.TOP, 5)))
while panel.Affirmed():
panel.SetResult(intervalCtrl.GetValue(), macroCtrl.GetValue())
def GetLabel(self, interval, link):
return self.text.label % (link.target.name)
class JumpIfLongPress(eg.ActionBase):
name = "Jump If Long Press"
description = (
"Jumps to another macro, if the button on the remote is held down "
"longer than the configured time."
)
iconFile = "icons/LongPress"
class text:
label = "If button held for %s sec(s), go to: %s"
text1 = "If button held for longer than"
text2 = "seconds,"
text3 = "jump to:"
text4 = "Select the long press macro..."
text5 = (
"Please select the macro, which should be triggered "
"if the event is a long event."
)
def __call__(self, interval, link):
eg.event.shouldEnd.wait(interval)
if not eg.event.shouldEnd.isSet():
nextItem = link.target
nextIndex = nextItem.parent.GetChildIndex(nextItem)
eg.programCounter = (nextItem, nextIndex)
def Configure(self, interval=2.0, link=None):
panel = eg.ConfigPanel()
text = self.text
intervalCtrl = panel.SpinNumCtrl(interval)
macroCtrl = eg.MacroSelectButton(
panel,
eg.text.General.choose,
text.text4,
text.text5,
link
)
sizer1 = eg.HBoxSizer(
(panel.StaticText(text.text1), 0, wx.ALIGN_CENTER_VERTICAL),
(intervalCtrl, 0, wx.LEFT | wx.RIGHT, 5),
(panel.StaticText(text.text2), 0, wx.ALIGN_CENTER_VERTICAL),
)
mySizer = wx.FlexGridSizer(2, 3, 5, 5)
mySizer.AddGrowableCol(1, 1)
mySizer.Add(panel.StaticText(text.text3), 0, wx.ALIGN_CENTER_VERTICAL)
mySizer.Add(macroCtrl, 1, wx.EXPAND)
panel.sizer.AddMany(((sizer1), (mySizer, 1, wx.EXPAND | wx.TOP, 5)))
while panel.Affirmed():
panel.SetResult(intervalCtrl.GetValue(), macroCtrl.GetValue())
def GetLabel(self, interval, link):
return self.text.label % (interval, link.target.name)
class OpenConfig(eg.ActionBase):
name = "Open Configuration"
description = "Opens the specified configuration dialog."
iconFile = "icons/Dialog"
class text:
text0 = "Action or plugin: "
text1 = "Select action or plugin..."
text2 = (
"Please select the action or plugin, whose configuration dialogue "
"should be opened."
)
def __call__(self, link):
wx.CallAfter(eg.document.OnCmdConfigure, link.target)
wx.CallAfter(
self.BringDialogToFront,
eg.text.General.settingsActionCaption
)
@staticmethod
def BringDialogToFront(name):
hwnd = 0
i = 0
while hwnd == 0 and i < 10000:
hwnd = FindWindow("#32770", name)
i += 1
if hwnd:
BringHwndToFront(hwnd)
def Configure(self, link=None):
panel = eg.ConfigPanel()
text = self.text
actionCtrl = eg.ActionSelectButton(
panel,
eg.text.General.choose,
text.text1,
text.text2,
link
)
mySizer = wx.FlexGridSizer(2, 2, 5, 5)
mySizer.AddGrowableCol(1)
mySizer.Add(panel.StaticText(text.text0), 0, wx.ALIGN_CENTER_VERTICAL)
mySizer.Add(actionCtrl, 1, wx.EXPAND)
panel.sizer.Add(mySizer, 1, wx.EXPAND | wx.ALL, 5)
while panel.Affirmed():
panel.SetResult(actionCtrl.GetValue())
def GetLabel(self, link):
label = link.target.GetLabel() if link else ""
return "%s: %s" % (self.name, label)
class OpenEventGhost(eg.ActionBase):
class text:
name = "Open EventGhost"
description = (
"Opens, closes, or toggles EventGhost's main window. "
"Particularly helpful when system tray icon is hidden."
)
label = (
"Open EventGhost",
"Close EventGhost",
"Toggle EventGhost",
)
def __call__(self, action = 0):
if action == 0:
show = True
elif action == 1:
show = False
elif action == 2:
if eg.document.frame:
if eg.document.frame.GetHandle() == GetForegroundWindow():
show = False
else:
show = True
else:
show = True
else:
return False
func = (eg.document.ShowFrame if show else eg.document.HideFrame)
wx.CallAfter(func)
def Configure(self, action = 0):
panel = eg.ConfigPanel()
choice = panel.RadioBox(action, self.text.label)
panel.sizer.Add(choice, 0, wx.ALL, 10)
while panel.Affirmed():
panel.SetResult(choice.GetValue())
def GetLabel(self, action = 0):
return self.text.label[action]
class PythonCommand(eg.ActionWithStringParameter):
name = "Python Command"
description = "Executes a single Python statement."
iconFile = 'icons/PythonCommand'
class text:
parameterDescription = "Python statement:"
def __call__(self, pythonstring=""):
try:
try:
result = eval(pythonstring, {}, eg.globals.__dict__)
return result
except SyntaxError:
exec(pythonstring, {}, eg.globals.__dict__)
return eg.result
except:
eg.PrintTraceback(
eg.text.Error.InAction % pythonstring,
skip=1,
source=eg.currentItem
)
def GetLabel(self, pythonstring=""):
return pythonstring
class StopProcessing(eg.ActionBase):
name = "Stop Processing Event"
description = (
"Stops EventGhost from searching for further macros matching the "
"current event."
)
iconFile = 'icons/StopProcessing'
def __call__(self):
eg.event.skipEvent = True
DESCRIPTION = """<md>Triggers an event with options.
* ***Event String***
______
Events are separated by "."'s, the purpose for this is to group them
together. As an example if you wanted to group events together by a
device type this can be done by using the following example.
Remote.StopButton
So you would be able to group all of the various buttons to a remote.
Plugins use this same grouping mechanism for generating events. You also
have the ability to go one step further and group a group.
Remote.Volume.Up
Remote.Volume.Down
Remote.Volume.Mute
The purpose of this is that when you add an event to a macro, you can
target specific groups by using the *. So if you wanted to target all
of the events that take place for Remote.Volume, you would add an event
to the macro like this:
Remote.Volume.*
This will run that macro for any event that begins with Remote.Volume.
Every event must have at least a group and an item. If you specify
only an item and no group, the group "Main" will be added
automatically. So if you specify an event of VolumeUp, the actual
event that gets triggered will be:
Main.VolumeUp
There is also a check box to enable or disable the "Main" prefix.
You will only be able to disable it if there is more than a single group.
You also have the ability to set the event string using a python
expression (see below).
<br><br>
* ***Wait Time***
______
The amount of time to wait before triggering the event. This has a
resolution of hundredths of a second.
<br><br>
* ***Event Payload***
______
When an event occurs, you are able to attach a data packet to the
event. This data packet can be any kind of Python object.
The most common ones are:
* integers 3
* floats 0.00
* lists []
* tuples ()
* dictionaries {}
* unicode strings u''
* and strings ""
When there is an attached payload, you will see the payload in the log.
Just like with the Event String, you also have the ability to attach a
python expression (see below).
<br><br>
* ***Add to Queue***
______
If the wait time is set to 0.00, this option will appear. Adding the
event to the queue means that the event will get triggered after the
event that caused this action has processed all of its macros, and
also after any events that came in while the event that triggered this
action was running have been processed.
If unchecked, the event will get triggered right away, without being
added to the queue and without waiting until the event that started
this action has finished processing.
<br><br>
* ***Restore eg.event***
______
If "Add to Queue" has not been checked, this option will appear. It
relates more to the scripting side of EventGhost. Each time an event
gets triggered, two variables are set: eg.event and eg.eventString.
When you trigger an event while the current event is running, those
two variables get changed to the new event. If, upon completion of the
new event, you would like those variables restored to the event that
ran this action, check this box.
<br><br>
* ***Using a Python Expression***
______
You can use a python expression in several ways. The expression
**MUST** be wrapped in curly braces {}. This is the identifier that
tells EventGhost that it needs to do some work.
You can pass global variables which are stored in eg.globals by
wrapping the variable name in the curly braces.
{eg.globals.some_variable}
If you want to transfer the results of another action you can do this
as well.
{eg.plugins.SomePlugin.SomeAction()}
***Or maybe you want to do something a little more complex.***
A different value passed based on if a global is True or False.
{"TV.On" if eg.globals.tv_power else "TV.Off"}
Or checking a global for a specific value and passing True or False.
{eg.plugins.SomePlugin.SomeAction() == 100}
When using a python expression in a payload, the curly braces are the
same characters used for a dictionary literal, but this case has been
accounted for, so don't worry.
These expressions get evaluated when the Trigger Event action runs. So
if you have a programmed wait time (see above), the data may be
different at the start of the wait time than at the end.
"""
class TriggerEvent(eg.ActionBase):
__doc__ = DESCRIPTION
name = "Trigger Event"
description = DESCRIPTION
iconFile = "icons/Plugin"
class text:
text1 = "Event string to fire:"
text2 = "Delay the firing of the event:"
text3 = "seconds. (0 = fire immediately)"
text4 = "Add event to event queue:"
text5 = "Return eg.event to original event:"
text6 = "Event Payload:"
text7 = 'Remove "Main" prefix:'
def __call__(
self,
eventString,
waitTime=0,
payload=None,
queueEvent=True,
restoreEvent=False,
removeMain=False
):
def parse(value):
if value is None:
return None
parsed_value = eg.ParseString(value)
if value == parsed_value:
try:
value = eval(value)
except (SyntaxError, NameError):
pass
else:
value = parsed_value
return value
eventString = parse(eventString)
payload = parse(payload)
split_event = eventString.split('.', 1)
if len(split_event) == 1:
split_event.insert(0, 'Main')
if not removeMain and split_event[0] != 'Main':
split_event.insert(0, 'Main')
split_event = [split_event[0], '.'.join(split_event[1:])]
kwargs = dict(
prefix=split_event[0],
suffix=split_event[1],
payload=payload
)
if not waitTime:
if queueEvent:
eg.TriggerEvent(**kwargs)
else:
event = eg.EventGhostEvent(**kwargs)
if restoreEvent:
old_event_string = eg.eventString
old_event = eg.event
event.Execute()
eg.event = old_event
eg.eventString = old_event_string
else:
event.Execute()
else:
eg.scheduler.AddShortTask(waitTime, eg.TriggerEvent, **kwargs)
def Configure(
self,
eventString="",
waitTime=0,
payload=None,
queueEvent=False,
restoreEvent=False,
removeMain=False
):
panel = eg.ConfigPanel()
text = self.text
if payload is None:
payload = ''
eventStringCtrl = panel.TextCtrl(eventString)
waitTimeCtrl = panel.SpinNumCtrl(waitTime, integerWidth=5)
payloadCtrl = panel.TextCtrl(payload)
queueEventCtrl = wx.CheckBox(panel, -1, '')
restoreEventCtrl = wx.CheckBox(panel, -1, '')
removeMainCtrl = wx.CheckBox(panel, -1, '')
queueEventCtrl.SetValue(queueEvent)
restoreEventCtrl.SetValue(restoreEvent)
removeMainCtrl.SetValue(removeMain)
queueEventCtrl.Enable(not waitTime)
restoreEventCtrl.Enable(not waitTime and not queueEvent)
removeMainCtrl.Enable('.' in eventString)
if not eventString:
removeMainCtrl.Disable()
def on_char(evt):
if '.' in eventStringCtrl.GetValue():
removeMainCtrl.Enable()
else:
removeMainCtrl.Disable()
evt.Skip()
eventStringCtrl.Bind(wx.EVT_TEXT, on_char)
def on_spin(evt):
def check_spin():
value = bool(waitTimeCtrl.GetValue())
queueEventCtrl.Enable(not value)
                restoreEventCtrl.Enable(
                    not value and not queueEventCtrl.GetValue()
                )
wx.CallLater(20, check_spin)
evt.Skip()
def on_check(evt):
restoreEventCtrl.Enable(not queueEventCtrl.GetValue())
evt.Skip()
def HBoxSizer(lbl, ctrl, suf=None, prop=0):
sizer = wx.BoxSizer(wx.HORIZONTAL)
style = wx.EXPAND | wx.ALL | wx.ALIGN_BOTTOM
lbl = panel.StaticText(lbl)
lbl_sizer = wx.BoxSizer(wx.VERTICAL)
lbl_sizer.AddStretchSpacer(prop=1)
lbl_sizer.Add(lbl)
sizer.Add(lbl_sizer, 0, style, 5)
sizer.Add(ctrl, prop, style, 5)
if suf is not None:
suf = panel.StaticText(suf)
suf_sizer = wx.BoxSizer(wx.VERTICAL)
suf_sizer.AddStretchSpacer(prop=1)
suf_sizer.Add(suf)
sizer.Add(suf_sizer, 0, style, 5)
panel.sizer.Add(sizer, 0, wx.EXPAND)
return lbl
waitTimeCtrl.Bind(wx.EVT_SPIN, on_spin)
waitTimeCtrl.Bind(wx.EVT_CHAR_HOOK, on_spin)
queueEventCtrl.Bind(wx.EVT_CHECKBOX, on_check)
eg.EqualizeWidths((
HBoxSizer(text.text1, eventStringCtrl, prop=1),
HBoxSizer(text.text7, removeMainCtrl),
HBoxSizer(text.text6, payloadCtrl, prop=1),
HBoxSizer(text.text2, waitTimeCtrl, suf=text.text3),
HBoxSizer(text.text4, queueEventCtrl),
HBoxSizer(text.text5, restoreEventCtrl),
))
while panel.Affirmed():
panel.SetResult(
eventStringCtrl.GetValue(),
waitTimeCtrl.GetValue(),
payloadCtrl.GetValue() if payloadCtrl.GetValue() else None,
queueEventCtrl.IsEnabled() and queueEventCtrl.GetValue(),
restoreEventCtrl.IsEnabled() and restoreEventCtrl.GetValue(),
removeMainCtrl.IsEnabled() and removeMainCtrl.GetValue(),
)
    def GetLabel(
        self,
        eventString="",
        waitTime=0,
        payload=None,
        queueEvent=False,
        restoreEvent=False,
        removeMain=False
    ):
label = (
'%s: Event: %s, Payload: %s, Wait: ' %
(self.name, eventString, payload)
)
if waitTime:
label += '%.2f seconds' % waitTime
elif queueEvent:
label += 'Queued'
else:
label += 'Immediate, Restore eg.event: %s' % restoreEvent
return label
class Wait(eg.ActionBase):
name = "Wait"
description = "Pauses execution for the specified number of seconds."
iconFile = "icons/Wait"
class text:
label = "Wait: %s sec(s)"
wait = "Wait"
seconds = "seconds"
def __call__(self, waitTime):
eg.actionThread.Wait(waitTime)
def Configure(self, waitTime=0.0):
panel = eg.ConfigPanel()
waitTimeCtrl = panel.SpinNumCtrl(waitTime, integerWidth=3)
panel.AddLine(self.text.wait, waitTimeCtrl, self.text.seconds)
while panel.Affirmed():
panel.SetResult(waitTimeCtrl.GetValue())
def GetLabel(self, waitTime=0):
return self.text.label % str(waitTime)
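# --- Illustrative sketch (editor's addition, not part of the original file) ---
# TriggerEvent.__call__ above derives an event prefix (the group) and a suffix
# from the raw event string. A minimal standalone rendition of that logic,
# useful for seeing the "Main" handling without an EventGhost runtime; the
# helper name is hypothetical:
def _split_event_string(eventString, removeMain=False):
    """Return (prefix, suffix) the way TriggerEvent derives them."""
    parts = eventString.split('.', 1)
    if len(parts) == 1:
        parts.insert(0, 'Main')              # bare item -> group "Main"
    if not removeMain and parts[0] != 'Main':
        parts.insert(0, 'Main')              # re-apply the "Main" prefix
    return parts[0], '.'.join(parts[1:])
# _split_event_string('VolumeUp')                          -> ('Main', 'VolumeUp')
# _split_event_string('Remote.Volume.Up')                  -> ('Main', 'Remote.Volume.Up')
# _split_event_string('Remote.Volume.Up', removeMain=True) -> ('Remote', 'Volume.Up')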
|
tfroehlich82/EventGhost
|
plugins/EventGhost/__init__.py
|
Python
|
gpl-2.0
| 29,902
|
import json
import logging
import ckan.lib.search as search
from pylons import config
from ckan.lib.base import model
from ckan.model import Session
from pylons.i18n.translation import get_lang
from ckan.plugins.interfaces import Interface
from ckanext.dcatapit.model import DCATAPITTagVocabulary
log = logging.getLogger(__name__)
class ICustomSchema(Interface):
'''
Allows extensions to provide their own schema fields.
'''
def get_custom_schema(self):
'''gets the array containing the custom schema fields'''
return []
def get_language():
lang = get_lang()
if lang is not None:
lang = unicode(lang[0])
return lang
def update_solr_package_indexes(package_dict):
# Updating Solr Index
if package_dict:
log.debug("::: UPDATING SOLR INDEX :::")
# solr update here
psi = search.PackageSearchIndex()
# update the solr index in batches
BATCH_SIZE = 50
def process_solr(q):
# update the solr index for the query
query = search.PackageSearchQuery()
q = {
'q': q,
'fl': 'data_dict',
'wt': 'json',
'fq': 'site_id:"%s"' % config.get('ckan.site_id'),
'rows': BATCH_SIZE
}
for result in query.run(q)['results']:
data_dict = json.loads(result['data_dict'])
if data_dict['owner_org'] == package_dict.get('owner_org'):
psi.index_package(data_dict, defer_commit=True)
count = 0
q = []
q.append('id:"%s"' % (package_dict.get('id')))
count += 1
if count % BATCH_SIZE == 0:
process_solr(' OR '.join(q))
q = []
if len(q):
process_solr(' OR '.join(q))
# finally commit the changes
psi.commit()
else:
log.warning("::: package_dict is None: SOLR INDEX CANNOT BE UPDATED! :::")
def save_extra_package_multilang(pkg, lang, field_type):
try:
from ckanext.multilang.model import PackageMultilang
except ImportError:
log.warn('DCAT-AP_IT: multilang extension not available.')
return
log.debug('Creating create_loc_field for package ID: %r', str(pkg.get('id')))
PackageMultilang.persist(pkg, lang, field_type)
log.info('Localized field created successfully')
def upsert_package_multilang(pkg_id, field_name, field_type, lang, text):
try:
from ckanext.multilang.model import PackageMultilang
except ImportError:
log.warn('DCAT-AP_IT: multilang extension not available.')
return
pml = PackageMultilang.get(pkg_id, field_name, lang, field_type)
if not pml and text:
PackageMultilang.persist({'id':pkg_id, 'field':field_name, 'text':text}, lang, field_type)
elif pml and not text:
pml.purge()
elif pml and not pml.text == text:
pml.text = text
pml.save()
def upsert_resource_multilang(res_id, field_name, lang, text):
try:
from ckanext.multilang.model import ResourceMultilang
except ImportError:
log.warn('DCAT-AP_IT: multilang extension not available.')
return
ml = ResourceMultilang.get_for_pk(res_id, field_name, lang)
if not ml and text:
ResourceMultilang.persist_resources([ResourceMultilang(res_id, field_name, lang, text)])
elif ml and not text:
ml.purge()
elif ml and not ml.text == text:
ml.text = text
ml.save()
def update_extra_package_multilang(extra, pkg_id, field, lang, field_type='extra'):
try:
from ckanext.multilang.model import PackageMultilang
except ImportError:
log.warn('DCAT-AP_IT: multilang extension not available.')
return
    if extra.get('key') == field.get('name', None) and field.get('localized', False):
log.debug(':::::::::::::::Localizing schema field: %r', field['name'])
f = PackageMultilang.get(pkg_id, field['name'], lang, field_type)
if f:
if extra.get('value') == '':
f.purge()
elif f.text != extra.get('value'):
# Update the localized field value for the current language
f.text = extra.get('value')
f.save()
log.info('Localized field updated successfully')
elif extra.get('value') != '':
# Create the localized field record
save_extra_package_multilang({'id': pkg_id, 'text': extra.get('value'), 'field': extra.get('key')}, lang, 'extra')
def get_localized_field_value(field=None, pkg_id=None, field_type='extra'):
try:
from ckanext.multilang.model import PackageMultilang
except ImportError:
log.warn('DCAT-AP_IT: multilang extension not available.')
return
if field and pkg_id:
lang = get_language()
if lang:
localized_value = PackageMultilang.get(pkg_id, field, lang, field_type)
if localized_value:
return localized_value.text
else:
return None
else:
return None
else:
return None
def get_for_package(pkg_id):
'''
Returns all the localized fields of a dataset, in a dict of dicts, i.e.:
{FIELDNAME:{LANG:label,...},...}
Returns None if multilang extension not loaded.
'''
try:
from ckanext.multilang.model import PackageMultilang
except ImportError:
log.warn('DCAT-AP_IT: multilang extension not available.')
# TODO: if no multilang, return the dataset in a single language in the same format of the multilang data
return None
records = PackageMultilang.get_for_package(pkg_id)
return _multilang_to_dict(records)
def get_for_resource(res_id):
'''
Returns all the localized fields of a dataset's resources, in a dict of dicts, i.e.:
{FIELDNAME:{LANG:label, ...}, ...}
Returns None if multilang extension not loaded.
'''
try:
from ckanext.multilang.model import ResourceMultilang
except ImportError:
log.warn('DCAT-AP_IT: multilang extension not available.')
return None
records = ResourceMultilang.get_for_resource_id(res_id)
return _multilang_to_dict(records)
def _multilang_to_dict(records):
fields_dict = {}
for r in records:
fieldname = r.field
lang = r.lang
value = r.text
lang_dict = fields_dict.get(fieldname, {})
if len(lang_dict) == 0:
fields_dict[fieldname] = lang_dict
lang_dict[lang] = value
return fields_dict
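# Illustrative sketch (editor's addition, not part of this extension): the
# {field: {lang: text}} shape produced by _multilang_to_dict, exercised with a
# minimal stand-in record type instead of real PackageMultilang rows. The
# helper below is hypothetical and exists only to demonstrate the structure.
def _demo_multilang_to_dict():
    class _Rec(object):
        def __init__(self, field, lang, text):
            self.field, self.lang, self.text = field, lang, text
    records = [_Rec('title', 'it', 'Titolo'), _Rec('title', 'en', 'Title')]
    # -> {'title': {'it': 'Titolo', 'en': 'Title'}}
    return _multilang_to_dict(records)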
def persist_tag_multilang(name, lang, localized_text, vocab_name):
log.info('DCAT-AP_IT: persisting tag multilang for tag %r ...', name)
tag = DCATAPITTagVocabulary.by_name(name, lang)
if tag:
# Update the existing record
if localized_text and localized_text != tag.text:
tag.text = localized_text
try:
tag.save()
log.info('::::::::: OBJECT TAG UPDATED SUCCESSFULLY :::::::::')
pass
except Exception, e:
# on rollback, the same closure of state
# as that of commit proceeds.
Session.rollback()
log.error('Exception occurred while persisting DB objects: %s', e)
raise
else:
# Create a new localized record
vocab = model.Vocabulary.get(vocab_name)
existing_tag = model.Tag.by_name(name, vocab)
if existing_tag:
DCATAPITTagVocabulary.persist({'id': existing_tag.id, 'name': name, 'text': localized_text}, lang)
log.info('::::::::: OBJECT TAG PERSISTED SUCCESSFULLY :::::::::')
def get_localized_tag_name(tag_name=None, fallback_lang=None):
if tag_name:
lang = get_language()
localized_tag_name = DCATAPITTagVocabulary.by_name(tag_name, lang)
if localized_tag_name:
return localized_tag_name.text
else:
if fallback_lang:
fallback_name = DCATAPITTagVocabulary.by_name(tag_name, fallback_lang)
if fallback_name:
fallback_name = fallback_name.text
return fallback_name
else:
return tag_name
else:
return tag_name
else:
return None
def get_all_localized_tag_labels(tag_name):
return DCATAPITTagVocabulary.all_by_name(tag_name)
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckanext-dcatapit/ckanext/dcatapit/interfaces.py
|
Python
|
gpl-3.0
| 8,552
|
"""
GraphicsWidget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.
"""
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph.functions as fn
from .GraphicsWidget import GraphicsWidget
from .ViewBox import *
from .GradientEditorItem import *
from .LinearRegionItem import *
from .PlotDataItem import *
from .AxisItem import *
from .GridItem import *
from pyqtgraph.Point import Point
import numpy as np
import pyqtgraph.debug as debug
__all__ = ['HistogramLUTItem']
class HistogramLUTItem(GraphicsWidget):
"""
This is a graphicsWidget which provides controls for adjusting the display of an image.
Includes:
- Image histogram
- Movable region over histogram to select black/white levels
- Gradient editor to define color lookup table for single-channel images
"""
sigLookupTableChanged = QtCore.Signal(object)
sigLevelsChanged = QtCore.Signal(object)
sigLevelChangeFinished = QtCore.Signal(object)
def __init__(self, image=None, fillHistogram=True):
"""
If *image* (ImageItem) is provided, then the control will be automatically linked to the image and changes to the control will be immediately reflected in the image's appearance.
By default, the histogram is rendered with a fill. For performance, set *fillHistogram* = False.
"""
GraphicsWidget.__init__(self)
self.lut = None
self.imageItem = None
self.layout = QtGui.QGraphicsGridLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(1,1,1,1)
self.layout.setSpacing(0)
self.vb = ViewBox()
self.vb.setMaximumWidth(152)
self.vb.setMinimumWidth(45)
self.vb.setMouseEnabled(x=False, y=True)
self.gradient = GradientEditorItem()
self.gradient.setOrientation('right')
self.gradient.loadPreset('grey')
self.region = LinearRegionItem([0, 1], LinearRegionItem.Horizontal)
self.region.setZValue(1000)
self.vb.addItem(self.region)
self.axis = AxisItem('left', linkView=self.vb, maxTickLength=-10, showValues=False)
self.layout.addItem(self.axis, 0, 0)
self.layout.addItem(self.vb, 0, 1)
self.layout.addItem(self.gradient, 0, 2)
self.range = None
self.gradient.setFlag(self.gradient.ItemStacksBehindParent)
self.vb.setFlag(self.gradient.ItemStacksBehindParent)
#self.grid = GridItem()
#self.vb.addItem(self.grid)
self.gradient.sigGradientChanged.connect(self.gradientChanged)
self.region.sigRegionChanged.connect(self.regionChanging)
self.region.sigRegionChangeFinished.connect(self.regionChanged)
self.vb.sigRangeChanged.connect(self.viewRangeChanged)
self.plot = PlotDataItem()
self.plot.rotate(90)
self.fillHistogram(fillHistogram)
self.vb.addItem(self.plot)
self.autoHistogramRange()
if image is not None:
self.setImageItem(image)
#self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
def fillHistogram(self, fill=True, level=0.0, color=(100, 100, 200)):
if fill:
self.plot.setFillLevel(level)
self.plot.setFillBrush(color)
else:
self.plot.setFillLevel(None)
#def sizeHint(self, *args):
#return QtCore.QSizeF(115, 200)
def paint(self, p, *args):
pen = self.region.lines[0].pen
rgn = self.getLevels()
p1 = self.vb.mapFromViewToItem(self, Point(self.vb.viewRect().center().x(), rgn[0]))
p2 = self.vb.mapFromViewToItem(self, Point(self.vb.viewRect().center().x(), rgn[1]))
gradRect = self.gradient.mapRectToParent(self.gradient.gradRect.rect())
for pen in [fn.mkPen('k', width=3), pen]:
p.setPen(pen)
p.drawLine(p1, gradRect.bottomLeft())
p.drawLine(p2, gradRect.topLeft())
p.drawLine(gradRect.topLeft(), gradRect.topRight())
p.drawLine(gradRect.bottomLeft(), gradRect.bottomRight())
#p.drawRect(self.boundingRect())
def setHistogramRange(self, mn, mx, padding=0.1):
"""Set the Y range on the histogram plot. This disables auto-scaling."""
self.vb.enableAutoRange(self.vb.YAxis, False)
self.vb.setYRange(mn, mx, padding)
#d = mx-mn
#mn -= d*padding
#mx += d*padding
#self.range = [mn,mx]
#self.updateRange()
#self.vb.setMouseEnabled(False, True)
#self.region.setBounds([mn,mx])
def autoHistogramRange(self):
"""Enable auto-scaling on the histogram plot."""
self.vb.enableAutoRange(self.vb.XYAxes)
#self.range = None
#self.updateRange()
#self.vb.setMouseEnabled(False, False)
#def updateRange(self):
#self.vb.autoRange()
#if self.range is not None:
#self.vb.setYRange(*self.range)
#vr = self.vb.viewRect()
#self.region.setBounds([vr.top(), vr.bottom()])
def setImageItem(self, img):
self.imageItem = img
img.sigImageChanged.connect(self.imageChanged)
img.setLookupTable(self.getLookupTable) ## send function pointer, not the result
#self.gradientChanged()
self.regionChanged()
self.imageChanged(autoLevel=True)
#self.vb.autoRange()
def viewRangeChanged(self):
self.update()
def gradientChanged(self):
if self.imageItem is not None:
if self.gradient.isLookupTrivial():
self.imageItem.setLookupTable(None) #lambda x: x.astype(np.uint8))
else:
self.imageItem.setLookupTable(self.getLookupTable) ## send function pointer, not the result
self.lut = None
#if self.imageItem is not None:
#self.imageItem.setLookupTable(self.gradient.getLookupTable(512))
self.sigLookupTableChanged.emit(self)
def getLookupTable(self, img=None, n=None, alpha=None):
if n is None:
if img.dtype == np.uint8:
n = 256
else:
n = 512
if self.lut is None:
self.lut = self.gradient.getLookupTable(n, alpha=alpha)
return self.lut
def regionChanged(self):
#if self.imageItem is not None:
#self.imageItem.setLevels(self.region.getRegion())
self.sigLevelChangeFinished.emit(self)
#self.update()
def regionChanging(self):
if self.imageItem is not None:
self.imageItem.setLevels(self.region.getRegion())
self.sigLevelsChanged.emit(self)
self.update()
def imageChanged(self, autoLevel=False, autoRange=False):
prof = debug.Profiler('HistogramLUTItem.imageChanged', disabled=True)
h = self.imageItem.getHistogram()
prof.mark('get histogram')
if h[0] is None:
return
self.plot.setData(*h)
prof.mark('set plot')
if autoLevel:
mn = h[0][0]
mx = h[0][-1]
self.region.setRegion([mn, mx])
prof.mark('set region')
prof.finish()
def getLevels(self):
return self.region.getRegion()
def setLevels(self, mn, mx):
self.region.setRegion([mn, mx])
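# Illustrative usage sketch (editor's addition, not part of this module): how a
# HistogramLUTItem is typically wired to an ImageItem, per the class docstring.
# pyqtgraph is imported lazily inside the function to avoid a circular import
# at module load; the helper name is hypothetical.
def _demo_histogram_lut():
    import numpy as np
    import pyqtgraph as pg
    app = pg.mkQApp()                    # reuses an existing QApplication
    win = pg.GraphicsLayoutWidget()
    img = pg.ImageItem(np.random.normal(size=(200, 200)))
    view = win.addViewBox()
    view.addItem(img)
    hist = HistogramLUTItem(image=img)   # control auto-links to the image
    win.addItem(hist)
    win.show()
    return app, win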
|
ibressler/pyqtgraph
|
pyqtgraph/graphicsItems/HistogramLUTItem.py
|
Python
|
mit
| 7,526
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
#Author: Raymond Hettinger
#License: MIT License
#http://code.activestate.com/recipes/576693/ revision 9, downloaded 2012-03-28
from .python import iterkeys, iteritems
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in iterkeys(other):
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in iteritems(kwds):
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
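# Illustrative sketch (editor's addition, not part of the backport): the
# ordering guarantees documented above, exercised on a small instance. The
# helper name is hypothetical.
def _demo_ordereddict():
    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert list(od) == ['a', 'b', 'c']            # insertion order preserved
    assert od.popitem() == ('c', 3)               # LIFO by default
    assert od.popitem(last=False) == ('a', 1)     # FIFO when last=False
    return od                                     # OrderedDict([('b', 2)])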
|
hlin117/statsmodels
|
statsmodels/compat/ordereddict.py
|
Python
|
bsd-3-clause
| 8,996
|
# -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array
from .exceptions import DataDimensionalityWarning
from .exceptions import NotFittedError
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
"""Find a 'safe' number of components to randomly project to
The distortion introduced by a random projection `p` only changes the
distance between two points by a factor (1 +- eps) in an euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix with shape [n_components, n_features] (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or numpy array of int greater than 0,
Number of samples. If an array is given, it will compute
a safe number of components array-wise.
eps : float or numpy array of float in ]0,1[, optional (default=0.1)
Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
If an array is given, it will compute a safe number of components
array-wise.
Returns
-------
n_components : int or numpy array of int,
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
return (4 * np.log(n_samples) / denominator).astype(np.int)
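# Worked example (editor's addition) of the bound implemented above: for
# n_samples = 1e6 and eps = 0.5,
#     4 * log(1e6) / (0.5 ** 2 / 2 - 0.5 ** 3 / 3)
#   = 4 * 13.8155 / (0.125 - 0.0416667)
#  ~= 55.262 / 0.0833333 ~= 663.1,
# which the astype(np.int) truncation turns into the 663 of the doctest.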
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
    if n_features <= 0:
        raise ValueError("n_features must be strictly positive, got %d" %
                         n_features)
def gaussian_random_matrix(n_components, n_features, random_state=None):
""" Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
sparse_random_matrix
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components: numpy array or CSR matrix with shape [n_components, n_features]
        The generated random matrix.
See Also
--------
SparseRandomProjection
gaussian_random_matrix
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for i in xrange(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
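# Illustrative sketch (editor's addition, not part of scikit-learn): checking
# empirically that sparse_random_matrix() honours the requested density. The
# helper name is hypothetical; it assumes density < 1 so a CSR matrix (which
# has an .nnz attribute) is returned.
def _demo_sparse_density(n_components=100, n_features=1000, density=0.1):
    components = sparse_random_matrix(n_components, n_features,
                                      density=density, random_state=0)
    # fraction of stored non-zeros should be close to the requested density
    observed = components.nnz / float(n_components * n_features)
    return observed   # close to 0.1 in expectation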
class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
self.components_ = None
self.n_components_ = None
@abstractmethod
    def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
            if self.n_components <= 0:
                raise ValueError("n_components must be greater than 0, got %s"
                                 % self.n_components)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
        assert_equal(
            self.components_.shape,
            (self.n_components_, n_features),
            err_msg=('An error has occurred: the self.components_ matrix '
                     'does not have the proper shape.'))
return self
def transform(self, X, y=None):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
if self.components_ is None:
raise NotFittedError('No random projection matrix had been fit.')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
'Impossible to perform projection:'
'X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : numpy array of shape [n_components, n_features]
Random matrix used for the projection.
See Also
--------
SparseRandomProjection
"""
def __init__(self, n_components='auto', eps=0.1, random_state=None):
super(GaussianRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection
Sparse random matrix is an alternative to dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density` the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
density : float in range ]0, 1], optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : strictly positive float, optional, (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : boolean, optional (default=False)
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : CSR matrix with shape [n_components, n_features]
Random matrix used for the projection.
density_ : float in range 0.0 - 1.0
        Concrete density computed when density = "auto".
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
https://users.soe.ucsc.edu/~optas/papers/jl.pdf
"""
def __init__(self, n_components='auto', density='auto', eps=0.1,
dense_output=False, random_state=None):
super(SparseRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
self.density_ = None
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
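# Illustrative usage sketch (editor's addition, not part of scikit-learn):
# projecting a dataset with SparseRandomProjection as described in the class
# docstring. With 100 samples and the default eps=0.1, the JL bound selects
# n_components_ = 3947, well below the original 10000 features. The helper
# name is hypothetical.
def _demo_sparse_random_projection():
    rng = np.random.RandomState(42)
    X = rng.rand(100, 10000)
    transformer = SparseRandomProjection(random_state=rng)
    X_new = transformer.fit_transform(X)
    return X_new.shape   # (100, 3947)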
|
alexeyum/scikit-learn
|
sklearn/random_projection.py
|
Python
|
bsd-3-clause
| 22,132
|
__author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
import string
import random
class Utils:
@staticmethod
def get_random_text(length_text=10, space_number=1, with_upper_case=True):
results = []
while len(results) < length_text:
char = random.choice(string.ascii_letters[:26])
results.append(char)
if with_upper_case:
results[0] = results[0].upper()
current_spaces = []
        while len(current_spaces) < space_number:
            space_pos = random.randint(2, length_text - 3)
            if space_pos in current_spaces:
                continue  # position already used; draw another one
            results[space_pos] = " "
            if with_upper_case:
                # capitalize the character that follows the inserted space
                results[space_pos + 1] = results[space_pos + 1].upper()
            current_spaces.append(space_pos)
return ''.join(results)
@staticmethod
def get_ios_id(length=10):
results = []
while len(results) < length:
char = random.choice(string.digits + string.ascii_letters)
results.append(char)
results[3] = "-"
results[6] = "-"
return ''.join(results)
@staticmethod
def get_android_id(length=10):
results = []
while len(results) < length:
char = random.choice(string.ascii_letters)
results.append(char)
return ''.join(results)
|
tonybeltramelli/pix2code
|
compiler/classes/Utils.py
|
Python
|
apache-2.0
| 1,382
|
# -*- coding: utf-8 -*-
"""
@copyright Copyright (c) 2013 Submit Consulting
@author Angel Sullon (@asullom)
@package sad
Description: Tags for displaying the dynamic menus
"""
from django import template
from django.template import resolve_variable, Context
import datetime
from django.template.loader import render_to_string
from django.contrib.sessions.models import Session
from django.conf import settings
from apps.utils.security import DataAccessToken
from apps.space.models import Enterprise, Headquar
from django.template.defaultfilters import stringfilter
from apps.utils.messages import Message
from apps.utils.security import SecurityKey
from apps.sad.menus import Menus
register = template.Library()
@register.simple_tag
def load_menu(request, module):
"""
    Interface for the method that loads into variables the menus to be shown to the user
Usage::
{% load_menu request 'MODULE_KEY' %}
Definition::
('WEB', 'Web informativa'),
('VENTAS', 'Ventas'),
('BACKEND', 'Backend Manager'),
Examples::
{% load_menu request 'BACKEND' %}
"""
return Menus.load(request, module)
@register.simple_tag
def desktop(request):
"""
    Interface for the method that renders the desktop menu
Usage::
{% desktop request %}
Examples::
{% desktop request %}
"""
return Menus.desktop(request)
@register.simple_tag
def desktop_items(request):
"""
    Interface for the method that lists the items in the backend
Usage::
{% desktop_items request %}
Examples::
{% desktop_items request %}
"""
return Menus.desktop_items(request)
@register.simple_tag
def phone(request):
"""
    Interface for the method that renders the mobile-devices menu
Usage::
{% phone request %}
Examples::
{% phone request %}
"""
return Menus.phone(request)
@register.simple_tag
def side_items(request):
"""
    Interface for the method that lists the items in the sidebar
Usage::
{% side_items request %}
Examples::
{% side_items request %}
"""
return Menus.side_items(request)
@register.simple_tag
def get_grupos(request, url):
"""
    Generates the Groups menu to be printed in header.html
Usage::
{% get_grupos request get_url %}
Examples::
{% url 'mod_ventas_dashboard' as get_url %}
{% get_grupos request get_url %}
"""
sede = None
if DataAccessToken.get_headquar_id(request.session):
try:
sede = Headquar.objects.get(id=DataAccessToken.get_headquar_id(request.session))
except:
Message.error(request, ("Sede no se encuentra en la base de datos."))
value = ''
w = ""
d = DataAccessToken.get_grupo_id_list(request.session)
if sede:
w = (u' <a href="#" class="dropdown-toggle" data-toggle="dropdown" title ="%s">%s > %s %s<b class="caret"></b></a>' % (sede.association.name, sede.enterprise.name, sede.name, value))
o = ''
    if d:
        for i in d:
            o = o + (u'<li><a href="%s?grupo=%s">%s/%s</a></li>' % (url, i, sede.name, ""))
if sede:
o = o + (u'<li><a href="%s?">%s/Todas las areas</a></li>' % (url, sede.name))
a = (u'<ul class="nav">'
u' <li class="dropdown">'
u' %s'
u' <ul class="dropdown-menu">' % (w))
c = (u' </ul>'
u' </li>'
u'</ul>')
return "%s%s%s" % (a, o, c)
|
submitconsulting/backenddj
|
apps/sad/templatetags/user_menu.py
|
Python
|
bsd-3-clause
| 3,420
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2011, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import gc
import os
import re
import sys
import time
import shutil
import urllib
import unittest
import functools
import threading
from . import (callbacks, conf, drivers, httpserver, i18n, ircdb, irclib,
ircmsgs, ircutils, log, plugin, registry, utils, world)
from .utils import minisix
if minisix.PY2:
from httplib import HTTPConnection
from urllib import splithost, splituser
from urllib import URLopener
else:
from http.client import HTTPConnection
from urllib.parse import splithost, splituser
from urllib.request import URLopener
class verbosity:
NONE = 0
EXCEPTIONS = 1
MESSAGES = 2
i18n.import_conf()
network = True
# This is the global list of suites that are to be run.
suites = []
timeout = 10
originalCallbacksGetHelp = callbacks.getHelp
lastGetHelp = 'x' * 1000
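# lastGetHelp starts as a junk sentinel so "was this reply just the help
# string?" checks (see assertNotError/assertHelp below) never match before any
# help has actually been generated; cachingGetHelp keeps it up to date.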
def cachingGetHelp(method, name=None, doc=None):
global lastGetHelp
lastGetHelp = originalCallbacksGetHelp(method, name, doc)
return lastGetHelp
callbacks.getHelp = cachingGetHelp
def retry(tries=3):
assert tries > 0
def decorator(f):
@functools.wraps(f)
def newf(self):
try:
f(self)
except AssertionError as e:
first_exception = e
for _ in range(1, tries):
try:
f(self)
except AssertionError as e:
pass
else:
break
else:
# All failed
raise first_exception
return newf
return decorator
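# Usage sketch (illustrative; the test method name is made up):
#
#     class SomeFlakyTestCase(SupyTestCase):
#         @retry(tries=3)
#         def testRemoteLookup(self):
#             self.assertTrue(lookupSomething())
#
# The decorated method reruns on AssertionError only; if every attempt fails,
# the first failure is re-raised.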
def getTestIrc():
irc = irclib.Irc('test')
# Gotta clear the connect messages (USER, NICK, etc.)
while irc.takeMsg():
pass
return irc
class TimeoutError(AssertionError):
def __str__(self):
return '%r timed out' % self.args[0]
class TestPlugin(callbacks.Plugin):
def eval(self, irc, msg, args):
"""<text>
This is the help for eval. Since Owner doesn't have an eval command
anymore, we needed to add this so as not to invalidate any of the tests
that depended on that eval command.
"""
try:
irc.reply(repr(eval(' '.join(args))))
except callbacks.ArgumentError:
raise
except Exception as e:
irc.reply(utils.exnToString(e))
# Since we know we don't now need the Irc object, we just give None. This
# might break if callbacks.Privmsg ever *requires* the Irc object.
TestInstance = TestPlugin(None)
conf.registerPlugin('TestPlugin', True, public=False)
class SupyTestCase(unittest.TestCase):
"""This class exists simply for extra logging. It's come in useful in the
past."""
def setUp(self):
log.critical('Beginning test case %s', self.id())
threads = [t.getName() for t in threading.enumerate()]
log.critical('Threads: %L', threads)
unittest.TestCase.setUp(self)
def tearDown(self):
for irc in world.ircs[:]:
irc._reallyDie()
if sys.version_info < (2, 7, 0):
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (repr(member),
repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (repr(member),
repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (repr(expr1),
repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
class PluginTestCase(SupyTestCase):
"""Subclass this to write a test case for a plugin. See
plugins/Plugin/test.py for an example.
"""
plugins = None
cleanConfDir = True
cleanDataDir = True
config = {}
def __init__(self, methodName='runTest'):
self.timeout = timeout
originalRunTest = getattr(self, methodName)
def runTest(self):
run = True
if hasattr(self, 'irc') and self.irc:
for cb in self.irc.callbacks:
cbModule = sys.modules[cb.__class__.__module__]
if hasattr(cbModule, 'deprecated') and cbModule.deprecated:
print('')
print('Ignored, %s is deprecated.' % cb.name())
run = False
if run:
originalRunTest()
runTest = utils.python.changeFunctionName(runTest, methodName)
setattr(self.__class__, methodName, runTest)
SupyTestCase.__init__(self, methodName=methodName)
self.originals = {}
def setUp(self, nick='test', forceSetup=False):
if not forceSetup and \
self.__class__ in (PluginTestCase, ChannelPluginTestCase):
            # Necessary because there's a test in here that shouldn't run.
return
SupyTestCase.setUp(self)
# Just in case, let's do this. Too many people forget to call their
# super methods.
for irc in world.ircs[:]:
irc._reallyDie()
# Set conf variables appropriately.
conf.supybot.reply.whenAddressedBy.chars.setValue('@')
conf.supybot.reply.error.detailed.setValue(True)
conf.supybot.reply.whenNotCommand.setValue(True)
self.myVerbose = world.myVerbose
def rmFiles(dir):
for filename in os.listdir(dir):
file = os.path.join(dir, filename)
if os.path.isfile(file):
os.remove(file)
else:
shutil.rmtree(file)
if self.cleanConfDir:
rmFiles(conf.supybot.directories.conf())
if self.cleanDataDir:
rmFiles(conf.supybot.directories.data())
ircdb.users.reload()
ircdb.ignores.reload()
ircdb.channels.reload()
if self.plugins is None:
raise ValueError('PluginTestCase must have a "plugins" attribute.')
self.nick = nick
self.prefix = ircutils.joinHostmask(nick, 'user', 'host.domain.tld')
self.irc = getTestIrc()
MiscModule = plugin.loadPluginModule('Misc')
OwnerModule = plugin.loadPluginModule('Owner')
ConfigModule = plugin.loadPluginModule('Config')
plugin.loadPluginClass(self.irc, MiscModule)
plugin.loadPluginClass(self.irc, OwnerModule)
plugin.loadPluginClass(self.irc, ConfigModule)
        if isinstance(self.plugins, str):
            self.plugins = [self.plugins]
        for name in self.plugins:
            if name not in ('Owner', 'Misc', 'Config'):
                module = plugin.loadPluginModule(name,
                                                 ignoreDeprecation=True)
                plugin.loadPluginClass(self.irc, module)
self.irc.addCallback(TestInstance)
for (name, value) in self.config.items():
group = conf.supybot
parts = registry.split(name)
if parts[0] == 'supybot':
parts.pop(0)
for part in parts:
group = group.get(part)
self.originals[group] = group()
group.setValue(value)
def tearDown(self):
if self.__class__ in (PluginTestCase, ChannelPluginTestCase):
            # Necessary because there's a test in here that shouldn't run.
return
for (group, original) in self.originals.items():
group.setValue(original)
ircdb.users.close()
ircdb.ignores.close()
ircdb.channels.close()
SupyTestCase.tearDown(self)
self.irc = None
gc.collect()
def _feedMsg(self, query, timeout=None, to=None, frm=None,
usePrefixChar=True, expectException=False):
if to is None:
to = self.irc.nick
if frm is None:
frm = self.prefix
if timeout is None:
timeout = self.timeout
if self.myVerbose >= verbosity.MESSAGES:
print('') # Extra newline, so it's pretty.
prefixChars = conf.supybot.reply.whenAddressedBy.chars()
if not usePrefixChar and query[0] in prefixChars:
query = query[1:]
if minisix.PY2:
query = query.encode('utf8') # unicode->str
msg = ircmsgs.privmsg(to, query, prefix=frm)
if self.myVerbose >= verbosity.MESSAGES:
print('Feeding: %r' % msg)
if not expectException and self.myVerbose >= verbosity.EXCEPTIONS:
conf.supybot.log.stdout.setValue(True)
self.irc.feedMsg(msg)
fed = time.time()
response = self.irc.takeMsg()
while response is None and time.time() - fed < timeout:
time.sleep(0.1) # So it doesn't suck up 100% cpu.
drivers.run()
response = self.irc.takeMsg()
if self.myVerbose >= verbosity.MESSAGES:
print('Response: %r' % response)
if not expectException and self.myVerbose >= verbosity.EXCEPTIONS:
conf.supybot.log.stdout.setValue(False)
return response
def getMsg(self, query, **kwargs):
return self._feedMsg(query, **kwargs)
def feedMsg(self, query, to=None, frm=None):
"""Just feeds it a message, that's all."""
if to is None:
to = self.irc.nick
if frm is None:
frm = self.prefix
self.irc.feedMsg(ircmsgs.privmsg(to, query, prefix=frm))
# These assertError/assertNoError are somewhat fragile. The proper way to
# do them would be to use a proxy for the irc object and intercept .error.
# But that would be hard, so I don't bother. When this breaks, it'll get
# fixed, but not until then.
def assertError(self, query, **kwargs):
m = self._feedMsg(query, expectException=True, **kwargs)
if m is None:
raise TimeoutError(query)
if lastGetHelp not in m.args[1]:
self.failUnless(m.args[1].startswith('Error:'),
'%r did not error: %s' % (query, m.args[1]))
return m
def assertSnarfError(self, query, **kwargs):
return self.assertError(query, usePrefixChar=False, **kwargs)
def assertNotError(self, query, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError(query)
self.failIf(m.args[1].startswith('Error:'),
'%r errored: %s' % (query, m.args[1]))
self.failIf(lastGetHelp in m.args[1],
'%r returned the help string.' % query)
return m
def assertSnarfNotError(self, query, **kwargs):
return self.assertNotError(query, usePrefixChar=False, **kwargs)
def assertHelp(self, query, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError(query)
msg = m.args[1]
if 'more message' in msg:
msg = msg[0:-27] # Strip (XXX more messages)
self.failUnless(msg in lastGetHelp,
'%s is not the help (%s)' % (m.args[1], lastGetHelp))
return m
def assertNoResponse(self, query, timeout=0, **kwargs):
m = self._feedMsg(query, timeout=timeout, **kwargs)
self.failIf(m, 'Unexpected response: %r' % m)
return m
def assertSnarfNoResponse(self, query, timeout=0, **kwargs):
return self.assertNoResponse(query, timeout=timeout,
usePrefixChar=False, **kwargs)
def assertResponse(self, query, expectedResponse, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError(query)
self.assertEqual(m.args[1], expectedResponse,
'%r != %r' % (expectedResponse, m.args[1]))
return m
def assertSnarfResponse(self, query, expectedResponse, **kwargs):
return self.assertResponse(query, expectedResponse,
usePrefixChar=False, **kwargs)
def assertRegexp(self, query, regexp, flags=re.I, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError(query)
self.failUnless(re.search(regexp, m.args[1], flags),
'%r does not match %r' % (m.args[1], regexp))
return m
def assertSnarfRegexp(self, query, regexp, flags=re.I, **kwargs):
        return self.assertRegexp(query, regexp, flags=flags,
                                 usePrefixChar=False, **kwargs)
def assertNotRegexp(self, query, regexp, flags=re.I, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError(query)
self.failUnless(re.search(regexp, m.args[1], flags) is None,
'%r matched %r' % (m.args[1], regexp))
return m
def assertSnarfNotRegexp(self, query, regexp, flags=re.I, **kwargs):
        return self.assertNotRegexp(query, regexp, flags=flags,
                                    usePrefixChar=False, **kwargs)
def assertAction(self, query, expectedResponse=None, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError(query)
self.failUnless(ircmsgs.isAction(m), '%r is not an action.' % m)
if expectedResponse is not None:
s = ircmsgs.unAction(m)
self.assertEqual(s, expectedResponse,
'%r != %r' % (s, expectedResponse))
return m
def assertSnarfAction(self, query, expectedResponse=None, **kwargs):
        return self.assertAction(query, expectedResponse=expectedResponse,
                                 usePrefixChar=False, **kwargs)
def assertActionRegexp(self, query, regexp, flags=re.I, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError(query)
self.failUnless(ircmsgs.isAction(m))
s = ircmsgs.unAction(m)
self.failUnless(re.search(regexp, s, flags),
'%r does not match %r' % (s, regexp))
def assertSnarfActionRegexp(self, query, regexp, flags=re.I, **kwargs):
        return self.assertActionRegexp(query, regexp, flags=flags,
                                       usePrefixChar=False, **kwargs)
_noTestDoc = ('Admin', 'Channel', 'Config',
'Misc', 'Owner', 'User', 'TestPlugin')
def TestDocumentation(self):
if self.__class__ in (PluginTestCase, ChannelPluginTestCase):
return
for cb in self.irc.callbacks:
name = cb.name()
            if name in self._noTestDoc and \
                    name.lower() not in self.__class__.__name__.lower():
continue
self.failUnless(sys.modules[cb.__class__.__name__].__doc__,
'%s has no module documentation.' % name)
if hasattr(cb, 'isCommandMethod'):
for attr in dir(cb):
if cb.isCommandMethod(attr) and \
attr == callbacks.canonicalName(attr):
self.failUnless(getattr(cb, attr, None).__doc__,
'%s.%s has no help.' % (name, attr))
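# Minimal PluginTestCase subclass sketch (illustrative names; see
# plugins/Plugin/test.py for a real example, as the class docstring says):
#
#     class MyPluginTestCase(PluginTestCase):
#         plugins = ('MyPlugin',)
#         def testEcho(self):
#             self.assertResponse('echo hi', 'hi')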
class ChannelPluginTestCase(PluginTestCase):
channel = '#test'
def setUp(self, nick='test', forceSetup=False):
if not forceSetup and \
self.__class__ in (PluginTestCase, ChannelPluginTestCase):
return
PluginTestCase.setUp(self)
self.irc.feedMsg(ircmsgs.join(self.channel, prefix=self.prefix))
m = self.irc.takeMsg()
self.failIf(m is None, 'No message back from joining channel.')
self.assertEqual(m.command, 'MODE')
m = self.irc.takeMsg()
self.failIf(m is None, 'No message back from joining channel.')
self.assertEqual(m.command, 'MODE')
m = self.irc.takeMsg()
self.failIf(m is None, 'No message back from joining channel.')
self.assertEqual(m.command, 'WHO')
def _feedMsg(self, query, timeout=None, to=None, frm=None, private=False,
usePrefixChar=True, expectException=False):
if to is None:
if private:
to = self.irc.nick
else:
to = self.channel
if frm is None:
frm = self.prefix
if timeout is None:
timeout = self.timeout
if self.myVerbose >= verbosity.MESSAGES:
print('') # Newline, just like PluginTestCase.
prefixChars = conf.supybot.reply.whenAddressedBy.chars()
if query[0] not in prefixChars and usePrefixChar:
query = prefixChars[0] + query
if minisix.PY2 and isinstance(query, unicode):
query = query.encode('utf8') # unicode->str
if not expectException and self.myVerbose >= verbosity.EXCEPTIONS:
conf.supybot.log.stdout.setValue(True)
msg = ircmsgs.privmsg(to, query, prefix=frm)
if self.myVerbose >= verbosity.MESSAGES:
print('Feeding: %r' % msg)
self.irc.feedMsg(msg)
fed = time.time()
response = self.irc.takeMsg()
while response is None and time.time() - fed < timeout:
time.sleep(0.1)
drivers.run()
response = self.irc.takeMsg()
if response is not None:
if response.command == 'PRIVMSG':
args = list(response.args)
# Strip off nick: at beginning of response.
if args[1].startswith(self.nick) or \
args[1].startswith(ircutils.nickFromHostmask(self.prefix)):
try:
args[1] = args[1].split(' ', 1)[1]
except IndexError:
# Odd. We'll skip this.
pass
ret = ircmsgs.privmsg(*args)
else:
ret = response
else:
ret = None
if self.myVerbose >= verbosity.MESSAGES:
print('Returning: %r' % ret)
if not expectException and self.myVerbose >= verbosity.EXCEPTIONS:
conf.supybot.log.stdout.setValue(False)
return ret
def feedMsg(self, query, to=None, frm=None, private=False):
"""Just feeds it a message, that's all."""
if to is None:
if private:
to = self.irc.nick
else:
to = self.channel
if frm is None:
frm = self.prefix
self.irc.feedMsg(ircmsgs.privmsg(to, query, prefix=frm))
class TestRequestHandler(httpserver.SupyHTTPRequestHandler):
def __init__(self, rfile, wfile, *args, **kwargs):
        self._headers_mode = True
        self._headers = {}  # send_headers() writes here; must exist before handling
        self.rfile = rfile
        self.wfile = wfile
self.handle_one_request()
def send_response(self, code):
assert self._headers_mode
self._response = code
def send_headers(self, name, value):
assert self._headers_mode
self._headers[name] = value
def end_headers(self):
assert self._headers_mode
self._headers_mode = False
def do_X(self, *args, **kwargs):
assert httpserver.http_servers, \
'The HTTP server is not started.'
self.server = httpserver.http_servers[0]
httpserver.SupyHTTPRequestHandler.do_X(self, *args, **kwargs)
httpserver.http_servers = [httpserver.TestSupyHTTPServer()]
# Partially stolen from the standard Python library :)
def open_http(url, data=None):
"""Use HTTP protocol."""
user_passwd = None
proxy_passwd= None
if isinstance(url, str):
host, selector = splithost(url)
if host:
user_passwd, host = splituser(host)
host = urllib.unquote(host)
realhost = host
else:
host, selector = url
# check whether the proxy contains authorization information
proxy_passwd, host = splituser(host)
# now we proceed with the url we want to obtain
urltype, rest = urllib.splittype(selector)
url = rest
user_passwd = None
if urltype.lower() != 'http':
realhost = None
else:
realhost, rest = splithost(rest)
if realhost:
user_passwd, realhost = splituser(realhost)
if user_passwd:
selector = "%s://%s%s" % (urltype, realhost, rest)
if urllib.proxy_bypass(realhost):
host = realhost
#print "proxy via http:", host, selector
if not host: raise IOError('http error', 'no host given')
if proxy_passwd:
import base64
proxy_auth = base64.b64encode(proxy_passwd).strip()
else:
proxy_auth = None
if user_passwd:
import base64
auth = base64.b64encode(user_passwd).strip()
else:
auth = None
c = FakeHTTPConnection(host)
if data is not None:
c.putrequest('POST', selector)
c.putheader('Content-Type', 'application/x-www-form-urlencoded')
c.putheader('Content-Length', '%d' % len(data))
else:
c.putrequest('GET', selector)
if proxy_auth: c.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
if auth: c.putheader('Authorization', 'Basic %s' % auth)
if realhost: c.putheader('Host', realhost)
for args in URLopener().addheaders: c.putheader(*args)
c.endheaders()
return c
class FakeHTTPConnection(HTTPConnection):
_data = ''
_headers = {}
def __init__(self, rfile, wfile):
HTTPConnection.__init__(self, 'localhost')
self.rfile = rfile
self.wfile = wfile
def send(self, data):
if minisix.PY3 and isinstance(data, bytes):
data = data.decode()
self.wfile.write(data)
#def putheader(self, name, value):
# self._headers[name] = value
#def connect(self, *args, **kwargs):
# self.sock = self.wfile
#def getresponse(self, *args, **kwargs):
# pass
class HTTPPluginTestCase(PluginTestCase):
def setUp(self):
PluginTestCase.setUp(self, forceSetup=True)
def request(self, url, method='GET', read=True, data={}):
assert url.startswith('/')
wfile = minisix.io.StringIO()
rfile = minisix.io.StringIO()
connection = FakeHTTPConnection(wfile, rfile)
connection.putrequest(method, url)
connection.endheaders()
rfile.seek(0)
wfile.seek(0)
handler = TestRequestHandler(rfile, wfile)
if read:
return (handler._response, wfile.read())
else:
return handler._response
def assertHTTPResponse(self, uri, expectedResponse, **kwargs):
response = self.request(uri, read=False, **kwargs)
self.assertEqual(response, expectedResponse)
def assertNotHTTPResponse(self, uri, expectedResponse, **kwargs):
response = self.request(uri, read=False, **kwargs)
self.assertNotEqual(response, expectedResponse)
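# Usage sketch for the HTTP helpers above (plugin name and URI are
# illustrative, not part of this module):
#
#     class WebTestCase(HTTPPluginTestCase):
#         plugins = ('Web',)
#         def testIndex(self):
#             self.assertHTTPResponse('/', 200)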
class ChannelHTTPPluginTestCase(ChannelPluginTestCase, HTTPPluginTestCase):
def setUp(self):
ChannelPluginTestCase.setUp(self, forceSetup=True)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
Ban3/Limnoria
|
src/test.py
|
Python
|
bsd-3-clause
| 25,777
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Tommy Winther
# http://tommy.winther.nu
#
# Modified for FTV Guide (09/2014 onwards)
# by Thomas Geppert [bluezed] - bluezed.apps@gmail.com
#
# Modified for TV Guide Fullscren (2016)
# by primaeval - primaeval.dev@gmail.com
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this Program; see the file LICENSE.txt. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import os
import threading
import datetime
import time
from xml.etree import ElementTree
import re
from strings import *
#from guideTypes import *
from fileFetcher import *
import xbmc
import xbmcgui
import xbmcvfs
import xbmcaddon
import sqlite3
SETTINGS_TO_CHECK = ['source', 'xmltv.type', 'xmltv.file', 'xmltv.url', 'xmltv.logo.folder']
class Channel(object):
def __init__(self, id, title, logo=None, streamUrl=None, visible=True, weight=-1):
self.id = id
self.title = title
self.logo = logo
self.streamUrl = streamUrl
self.visible = visible
self.weight = weight
def isPlayable(self):
return hasattr(self, 'streamUrl') and self.streamUrl
def __eq__(self, other):
return self.id == other.id
def __repr__(self):
return 'Channel(id=%s, title=%s, logo=%s, streamUrl=%s)' \
% (self.id, self.title, self.logo, self.streamUrl)
class Program(object):
def __init__(self, channel, title, startDate, endDate, description, imageLarge=None, imageSmall=None,
notificationScheduled=None, autoplayScheduled=None, season=None, episode=None, is_movie = False, language = "en"):
"""
@param channel:
@type channel: source.Channel
@param title:
@param startDate:
@param endDate:
@param description:
@param imageLarge:
@param imageSmall:
"""
self.channel = channel
self.title = title
self.startDate = startDate
self.endDate = endDate
self.description = description
self.imageLarge = imageLarge
self.imageSmall = imageSmall
self.notificationScheduled = notificationScheduled
self.autoplayScheduled = autoplayScheduled
self.season = season
self.episode = episode
self.is_movie = is_movie
self.language = language
def __repr__(self):
return 'Program(channel=%s, title=%s, startDate=%s, endDate=%s, description=%s, imageLarge=%s, ' \
'imageSmall=%s, episode=%s, season=%s, is_movie=%s)' % (self.channel, self.title, self.startDate,
self.endDate, self.description, self.imageLarge,
self.imageSmall, self.season, self.episode,
self.is_movie)
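# Positional-argument reminder (illustrative values): notificationScheduled and
# autoplayScheduled sit between imageSmall and season, so a fully positional
# call must pass two placeholders there:
#
#     p = Program(channel, 'News', start, end, 'Evening bulletin',
#                 None, None,   # imageLarge, imageSmall
#                 None, None,   # notificationScheduled, autoplayScheduled
#                 '1', '2', False, 'en')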
class SourceException(Exception):
pass
class SourceUpdateCanceledException(SourceException):
pass
class SourceNotConfiguredException(SourceException):
pass
class DatabaseSchemaException(sqlite3.DatabaseError):
pass
class Database(object):
SOURCE_DB = 'source.db'
CHANNELS_PER_PAGE = int(ADDON.getSetting('channels.per.page'))
def __init__(self):
self.conn = None
self.eventQueue = list()
self.event = threading.Event()
self.eventResults = dict()
self.source = instantiateSource()
self.updateInProgress = False
self.updateFailed = False
self.settingsChanged = None
self.alreadyTriedUnlinking = False
self.channelList = list()
self.category = "Any"
profilePath = xbmc.translatePath(ADDON.getAddonInfo('profile'))
if not os.path.exists(profilePath):
os.makedirs(profilePath)
self.databasePath = os.path.join(profilePath, Database.SOURCE_DB)
threading.Thread(name='Database Event Loop', target=self.eventLoop).start()
def eventLoop(self):
print 'Database.eventLoop() >>>>>>>>>> starting...'
while True:
self.event.wait()
self.event.clear()
event = self.eventQueue.pop(0)
command = event[0]
callback = event[1]
print 'Database.eventLoop() >>>>>>>>>> processing command: ' + command.__name__
try:
result = command(*event[2:])
self.eventResults[command.__name__] = result
if callback:
if self._initialize == command:
threading.Thread(name='Database callback', target=callback, args=[result]).start()
else:
threading.Thread(name='Database callback', target=callback).start()
if self._close == command:
del self.eventQueue[:]
break
except Exception as detail:
xbmc.log('Database.eventLoop() >>>>>>>>>> exception! %s = %s' % (detail,command.__name__))
xbmc.executebuiltin("ActivateWindow(Home)")
print 'Database.eventLoop() >>>>>>>>>> exiting...'
def _invokeAndBlockForResult(self, method, *args):
event = [method, None]
event.extend(args)
self.eventQueue.append(event)
self.event.set()
        while method.__name__ not in self.eventResults:
time.sleep(0.1)
result = self.eventResults.get(method.__name__)
del self.eventResults[method.__name__]
return result
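    # Marshalling sketch: every public accessor below funnels its private
    # counterpart through the database thread's event loop like this
    # (illustrative):
    #
    #     def getFoo(self):
    #         return self._invokeAndBlockForResult(self._getFoo)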
def initialize(self, callback, cancel_requested_callback=None):
self.eventQueue.append([self._initialize, callback, cancel_requested_callback])
self.event.set()
def _initialize(self, cancel_requested_callback):
sqlite3.register_adapter(datetime.datetime, self.adapt_datetime)
sqlite3.register_converter('timestamp', self.convert_datetime)
self.alreadyTriedUnlinking = False
while True:
if cancel_requested_callback is not None and cancel_requested_callback():
break
try:
self.conn = sqlite3.connect(self.databasePath, detect_types=sqlite3.PARSE_DECLTYPES)
self.conn.execute('PRAGMA foreign_keys = ON')
self.conn.row_factory = sqlite3.Row
# create and drop dummy table to check if database is locked
c = self.conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS database_lock_check(id TEXT PRIMARY KEY)')
c.execute('DROP TABLE database_lock_check')
c.close()
self._createTables()
self.settingsChanged = self._wasSettingsChanged(ADDON)
break
except sqlite3.OperationalError:
if cancel_requested_callback is None:
xbmc.log('[script.tvguide.fullscreen] Database is locked, bailing out...', xbmc.LOGDEBUG)
break
else: # ignore 'database is locked'
xbmc.log('[script.tvguide.fullscreen] Database is locked, retrying...', xbmc.LOGDEBUG)
except sqlite3.DatabaseError:
self.conn = None
if self.alreadyTriedUnlinking:
xbmc.log('[script.tvguide.fullscreen] Database is broken and unlink() failed', xbmc.LOGDEBUG)
break
else:
try:
os.unlink(self.databasePath)
except OSError:
pass
self.alreadyTriedUnlinking = True
xbmcgui.Dialog().ok(ADDON.getAddonInfo('name'), strings(DATABASE_SCHEMA_ERROR_1),
strings(DATABASE_SCHEMA_ERROR_2), strings(DATABASE_SCHEMA_ERROR_3))
return self.conn is not None
def close(self, callback=None):
self.eventQueue.append([self._close, callback])
self.event.set()
def _close(self):
try:
# rollback any non-commit'ed changes to avoid database lock
if self.conn:
self.conn.rollback()
except sqlite3.OperationalError:
pass # no transaction is active
if self.conn:
self.conn.close()
def _wasSettingsChanged(self, addon):
#gType = GuideTypes()
#if int(addon.getSetting('xmltv.type')) == gType.CUSTOM_FILE_ID:
# settingsChanged = addon.getSetting('xmltv.refresh') == 'true'
#else:
        noRows = True
        count = 0
        settingsChanged = addon.getSetting('xmltv.refresh') == 'true'
c = self.conn.cursor()
c.execute('SELECT * FROM settings')
for row in c:
noRows = False
key = row['key']
if SETTINGS_TO_CHECK.count(key):
count += 1
if row['value'] != addon.getSetting(key):
settingsChanged = True
if count != len(SETTINGS_TO_CHECK):
settingsChanged = True
if settingsChanged or noRows:
for key in SETTINGS_TO_CHECK:
value = addon.getSetting(key).decode('utf-8', 'ignore')
c.execute('INSERT OR IGNORE INTO settings(key, value) VALUES (?, ?)', [key, value])
if not c.rowcount:
c.execute('UPDATE settings SET value=? WHERE key=?', [value, key])
self.conn.commit()
c.close()
print 'Settings changed: ' + str(settingsChanged)
return settingsChanged
def _isCacheExpired(self, date):
if self.settingsChanged:
return True
# check if channel data is up-to-date in database
try:
c = self.conn.cursor()
c.execute('SELECT channels_updated FROM sources WHERE id=?', [self.source.KEY])
row = c.fetchone()
if not row:
return True
channelsLastUpdated = row['channels_updated']
c.close()
except TypeError:
return True
# check if program data is up-to-date in database
dateStr = date.strftime('%Y-%m-%d')
c = self.conn.cursor()
c.execute('SELECT programs_updated FROM updates WHERE source=? AND date=?', [self.source.KEY, dateStr])
row = c.fetchone()
if row:
programsLastUpdated = row['programs_updated']
else:
programsLastUpdated = datetime.datetime.fromtimestamp(0)
c.close()
return self.source.isUpdated(channelsLastUpdated, programsLastUpdated)
    def updateChannelAndProgramListCaches(self, callback, date=None, progress_callback=None,
                                          clearExistingProgramList=True):
        # A datetime.datetime.now() default would be evaluated once at import
        # time and grow stale; resolve "now" at call time instead.
        if date is None:
            date = datetime.datetime.now()
self.eventQueue.append(
[self._updateChannelAndProgramListCaches, callback, date, progress_callback, clearExistingProgramList])
self.event.set()
def _updateChannelAndProgramListCaches(self, date, progress_callback, clearExistingProgramList):
# todo workaround service.py 'forgets' the adapter and convert set in _initialize.. wtf?!
sqlite3.register_adapter(datetime.datetime, self.adapt_datetime)
sqlite3.register_converter('timestamp', self.convert_datetime)
if not self._isCacheExpired(date) and not self.source.needReset:
return
else:
# if the xmltv data needs to be loaded the database
# should be reset to avoid ghosting!
self.updateInProgress = True
c = self.conn.cursor()
c.execute("DELETE FROM updates")
c.execute("UPDATE sources SET channels_updated=0")
self.conn.commit()
c.close()
self.source.needReset = False
self.updateInProgress = True
self.updateFailed = False
dateStr = date.strftime('%Y-%m-%d')
c = self.conn.cursor()
try:
xbmc.log('[script.tvguide.fullscreen] Updating caches...', xbmc.LOGDEBUG)
if progress_callback:
progress_callback(0)
if self.settingsChanged:
c.execute('DELETE FROM channels WHERE source=?', [self.source.KEY])
c.execute('DELETE FROM programs WHERE source=?', [self.source.KEY])
c.execute("DELETE FROM updates WHERE source=?", [self.source.KEY])
self.settingsChanged = False # only want to update once due to changed settings
if clearExistingProgramList:
c.execute("DELETE FROM updates WHERE source=?",
[self.source.KEY]) # cascades and deletes associated programs records
else:
c.execute("DELETE FROM updates WHERE source=? AND date=?",
[self.source.KEY, dateStr]) # cascades and deletes associated programs records
# programs updated
c.execute("INSERT INTO updates(source, date, programs_updated) VALUES(?, ?, ?)",
[self.source.KEY, dateStr, datetime.datetime.now()])
updatesId = c.lastrowid
imported = imported_channels = imported_programs = 0
for item in self.source.getDataFromExternal(date, progress_callback):
imported += 1
if imported % 10000 == 0:
self.conn.commit()
if isinstance(item, Channel):
imported_channels += 1
channel = item
c.execute(
'INSERT OR IGNORE INTO channels(id, title, logo, stream_url, visible, weight, source) VALUES(?, ?, ?, ?, ?, (CASE ? WHEN -1 THEN (SELECT COALESCE(MAX(weight)+1, 0) FROM channels WHERE source=?) ELSE ? END), ?)',
[channel.id, channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight,
self.source.KEY, channel.weight, self.source.KEY])
if not c.rowcount:
c.execute(
'UPDATE channels SET title=?, logo=?, stream_url=?, visible=(CASE ? WHEN -1 THEN visible ELSE ? END), weight=(CASE ? WHEN -1 THEN weight ELSE ? END) WHERE id=? AND source=?',
[channel.title, channel.logo, channel.streamUrl, channel.weight, channel.visible,
channel.weight, channel.weight, channel.id, self.source.KEY])
elif isinstance(item, Program):
imported_programs += 1
program = item
if isinstance(program.channel, Channel):
channel = program.channel.id
else:
channel = program.channel
c.execute(
'INSERT INTO programs(channel, title, start_date, end_date, description, image_large, image_small, season, episode, is_movie, language, source, updates_id) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
[channel, program.title, program.startDate, program.endDate, program.description,
program.imageLarge, program.imageSmall, program.season, program.episode, program.is_movie,
program.language, self.source.KEY, updatesId])
# channels updated
c.execute("UPDATE sources SET channels_updated=? WHERE id=?", [datetime.datetime.now(), self.source.KEY])
self.conn.commit()
if imported_channels == 0 or imported_programs == 0:
self.updateFailed = True
except SourceUpdateCanceledException:
# force source update on next load
c.execute('UPDATE sources SET channels_updated=? WHERE id=?', [0, self.source.KEY])
c.execute("DELETE FROM updates WHERE source=?",
[self.source.KEY]) # cascades and deletes associated programs records
self.conn.commit()
except Exception:
import traceback as tb
import sys
(etype, value, traceback) = sys.exc_info()
tb.print_exception(etype, value, traceback)
try:
self.conn.rollback()
except sqlite3.OperationalError:
pass # no transaction is active
try:
# invalidate cached data
c.execute('UPDATE sources SET channels_updated=? WHERE id=?', [0, self.source.KEY])
self.conn.commit()
except sqlite3.OperationalError:
pass # database is locked
self.updateFailed = True
finally:
self.updateInProgress = False
c.close()
def setCategory(self,category):
self.category = category
self.channelList = None
    def getEPGView(self, channelStart, date=None, progress_callback=None,
                   clearExistingProgramList=True, category=None):
        if date is None:
            date = datetime.datetime.now()
result = self._invokeAndBlockForResult(self._getEPGView, channelStart, date, progress_callback,
clearExistingProgramList, category)
if self.updateFailed:
raise SourceException('No channels or programs imported')
return result
    def getQuickEPGView(self, channelStart, date=None, progress_callback=None,
                        clearExistingProgramList=True, category=None):
        if date is None:
            date = datetime.datetime.now()
result = self._invokeAndBlockForResult(self._getQuickEPGView, channelStart, date, progress_callback,
clearExistingProgramList, category)
if self.updateFailed:
raise SourceException('No channels or programs imported')
return result
def _getEPGView(self, channelStart, date, progress_callback, clearExistingProgramList, category):
self._updateChannelAndProgramListCaches(date, progress_callback, clearExistingProgramList)
channels = self._getChannelList(onlyVisible=True)
if channelStart < 0:
channelStart = len(channels) - 1
elif channelStart > len(channels) - 1:
channelStart = 0
channelEnd = channelStart + Database.CHANNELS_PER_PAGE
channelsOnPage = channels[channelStart: channelEnd]
programs = self._getProgramList(channelsOnPage, date)
return [channelStart, channelsOnPage, programs]
def _getQuickEPGView(self, channelStart, date, progress_callback, clearExistingProgramList, category):
self._updateChannelAndProgramListCaches(date, progress_callback, clearExistingProgramList)
channels = self._getChannelList(onlyVisible=True)
if channelStart < 0:
channelStart = len(channels) - 1
elif channelStart > len(channels) - 1:
channelStart = 0
channelEnd = channelStart + 3
channelsOnPage = channels[channelStart: channelEnd]
programs = self._getProgramList(channelsOnPage, date)
return [channelStart, channelsOnPage, programs]
def getNumberOfChannels(self):
channels = self.getChannelList()
return len(channels)
def getNextChannel(self, currentChannel):
channels = self.getChannelList()
idx = channels.index(currentChannel)
idx += 1
if idx > len(channels) - 1:
idx = 0
return channels[idx]
def getPreviousChannel(self, currentChannel):
channels = self.getChannelList()
idx = channels.index(currentChannel)
idx -= 1
if idx < 0:
idx = len(channels) - 1
return channels[idx]
def saveChannelList(self, callback, channelList):
self.eventQueue.append([self._saveChannelList, callback, channelList])
self.event.set()
def _saveChannelList(self, channelList):
c = self.conn.cursor()
for idx, channel in enumerate(channelList):
c.execute(
'INSERT OR IGNORE INTO channels(id, title, logo, stream_url, visible, weight, source) VALUES(?, ?, ?, ?, ?, (CASE ? WHEN -1 THEN (SELECT COALESCE(MAX(weight)+1, 0) FROM channels WHERE source=?) ELSE ? END), ?)',
[channel.id, channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight,
self.source.KEY, channel.weight, self.source.KEY])
if not c.rowcount:
c.execute(
'UPDATE channels SET title=?, logo=?, stream_url=?, visible=?, weight=(CASE ? WHEN -1 THEN weight ELSE ? END) WHERE id=? AND source=?',
[channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight, channel.weight,
channel.id, self.source.KEY])
c.execute("UPDATE sources SET channels_updated=? WHERE id=?", [datetime.datetime.now(), self.source.KEY])
self.channelList = None
self.conn.commit()
def exportChannelList(self):
channelsList = self.getChannelList()
channels = [channel.title for channel in channelsList]
f = xbmcvfs.File('special://profile/addon_data/script.tvguide.fullscreen/channels.ini','wb')
for channel in sorted(channels):
f.write("%s=nothing\n" % channel.encode("utf8"))
f.close()
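    # Resulting channels.ini sketch (one "title=nothing" line per visible
    # channel; titles are illustrative):
    #
    #     BBC One=nothing
    #     CNN=nothing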
#TODO hangs on second call from _getNowList. use _getChannelList instead
def getChannelList(self, onlyVisible=True):
if not self.channelList or not onlyVisible:
result = self._invokeAndBlockForResult(self._getChannelList, onlyVisible)
if not onlyVisible:
return result
self.channelList = result
return self.channelList
def _getChannelList(self, onlyVisible):
c = self.conn.cursor()
channelList = list()
if onlyVisible:
c.execute('SELECT * FROM channels WHERE source=? AND visible=? ORDER BY weight', [self.source.KEY, True])
else:
c.execute('SELECT * FROM channels WHERE source=? ORDER BY weight', [self.source.KEY])
for row in c:
channel = Channel(row['id'], row['title'], row['logo'], row['stream_url'], row['visible'], row['weight'])
channelList.append(channel)
if self.category and self.category != "Any":
f = xbmcvfs.File('special://profile/addon_data/script.tvguide.fullscreen/categories.ini','rb')
lines = f.read().splitlines()
f.close()
filter = []
seen = set()
for line in lines:
if "=" not in line:
continue
name,cat = line.split('=')
if cat == self.category:
if name not in seen:
filter.append(name)
seen.add(name)
NONE = "0"
SORT = "1"
CATEGORIES = "2"
new_channels = []
if ADDON.getSetting('channel.filter.sort') == CATEGORIES:
for filter_name in filter:
for channel in channelList:
if channel.title == filter_name:
new_channels.append(channel)
if new_channels:
channelList = new_channels
else:
for channel in channelList:
if channel.title in filter:
new_channels.append(channel)
if new_channels:
if ADDON.getSetting('channel.filter.sort') == SORT:
channelList = sorted(new_channels, key=lambda channel: channel.title.lower())
else:
channelList = new_channels
c.close()
return channelList
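    # categories.ini shape expected by the filter above (illustrative values):
    #
    #     BBC One=Entertainment
    #     CNN=News
    #
    # Lines without "=" are skipped; only channels whose category matches
    # self.category survive the filter.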
def programSearch(self, search):
return self._invokeAndBlockForResult(self._programSearch, search)
    def _programSearch(self, search):
        programList = []
        c = self.conn.cursor()
        channelList = self._getChannelList(True)
        search = "%%%s%%" % search  # wrap in LIKE wildcards once, not once per channel
        for channel in channelList:
            try:
                c.execute('SELECT * FROM programs WHERE channel=? AND source=? AND title LIKE ?',
                          [channel.id, self.source.KEY, search])
            except sqlite3.Error:
                return
for row in c:
                program = Program(channel, row['title'], row['start_date'], row['end_date'], row['description'],
                                  row['image_large'], row['image_small'], None, None, row['season'],
                                  row['episode'], row['is_movie'], row['language'])
programList.append(program)
c.close()
return programList
def getChannelListing(self, channel):
return self._invokeAndBlockForResult(self._getChannelListing, channel)
def _getChannelListing(self, channel):
programList = []
c = self.conn.cursor()
        try:
            c.execute('SELECT * FROM programs WHERE channel=?',
                      [channel.id])
        except sqlite3.Error:
            return
for row in c:
            program = Program(channel, row['title'], row['start_date'], row['end_date'], row['description'],
                              row['image_large'], row['image_small'], None, None, row['season'],
                              row['episode'], row['is_movie'], row['language'])
programList.append(program)
c.close()
return programList
def getNowList(self):
return self._invokeAndBlockForResult(self._getNowList)
def _getNowList(self):
programList = []
now = datetime.datetime.now()
c = self.conn.cursor()
channelList = self._getChannelList(True)
for channel in channelList:
            try:
                c.execute('SELECT * FROM programs WHERE channel=? AND source=? AND start_date <= ? AND end_date >= ?',
                          [channel.id, self.source.KEY, now, now])
            except sqlite3.Error:
                return
row = c.fetchone()
if row:
                program = Program(channel, row['title'], row['start_date'], row['end_date'], row['description'],
                                  row['image_large'], row['image_small'], None, None, row['season'],
                                  row['episode'], row['is_movie'], row['language'])
programList.append(program)
c.close()
return programList
def getNextList(self):
return self._invokeAndBlockForResult(self._getNextList)
def _getNextList(self):
programList = []
now = datetime.datetime.now()
c = self.conn.cursor()
channelList = self._getChannelList(True)
for channel in channelList:
            try:
                c.execute('SELECT * FROM programs WHERE channel=? AND source=? AND start_date >= ? AND end_date >= ?',
                          [channel.id, self.source.KEY, now, now])
            except sqlite3.Error:
                return
row = c.fetchone()
if row:
                program = Program(channel, row['title'], row['start_date'], row['end_date'], row['description'],
                                  row['image_large'], row['image_small'], None, None, row['season'],
                                  row['episode'], row['is_movie'], row['language'])
programList.append(program)
c.close()
return programList
def getCurrentProgram(self, channel):
return self._invokeAndBlockForResult(self._getCurrentProgram, channel)
def _getCurrentProgram(self, channel):
"""
@param channel:
@type channel: source.Channel
@return:
"""
program = None
now = datetime.datetime.now()
c = self.conn.cursor()
        try:
            c.execute('SELECT * FROM programs WHERE channel=? AND source=? AND start_date <= ? AND end_date >= ?',
                      [channel.id, self.source.KEY, now, now])
        except sqlite3.Error:
            return
row = c.fetchone()
if row:
            try:
                program = Program(channel, row['title'], row['start_date'], row['end_date'], row['description'],
                                  row['image_large'], row['image_small'], None, None, row['season'],
                                  row['episode'], row['is_movie'], row['language'])
            except Exception:
                return
c.close()
return program
def getNextProgram(self, channel):
return self._invokeAndBlockForResult(self._getNextProgram, channel)
def _getNextProgram(self, program):
try:
nextProgram = None
c = self.conn.cursor()
c.execute(
'SELECT * FROM programs WHERE channel=? AND source=? AND start_date >= ? ORDER BY start_date ASC LIMIT 1',
[program.channel.id, self.source.KEY, program.endDate])
row = c.fetchone()
if row:
                nextProgram = Program(program.channel, row['title'], row['start_date'], row['end_date'], row['description'],
                                      row['image_large'], row['image_small'], None, None, row['season'],
                                      row['episode'], row['is_movie'], row['language'])
c.close()
return nextProgram
        except Exception:
return
def getPreviousProgram(self, channel):
return self._invokeAndBlockForResult(self._getPreviousProgram, channel)
def _getPreviousProgram(self, program):
try:
previousProgram = None
c = self.conn.cursor()
c.execute(
'SELECT * FROM programs WHERE channel=? AND source=? AND end_date <= ? ORDER BY start_date DESC LIMIT 1',
[program.channel.id, self.source.KEY, program.startDate])
row = c.fetchone()
if row:
                previousProgram = Program(program.channel, row['title'], row['start_date'], row['end_date'],
                                          row['description'], row['image_large'], row['image_small'], None, None,
                                          row['season'], row['episode'], row['is_movie'], row['language'])
c.close()
return previousProgram
        except Exception:
return
def _getProgramList(self, channels, startTime):
"""
@param channels:
@type channels: list of source.Channel
@param startTime:
@type startTime: datetime.datetime
@return:
"""
endTime = startTime + datetime.timedelta(hours=2)
programList = list()
channelMap = dict()
for c in channels:
if c.id:
channelMap[c.id] = c
if not channels:
return []
c = self.conn.cursor()
c.execute(
'SELECT p.*, (SELECT 1 FROM notifications n WHERE n.channel=p.channel AND n.program_title=p.title AND n.source=p.source) AS notification_scheduled, (SELECT 1 FROM autoplays n WHERE n.channel=p.channel AND n.program_title=p.title AND n.source=p.source) AS autoplay_scheduled FROM programs p WHERE p.channel IN (\'' + (
'\',\''.join(channelMap.keys())) + '\') AND p.source=? AND p.end_date > ? AND p.start_date < ?',
[self.source.KEY, startTime, endTime])
for row in c:
program = Program(channelMap[row['channel']], row['title'], row['start_date'], row['end_date'],
row['description'], row['image_large'], row['image_small'], row['notification_scheduled'], row['autoplay_scheduled'],
row['season'], row['episode'], row['is_movie'], row['language'])
programList.append(program)
return programList
    def _isProgramListCacheExpired(self, date=None):
        if date is None:
            date = datetime.datetime.now()
# check if data is up-to-date in database
dateStr = date.strftime('%Y-%m-%d')
c = self.conn.cursor()
c.execute('SELECT programs_updated FROM updates WHERE source=? AND date=?', [self.source.KEY, dateStr])
row = c.fetchone()
today = datetime.datetime.now()
expired = row is None or row['programs_updated'].day != today.day
c.close()
return expired
def setCustomStreamUrl(self, channel, stream_url):
if stream_url is not None:
self._invokeAndBlockForResult(self._setCustomStreamUrl, channel, stream_url)
# no result, but block until operation is done
def _setCustomStreamUrl(self, channel, stream_url):
if stream_url is not None:
image = ""
if ADDON.getSetting("addon.logos") == "true":
file_name = 'special://profile/addon_data/script.tvguide.fullscreen/icons.ini'
f = xbmcvfs.File(file_name)
items = f.read().splitlines()
f.close()
for item in items:
if item.startswith('['):
pass
elif item.startswith('#'):
pass
else:
url_icon = item.rsplit('|',1)
if len(url_icon) == 2:
url = url_icon[0]
icon = url_icon[1]
if url == stream_url:
if icon and icon != "nothing":
image = icon.rstrip('/')
c = self.conn.cursor()
if image:
c.execute('UPDATE OR REPLACE channels SET logo=? WHERE id=?' , (image, channel.id))
c.execute("DELETE FROM custom_stream_url WHERE channel=?", [channel.id])
c.execute("INSERT INTO custom_stream_url(channel, stream_url) VALUES(?, ?)",
[channel.id, stream_url.decode('utf-8', 'ignore')])
self.conn.commit()
c.close()
def getCustomStreamUrl(self, channel):
return self._invokeAndBlockForResult(self._getCustomStreamUrl, channel)
def _getCustomStreamUrl(self, channel):
if not channel:
return
c = self.conn.cursor()
c.execute("SELECT stream_url FROM custom_stream_url WHERE channel=?", [channel.id])
stream_url = c.fetchone()
c.close()
if stream_url:
return stream_url[0]
else:
return None
def deleteCustomStreamUrl(self, channel):
self.eventQueue.append([self._deleteCustomStreamUrl, None, channel])
self.event.set()
def _deleteCustomStreamUrl(self, channel):
c = self.conn.cursor()
c.execute("DELETE FROM custom_stream_url WHERE channel=?", [channel.id])
self.conn.commit()
c.close()
def getStreamUrl(self, channel):
customStreamUrl = self.getCustomStreamUrl(channel)
if customStreamUrl:
customStreamUrl = customStreamUrl.encode('utf-8', 'ignore')
return customStreamUrl
elif channel.isPlayable():
streamUrl = channel.streamUrl.encode('utf-8', 'ignore')
return streamUrl
return None
@staticmethod
def adapt_datetime(ts):
# http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
return time.mktime(ts.timetuple())
@staticmethod
def convert_datetime(ts):
try:
return datetime.datetime.fromtimestamp(float(ts))
except ValueError:
return None
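    # Round-trip sketch: datetimes are stored as POSIX timestamps and come back
    # with second precision, e.g.
    #
    #     ts = Database.adapt_datetime(datetime.datetime(2016, 1, 1, 20, 0))
    #     Database.convert_datetime(ts)  # -> datetime.datetime(2016, 1, 1, 20, 0)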
def _createTables(self):
c = self.conn.cursor()
try:
c.execute('SELECT major, minor, patch FROM version')
(major, minor, patch) = c.fetchone()
version = [major, minor, patch]
except sqlite3.OperationalError:
version = [0, 0, 0]
try:
if version < [1, 3, 0]:
c.execute('CREATE TABLE IF NOT EXISTS custom_stream_url(channel TEXT, stream_url TEXT)')
c.execute('CREATE TABLE version (major INTEGER, minor INTEGER, patch INTEGER)')
c.execute('INSERT INTO version(major, minor, patch) VALUES(1, 3, 0)')
# For caching data
c.execute('CREATE TABLE sources(id TEXT PRIMARY KEY, channels_updated TIMESTAMP)')
c.execute(
'CREATE TABLE updates(id INTEGER PRIMARY KEY, source TEXT, date TEXT, programs_updated TIMESTAMP)')
c.execute(
'CREATE TABLE channels(id TEXT, title TEXT, logo TEXT, stream_url TEXT, source TEXT, visible BOOLEAN, weight INTEGER, PRIMARY KEY (id, source), FOREIGN KEY(source) REFERENCES sources(id) ON DELETE CASCADE)')
c.execute(
'CREATE TABLE programs(channel TEXT, title TEXT, start_date TIMESTAMP, end_date TIMESTAMP, description TEXT, image_large TEXT, image_small TEXT, source TEXT, updates_id INTEGER, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE)')
c.execute('CREATE INDEX program_list_idx ON programs(source, channel, start_date, end_date)')
c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
# For active setting
c.execute('CREATE TABLE settings(key TEXT PRIMARY KEY, value TEXT)')
# For notifications
c.execute(
"CREATE TABLE notifications(channel TEXT, program_title TEXT, source TEXT, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE)")
if version < [1, 3, 1]:
# Recreate tables with FOREIGN KEYS as DEFERRABLE INITIALLY DEFERRED
c.execute('UPDATE version SET major=1, minor=3, patch=1')
c.execute('DROP TABLE channels')
c.execute('DROP TABLE programs')
c.execute(
'CREATE TABLE channels(id TEXT, title TEXT, logo TEXT, stream_url TEXT, source TEXT, visible BOOLEAN, weight INTEGER, PRIMARY KEY (id, source), FOREIGN KEY(source) REFERENCES sources(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
c.execute(
'CREATE TABLE programs(channel TEXT, title TEXT, start_date TIMESTAMP, end_date TIMESTAMP, description TEXT, image_large TEXT, image_small TEXT, source TEXT, updates_id INTEGER, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
c.execute('CREATE INDEX program_list_idx ON programs(source, channel, start_date, end_date)')
c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
if version < [1, 3, 2]:
# Recreate tables with seasons, episodes and is_movie
c.execute('UPDATE version SET major=1, minor=3, patch=2')
c.execute('DROP TABLE programs')
c.execute(
'CREATE TABLE programs(channel TEXT, title TEXT, start_date TIMESTAMP, end_date TIMESTAMP, description TEXT, image_large TEXT, image_small TEXT, season TEXT, episode TEXT, is_movie TEXT, language TEXT, source TEXT, updates_id INTEGER, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
c.execute('CREATE INDEX program_list_idx ON programs(source, channel, start_date, end_date)')
c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
c.execute(
"CREATE TABLE IF NOT EXISTS autoplays(channel TEXT, program_title TEXT, source TEXT, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE)")
# make sure we have a record in sources for this Source
c.execute("INSERT OR IGNORE INTO sources(id, channels_updated) VALUES(?, ?)", [self.source.KEY, 0])
self.conn.commit()
c.close()
#except sqlite3.OperationalError, ex:
except Exception as detail:
xbmc.log("(script.tvguide.fullscreen) %s" % detail, xbmc.LOGERROR)
dialog = xbmcgui.Dialog()
dialog.notification('script.tvguide.fullscreen', 'database exception %s' % detail, xbmcgui.NOTIFICATION_ERROR , 5000)
#raise DatabaseSchemaException(detail)
def addNotification(self, program):
self._invokeAndBlockForResult(self._addNotification, program)
# no result, but block until operation is done
def _addNotification(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("INSERT INTO notifications(channel, program_title, source) VALUES(?, ?, ?)",
[program.channel.id, program.title, self.source.KEY])
self.conn.commit()
c.close()
def removeNotification(self, program):
self._invokeAndBlockForResult(self._removeNotification, program)
# no result, but block until operation is done
def _removeNotification(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("DELETE FROM notifications WHERE channel=? AND program_title=? AND source=?",
[program.channel.id, program.title, self.source.KEY])
self.conn.commit()
c.close()
def getNotifications(self, daysLimit=2):
return self._invokeAndBlockForResult(self._getNotifications, daysLimit)
def _getNotifications(self, daysLimit):
start = datetime.datetime.now()
end = start + datetime.timedelta(days=daysLimit)
c = self.conn.cursor()
c.execute(
"SELECT DISTINCT c.title, p.title, p.start_date FROM notifications n, channels c, programs p WHERE n.channel = c.id AND p.channel = c.id AND n.program_title = p.title AND n.source=? AND p.start_date >= ? AND p.end_date <= ?",
[self.source.KEY, start, end])
programs = c.fetchall()
c.close()
return programs
def getFullNotifications(self, daysLimit=2):
return self._invokeAndBlockForResult(self._getFullNotifications, daysLimit)
def _getFullNotifications(self, daysLimit):
start = datetime.datetime.now()
end = start + datetime.timedelta(days=daysLimit)
programList = list()
c = self.conn.cursor()
c.execute(
"SELECT DISTINCT c.*, p.*,(SELECT 1 FROM notifications n WHERE n.channel=p.channel AND n.program_title=p.title AND n.source=p.source) AS notification_scheduled, " +
"(SELECT 1 FROM autoplays n WHERE n.channel=p.channel AND n.program_title=p.title AND n.source=p.source) AS autoplay_scheduled " +
"FROM notifications n, channels c, programs p WHERE n.channel = c.id AND p.channel = c.id AND n.program_title = p.title AND n.source=? AND p.start_date >= ? AND p.end_date <= ?",
[self.source.KEY, start, end])
for row in c:
channel = Channel(row[0],row[1],row[2],row[3],row[5],row[6])
program = Program(channel,title=row[8],startDate=row[9],endDate=row[10],description=row[11],imageLarge=row[12],imageSmall=row[13],
season=row[14],episode=row[15],is_movie=row[16],language=row[17],notificationScheduled=row[20],autoplayScheduled=row[21])
xbmc.log(repr(row.keys()))
programList.append(program)
c.close()
return programList
def isNotificationRequiredForProgram(self, program):
return self._invokeAndBlockForResult(self._isNotificationRequiredForProgram, program)
def _isNotificationRequiredForProgram(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("SELECT 1 FROM notifications WHERE channel=? AND program_title=? AND source=?",
[program.channel.id, program.title, self.source.KEY])
result = c.fetchone()
c.close()
return result
def clearAllNotifications(self):
self._invokeAndBlockForResult(self._clearAllNotifications)
# no result, but block until operation is done
def _clearAllNotifications(self):
c = self.conn.cursor()
c.execute('DELETE FROM notifications')
self.conn.commit()
c.close()
def addAutoplay(self, program):
self._invokeAndBlockForResult(self._addAutoplay, program)
# no result, but block until operation is done
def _addAutoplay(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("INSERT INTO autoplays(channel, program_title, source) VALUES(?, ?, ?)",
[program.channel.id, program.title, self.source.KEY])
self.conn.commit()
c.close()
def removeAutoplay(self, program):
self._invokeAndBlockForResult(self._removeAutoplay, program)
# no result, but block until operation is done
def _removeAutoplay(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("DELETE FROM autoplays WHERE channel=? AND program_title=? AND source=?",
[program.channel.id, program.title, self.source.KEY])
self.conn.commit()
c.close()
def getAutoplays(self, daysLimit=2):
return self._invokeAndBlockForResult(self._getAutoplays, daysLimit)
def _getAutoplays(self, daysLimit):
start = datetime.datetime.now()
end = start + datetime.timedelta(days=daysLimit)
c = self.conn.cursor()
c.execute(
"SELECT DISTINCT c.id, p.title, p.start_date, p.end_date FROM autoplays n, channels c, programs p WHERE n.channel = c.id AND p.channel = c.id AND n.program_title = p.title AND n.source=? AND p.start_date >= ? AND p.end_date <= ?",
[self.source.KEY, start, end])
programs = c.fetchall()
c.close()
return programs
def getFullAutoplays(self, daysLimit=2):
return self._invokeAndBlockForResult(self._getFullAutoplays, daysLimit)
def _getFullAutoplays(self, daysLimit):
start = datetime.datetime.now()
end = start + datetime.timedelta(days=daysLimit)
programList = list()
c = self.conn.cursor()
c.execute(
"SELECT DISTINCT c.*, p.*,(SELECT 1 FROM notifications n WHERE n.channel=p.channel AND n.program_title=p.title AND n.source=p.source) AS notification_scheduled, " +
"(SELECT 1 FROM autoplays n WHERE n.channel=p.channel AND n.program_title=p.title AND n.source=p.source) AS autoplay_scheduled " +
"FROM autoplays n, channels c, programs p WHERE n.channel = c.id AND p.channel = c.id AND n.program_title = p.title AND n.source=? AND p.start_date >= ? AND p.end_date <= ?",
[self.source.KEY, start, end])
for row in c:
channel = Channel(row[0],row[1],row[2],row[3],row[5],row[6])
program = Program(channel,title=row[8],startDate=row[9],endDate=row[10],description=row[11],imageLarge=row[12],imageSmall=row[13],
season=row[14],episode=row[15],is_movie=row[16],language=row[17],notificationScheduled=row[20],autoplayScheduled=row[21])
xbmc.log(repr(row.keys()))
programList.append(program)
c.close()
return programList
def isAutoplayRequiredForProgram(self, program):
return self._invokeAndBlockForResult(self._isAutoplayRequiredForProgram, program)
def _isAutoplayRequiredForProgram(self, program):
"""
@type program: source.program
"""
c = self.conn.cursor()
c.execute("SELECT 1 FROM autoplays WHERE channel=? AND program_title=? AND source=?",
[program.channel.id, program.title, self.source.KEY])
result = c.fetchone()
c.close()
return result
def clearAllAutoplays(self):
self._invokeAndBlockForResult(self._clearAllAutoplays)
# no result, but block until operation is done
def _clearAllAutoplays(self):
c = self.conn.cursor()
c.execute('DELETE FROM autoplays')
self.conn.commit()
c.close()
class Source(object):
def getDataFromExternal(self, date, progress_callback=None):
"""
Retrieve data from external as a list or iterable. Data may contain both Channel and Program objects.
The source may choose to ignore the date parameter and return all data available.
@param date: the date to retrieve the data for
@param progress_callback:
@return:
"""
return None
def isUpdated(self, channelsLastUpdated, programsLastUpdated):
today = datetime.datetime.now()
if channelsLastUpdated is None or channelsLastUpdated.day != today.day:
return True
if programsLastUpdated is None or programsLastUpdated.day != today.day:
return True
return False
class XMLTVSource(Source):
PLUGIN_DATA = xbmc.translatePath(os.path.join('special://profile', 'addon_data', 'script.tvguide.fullscreen'))
KEY = 'xmltv'
INI_TYPE_FILE = 0
INI_TYPE_URL = 1
INI_FILE = 'addons.ini'
LOGO_SOURCE_FOLDER = 1
LOGO_SOURCE_URL = 2
XMLTV_SOURCE_FILE = 0
XMLTV_SOURCE_URL = 1
CATEGORIES_TYPE_FILE = 0
CATEGORIES_TYPE_URL = 1
def __init__(self, addon):
#gType = GuideTypes()
self.needReset = False
self.fetchError = False
self.xmltvType = int(addon.getSetting('xmltv.type'))
self.xmltvInterval = int(addon.getSetting('xmltv.interval'))
self.logoSource = int(addon.getSetting('logos.source'))
self.addonsType = int(addon.getSetting('addons.ini.type'))
self.categoriesType = int(addon.getSetting('categories.ini.type'))
# make sure the folder in the user's profile exists or create it!
if not os.path.exists(XMLTVSource.PLUGIN_DATA):
os.makedirs(XMLTVSource.PLUGIN_DATA)
if self.logoSource == XMLTVSource.LOGO_SOURCE_FOLDER:
self.logoFolder = addon.getSetting('logos.folder')
elif self.logoSource == XMLTVSource.LOGO_SOURCE_URL:
self.logoFolder = addon.getSetting('logos.url')
else:
self.logoFolder = ""
if self.xmltvType == XMLTVSource.XMLTV_SOURCE_FILE:
customFile = str(addon.getSetting('xmltv.file'))
if os.path.exists(customFile):
# uses local file provided by user!
xbmc.log('[script.tvguide.fullscreen] Use local file: %s' % customFile, xbmc.LOGDEBUG)
self.xmltvFile = customFile
else:
# Probably a remote file
xbmc.log('[script.tvguide.fullscreen] Use remote file: %s' % customFile, xbmc.LOGDEBUG)
self.updateLocalFile(customFile, addon)
self.xmltvFile = customFile #os.path.join(XMLTVSource.PLUGIN_DATA, customFile.split('/')[-1])
else:
self.xmltvFile = self.updateLocalFile(addon.getSetting('xmltv.url'), addon)
if addon.getSetting('categories.ini.enabled') == 'true':
if self.categoriesType == XMLTVSource.CATEGORIES_TYPE_FILE:
customFile = str(addon.getSetting('categories.ini.file'))
else:
customFile = str(addon.getSetting('categories.ini.url'))
if customFile:
self.updateLocalFile(customFile, addon, True)
# make sure the ini file is fetched as well if necessary
if addon.getSetting('addons.ini.enabled') == 'true':
if self.addonsType == XMLTVSource.INI_TYPE_FILE:
customFile = str(addon.getSetting('addons.ini.file'))
else:
customFile = str(addon.getSetting('addons.ini.url'))
if customFile:
self.updateLocalFile(customFile, addon, True)
path = "special://profile/addon_data/script.tvguide.fullscreen/addons.ini"
if not xbmcvfs.exists(path):
f = xbmcvfs.File(path,"w")
f.close()
if not self.xmltvFile or not xbmcvfs.exists(self.xmltvFile):
raise SourceNotConfiguredException()
def updateLocalFile(self, name, addon, isIni=False):
fileName = os.path.basename(name)
path = os.path.join(XMLTVSource.PLUGIN_DATA, fileName)
fetcher = FileFetcher(name, addon)
retVal = fetcher.fetchFile()
if retVal == fetcher.FETCH_OK and not isIni:
self.needReset = True
elif retVal == fetcher.FETCH_ERROR:
xbmcgui.Dialog().ok(strings(FETCH_ERROR_TITLE), strings(FETCH_ERROR_LINE1), strings(FETCH_ERROR_LINE2))
return path
def getDataFromExternal(self, date, progress_callback=None):
f = FileWrapper(self.xmltvFile)
context = ElementTree.iterparse(f, events=("start", "end"))
size = f.size
return self.parseXMLTV(context, f, size, self.logoFolder, progress_callback)
    def isUpdated(self, channelsLastUpdated, programsLastUpdated):
if channelsLastUpdated is None or not xbmcvfs.exists(self.xmltvFile):
return True
stat = xbmcvfs.Stat(self.xmltvFile)
fileUpdated = datetime.datetime.fromtimestamp(stat.st_mtime())
return fileUpdated > channelsLastUpdated
def parseXMLTVDate(self, origDateString):
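        # XMLTV timestamps are of the form 'YYYYMMDDhhmmss +HHMM' (the UTC
        # offset is optional), e.g. '20200131123000 +0100'; when present, the
        # offset is used below to normalize the time to UTC before applying
        # the local timezone offset.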
if origDateString.find(' ') != -1:
# get timezone information
            dateParts = origDateString.split()
            offSign = None
            if len(dateParts) == 2:
dateString = dateParts[0]
offset = dateParts[1]
if len(offset) == 5:
offSign = offset[0]
offHrs = int(offset[1:3])
offMins = int(offset[-2:])
td = datetime.timedelta(minutes=offMins, hours=offHrs)
else:
td = datetime.timedelta(seconds=0)
elif len(dateParts) == 1:
dateString = dateParts[0]
td = datetime.timedelta(seconds=0)
else:
return None
# normalize the given time to UTC by applying the timedelta provided in the timestamp
try:
t_tmp = datetime.datetime.strptime(dateString, '%Y%m%d%H%M%S')
except TypeError:
xbmc.log('[script.tvguide.fullscreen] strptime error with this date: %s' % dateString, xbmc.LOGDEBUG)
t_tmp = datetime.datetime.fromtimestamp(time.mktime(time.strptime(dateString, '%Y%m%d%H%M%S')))
if offSign == '+':
t = t_tmp - td
elif offSign == '-':
t = t_tmp + td
else:
t = t_tmp
# get the local timezone offset in seconds
is_dst = time.daylight and time.localtime().tm_isdst > 0
utc_offset = - (time.altzone if is_dst else time.timezone)
td_local = datetime.timedelta(seconds=utc_offset)
t = t + td_local
return t
else:
return None
def parseXMLTV(self, context, f, size, logoFolder, progress_callback):
event, root = context.next()
elements_parsed = 0
meta_installed = False
try:
xbmcaddon.Addon("plugin.video.meta")
meta_installed = True
except Exception:
pass # ignore addons that are not installed
if self.logoSource == XMLTVSource.LOGO_SOURCE_FOLDER:
dirs, files = xbmcvfs.listdir(logoFolder)
logos = [file[:-4] for file in files if file.endswith(".png")]
for event, elem in context:
if event == "end":
result = None
if elem.tag == "programme":
channel = elem.get("channel").replace("'", "") # Make ID safe to use as ' can cause crashes!
description = elem.findtext("desc")
iconElement = elem.find("icon")
icon = None
if iconElement is not None:
icon = iconElement.get("src")
if not description:
description = strings(NO_DESCRIPTION)
season = None
episode = None
is_movie = None
language = elem.find("title").get("lang")
if meta_installed == True:
episode_num = elem.findtext("episode-num")
categories = elem.findall("category")
for category in categories:
if "movie" in category.text.lower() or channel.lower().find("sky movies") != -1 \
or "film" in category.text.lower():
is_movie = "Movie"
break
if episode_num is not None:
episode_num = unicode.encode(unicode(episode_num), 'ascii','ignore')
if str.find(episode_num, ".") != -1:
splitted = str.split(episode_num, ".")
if splitted[0] != "":
season = int(splitted[0]) + 1
is_movie = None # fix for misclassification
if str.find(splitted[1], "/") != -1:
episode = int(splitted[1].split("/")[0]) + 1
elif splitted[1] != "":
episode = int(splitted[1]) + 1
elif str.find(episode_num.lower(), "season") != -1 and episode_num != "Season ,Episode ":
pattern = re.compile(r"Season\s(\d+).*?Episode\s+(\d+).*",re.I|re.U)
season = int(re.sub(pattern, r"\1", episode_num))
episode = (re.sub(pattern, r"\2", episode_num))
result = Program(channel, elem.findtext('title'), self.parseXMLTVDate(elem.get('start')),
self.parseXMLTVDate(elem.get('stop')), description, imageSmall=icon,
                                         season=season, episode=episode, is_movie=is_movie, language=language)
elif elem.tag == "channel":
cid = elem.get("id").replace("'", "") # Make ID safe to use as ' can cause crashes!
title = elem.findtext("display-name")
iconElement = elem.find("icon")
icon = None
if iconElement is not None:
icon = iconElement.get("src")
logo = None
if icon and ADDON.getSetting('xmltv.logos'):
logo = icon
if logoFolder:
logoFile = os.path.join(logoFolder, title + '.png')
if self.logoSource == XMLTVSource.LOGO_SOURCE_URL:
logo = logoFile.replace(' ', '%20')
#elif xbmcvfs.exists(logoFile): #BUG case insensitive match but won't load image
# logo = logoFile
else:
#TODO use hash or db
for l in sorted(logos):
logox = re.sub(r' ','',l.lower())
t = re.sub(r' ','',title.lower())
t = re.sub(r'\+','\\+',t)
t = re.sub(r'[\(\)]',' ',t)
titleRe = "^%s" % t
if re.match(titleRe,logox):
logo = os.path.join(logoFolder, l + '.png')
break
streamElement = elem.find("stream")
streamUrl = None
if streamElement is not None:
streamUrl = streamElement.text
visible = elem.get("visible")
if visible == "0":
visible = False
else:
visible = True
result = Channel(cid, title, logo, streamUrl, visible)
if result:
elements_parsed += 1
if progress_callback and elements_parsed % 500 == 0:
if not progress_callback(100.0 / size * f.tell()):
raise SourceUpdateCanceledException()
yield result
root.clear()
f.close()
class FileWrapper(object):
def __init__(self, filename):
self.vfsfile = xbmcvfs.File(filename)
self.size = self.vfsfile.size()
self.bytesRead = 0
def close(self):
self.vfsfile.close()
def read(self, byteCount):
self.bytesRead += byteCount
return self.vfsfile.read(byteCount)
def tell(self):
return self.bytesRead
def instantiateSource():
return XMLTVSource(ADDON)
|
im85288/script.tvguide.fullscreen
|
source.py
|
Python
|
gpl-2.0
| 62,215
|
from builtins import object
__author__ = "grburgess"
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.visualization import quantity_support
import warnings
from threeML.config.config import threeML_config
from threeML.io.calculate_flux import (
_setup_analysis_dictionaries,
_collect_sums_into_dictionaries,
)
from threeML.io.plotting.cmap_cycle import cmap_intervals
def plot_point_source_spectra(*analysis_results, **kwargs):
warnings.warn("plot_point_source_spectra() has been replaced by plot_spectra().")
return plot_spectra(*analysis_results, **kwargs)
def plot_spectra(*analysis_results, **kwargs):
"""
plotting routine for fitted point source spectra
:param analysis_results: fitted JointLikelihood or BayesianAnalysis objects
:param sources_to_use: (optional) list of PointSource string names to plot from the analysis
:param energy_unit: (optional) astropy energy unit in string form (can also be frequency)
:param flux_unit: (optional) astropy flux unit in string form
:param confidence_level: (optional) confidence level to use (default: 0.68)
:param ene_min: (optional) minimum energy to plot
:param ene_max: (optional) maximum energy to plot
:param num_ene: (optional) number of energies to plot
:param use_components: (optional) True or False to plot the spectral components
:param components_to_use: (optional) list of string names of the components to plot: including 'total'
will also plot the total spectrum
    :param sum_sources: (optional) sum all the MLE and Bayesian sources
:param show_contours: (optional) True or False to plot the contour region
:param plot_style_kwargs: (optional) dictionary of MPL plot styling for the best fit curve
:param contour_style_kwargs: (optional) dictionary of MPL plot styling for the contour regions
:param fit_cmap: MPL color map to iterate over for plotting multiple analyses
:param contour_cmap: MPL color map to iterate over for plotting contours for multiple analyses
:param subplot: subplot to use
:param xscale: 'log' or 'linear'
:param yscale: 'log' or 'linear'
:param include_extended: True or False, also plot extended source spectra.
:return:
"""
    # allow matplotlib to plot quantities on the axes
quantity_support()
_defaults = {
"fit_cmap": threeML_config["model plot"]["point source plot"]["fit cmap"],
"contour_cmap": threeML_config["model plot"]["point source plot"][
"contour cmap"
],
"contour_colors": None,
"fit_colors": None,
"confidence_level": 0.68,
"equal_tailed": True,
"best_fit": "median",
"energy_unit": "keV",
"flux_unit": "1/(keV s cm2)",
"ene_min": 10.0,
"ene_max": 1e4,
"num_ene": 100,
"use_components": False,
"components_to_use": [],
"sources_to_use": [],
"sum_sources": False,
"show_contours": True,
"plot_style_kwargs": threeML_config["model plot"]["point source plot"][
"plot style"
],
"contour_style_kwargs": threeML_config["model plot"]["point source plot"][
"contour style"
],
"show_legend": True,
"legend_kwargs": threeML_config["model plot"]["point source plot"][
"legend style"
],
"subplot": None,
"xscale": "log",
"yscale": "log",
"include_extended": False,
}
for key, value in kwargs.items():
if key in _defaults:
_defaults[key] = value
if isinstance(_defaults["ene_min"], u.Quantity):
assert isinstance(
_defaults["ene_max"], u.Quantity
), "both energy arguments must be Quantities"
if isinstance(_defaults["ene_max"], u.Quantity):
assert isinstance(
_defaults["ene_min"], u.Quantity
), "both energy arguments must be Quantities"
if isinstance(_defaults["ene_max"], u.Quantity):
energy_range = np.linspace(
_defaults["ene_min"], _defaults["ene_max"], _defaults["num_ene"]
) # type: u.Quantity
_defaults["energy_unit"] = energy_range.unit
if _defaults["xscale"] == "log":
energy_range = (
np.logspace(
np.log10(energy_range.min().value),
np.log10(energy_range.max().value),
_defaults["num_ene"],
)
* energy_range.unit
)
else:
energy_range = np.logspace(
np.log10(_defaults["ene_min"]),
np.log10(_defaults["ene_max"]),
_defaults["num_ene"],
) * u.Unit(_defaults["energy_unit"])
(
mle_analyses,
bayesian_analyses,
num_sources_to_plot,
duplicate_keys,
) = _setup_analysis_dictionaries(
analysis_results,
energy_range,
_defaults["energy_unit"],
_defaults["flux_unit"],
_defaults["use_components"],
_defaults["components_to_use"],
_defaults["confidence_level"],
_defaults["equal_tailed"],
differential=True,
sources_to_use=_defaults["sources_to_use"],
include_extended=_defaults["include_extended"],
)
# we are now ready to plot.
# all calculations have been made.
# if we are not going to sum sources
if not _defaults["sum_sources"]:
if _defaults["fit_colors"] is None:
color_fit = cmap_intervals(num_sources_to_plot + 1, _defaults["fit_cmap"])
else:
# duck typing
if isinstance(_defaults["fit_colors"], (str, str)):
color_fit = [_defaults["fit_colors"]] * num_sources_to_plot
elif isinstance(_defaults["fit_colors"], list):
assert len(_defaults["fit_colors"]) == num_sources_to_plot, (
"list of colors (%d) must be the same length as sources ot plot (%s)"
% (len(_defaults["fit_colors"]), num_sources_to_plot)
)
color_fit = _defaults["fit_colors"]
else:
raise ValueError(
"Can not setup color, wrong type:", type(_defaults["fit_colors"])
)
if _defaults["contour_colors"] is None:
color_contour = cmap_intervals(
num_sources_to_plot + 1, _defaults["contour_cmap"]
)
else:
# duck typing
if isinstance(_defaults["contour_colors"], (str, str)):
color_contour = [_defaults["contour_colors"]] * num_sources_to_plot
elif isinstance(_defaults["contour_colors"], list):
assert len(_defaults["contour_colors"]) == num_sources_to_plot, (
"list of colors (%d) must be the same length as sources ot plot (%s)"
% (len(_defaults["contour_colors"]), num_sources_to_plot)
)
color_contour = _defaults["fit_colors"]
else:
raise ValueError(
"Can not setup contour color, wrong type:",
type(_defaults["contour_colors"]),
)
color_itr = 0
    # go through the MLE analyses and plot their spectra
plotter = SpectralContourPlot(
num_sources_to_plot,
xscale=_defaults["xscale"],
yscale=_defaults["yscale"],
show_legend=_defaults["show_legend"],
plot_kwargs=_defaults["plot_style_kwargs"],
contour_kwargs=_defaults["contour_style_kwargs"],
legend_kwargs=_defaults["legend_kwargs"],
emin=_defaults["ene_min"],
emax=_defaults["ene_max"],
subplot=_defaults["subplot"],
)
for key in list(mle_analyses.keys()):
            # we don't decide whether to plot the total until the end
plot_total = False
if _defaults["use_components"]:
# if this source has no components or none that we wish to plot
# then we will plot the total spectrum after this
if (not list(mle_analyses[key]["components"].keys())) or (
"total" in _defaults["components_to_use"]
):
plot_total = True
for component in list(mle_analyses[key]["components"].keys()):
positive_error = None
negative_error = None
# extract the information and plot it
if _defaults["best_fit"] == "average":
best_fit = mle_analyses[key]["components"][component].average
else:
best_fit = mle_analyses[key]["components"][component].median
if _defaults["show_contours"]:
positive_error = mle_analyses[key]["components"][
component
].upper_error
negative_error = mle_analyses[key]["components"][
component
].lower_error
neg_mask = negative_error <= 0
# replace with small number
negative_error[neg_mask] = min(best_fit) * 0.9
label = "%s: %s" % (key, component)
# this is where we keep track of duplicates
if key in duplicate_keys:
label = "%s: MLE" % label
if mle_analyses[key]["components"][component].is_dimensionless:
plotter.add_dimensionless_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label=label,
)
else:
plotter.add_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label=label,
)
color_itr += 1
else:
plot_total = True
if plot_total:
# it ends up that we need to plot the total spectrum
# which is just a repeat of the process
if _defaults["best_fit"] == "average":
best_fit = mle_analyses[key]["fitted point source"].average
else:
best_fit = mle_analyses[key]["fitted point source"].median
if _defaults["show_contours"]:
positive_error = mle_analyses[key][
"fitted point source"
].upper_error
negative_error = mle_analyses[key][
"fitted point source"
].lower_error
neg_mask = negative_error <= 0
# replace with small number
negative_error[neg_mask] = min(best_fit) * 0.9
else:
positive_error = None
negative_error = None
label = "%s" % key
if key in duplicate_keys:
label = "%s: MLE" % label
plotter.add_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label=label,
)
color_itr += 1
# we will do the exact same thing for the bayesian analyses
for key in list(bayesian_analyses.keys()):
plot_total = False
if _defaults["use_components"]:
if (not list(bayesian_analyses[key]["components"].keys())) or (
"total" in _defaults["components_to_use"]
):
plot_total = True
for component in list(bayesian_analyses[key]["components"].keys()):
positive_error = None
negative_error = None
if _defaults["best_fit"] == "average":
best_fit = bayesian_analyses[key]["components"][
component
].average
else:
best_fit = bayesian_analyses[key]["components"][
component
].median
if _defaults["show_contours"]:
positive_error = bayesian_analyses[key]["components"][
component
].upper_error
negative_error = bayesian_analyses[key]["components"][
component
].lower_error
label = "%s: %s" % (key, component)
if key in duplicate_keys:
label = "%s: Bayesian" % label
if bayesian_analyses[key]["components"][component].is_dimensionless:
plotter.add_dimensionless_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label=label,
)
else:
plotter.add_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label=label,
)
color_itr += 1
else:
plot_total = True
if plot_total:
if _defaults["best_fit"] == "average":
best_fit = bayesian_analyses[key]["fitted point source"].average
else:
best_fit = bayesian_analyses[key]["fitted point source"].median
positive_error = None
negative_error = None
if _defaults["show_contours"]:
positive_error = bayesian_analyses[key][
"fitted point source"
].upper_error
negative_error = bayesian_analyses[key][
"fitted point source"
].lower_error
label = "%s" % key
if key in duplicate_keys:
label = "%s: Bayesian" % label
plotter.add_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label=label,
)
color_itr += 1
else:
# now we sum sources instead
# we keep MLE and Bayes apart because it makes no
# sense to sum them together
(
total_analysis_mle,
component_sum_dict_mle,
num_sources_to_plot,
) = _collect_sums_into_dictionaries(
mle_analyses, _defaults["use_components"], _defaults["components_to_use"]
)
(
total_analysis_bayes,
component_sum_dict_bayes,
num_sources_to_plot_bayes,
) = _collect_sums_into_dictionaries(
bayesian_analyses,
_defaults["use_components"],
_defaults["components_to_use"],
)
num_sources_to_plot += num_sources_to_plot_bayes
plotter = SpectralContourPlot(
num_sources_to_plot,
xscale=_defaults["xscale"],
yscale=_defaults["yscale"],
show_legend=_defaults["show_legend"],
plot_kwargs=_defaults["plot_style_kwargs"],
contour_kwargs=_defaults["contour_style_kwargs"],
legend_kwargs=_defaults["legend_kwargs"],
emin=_defaults["ene_min"],
emax=_defaults["ene_max"],
subplot=_defaults["subplot"],
)
color_fit = cmap_intervals(num_sources_to_plot, _defaults["fit_cmap"])
color_contour = cmap_intervals(num_sources_to_plot, _defaults["contour_cmap"])
color_itr = 0
if _defaults["use_components"] and list(component_sum_dict_mle.keys()):
# we have components to plot
for component, values in component_sum_dict_mle.items():
summed_analysis = sum(values)
if _defaults["best_fit"] == "average":
best_fit = summed_analysis.average
else:
best_fit = summed_analysis.median
positive_error = None
negative_error = None
if _defaults["show_contours"]:
positive_error = summed_analysis.upper_error
negative_error = summed_analysis.lower_error
neg_mask = negative_error <= 0
# replace with small number
negative_error[neg_mask] = min(best_fit) * 0.9
if np.any(
[c.is_dimensionless for c in component_sum_dict_mle[component]]
):
plotter.add_dimensionless_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label="%s: MLE" % component,
)
else:
plotter.add_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label="%s: MLE" % component,
)
color_itr += 1
if total_analysis_mle:
# we will sum and plot the total
# analysis
summed_analysis = sum(total_analysis_mle)
if _defaults["best_fit"] == "average":
best_fit = summed_analysis.average
else:
best_fit = summed_analysis.median
positive_error = None
negative_error = None
if _defaults["show_contours"]:
positive_error = best_fit + summed_analysis.upper_error
negative_error = best_fit - summed_analysis.lower_error
neg_mask = negative_error <= 0
# replace with small number
negative_error[neg_mask] = min(best_fit) * 0.9
plotter.add_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label="total: MLE",
)
color_itr += 1
if _defaults["use_components"] and list(component_sum_dict_bayes.keys()):
# we have components to plot
for component, values in component_sum_dict_bayes.items():
summed_analysis = sum(values)
if _defaults["best_fit"] == "average":
best_fit = summed_analysis.average
else:
best_fit = summed_analysis.median
positive_error = None
negative_error = None
if _defaults["show_contours"]:
positive_error = summed_analysis.upper_error
negative_error = summed_analysis.lower_error
if np.any(
[c.is_dimensionless for c in component_sum_dict_bayes[component]]
):
plotter.add_dimensionless_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label="%s: Bayesian" % component,
)
else:
plotter.add_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label="%s: Bayesian" % component,
)
color_itr += 1
if total_analysis_bayes:
# we will sum and plot the total
# analysis
summed_analysis = sum(total_analysis_bayes)
if _defaults["best_fit"] == "average":
best_fit = summed_analysis.average
else:
best_fit = summed_analysis.median
positive_error = None
negative_error = None
if _defaults["show_contours"]:
positive_error = summed_analysis.upper_error
negative_error = summed_analysis.lower_error
plotter.add_model(
energy_range=energy_range,
best_fit=best_fit,
color=color_fit[color_itr],
upper_error=positive_error,
lower_error=negative_error,
contour_color=color_contour[color_itr],
label="total: Bayesian",
)
color_itr += 1
return plotter.finalize(_defaults)
class SpectralContourPlot(object):
def __init__(
self,
n_total,
xscale="log",
yscale="log",
show_legend=True,
plot_kwargs=None,
contour_kwargs=None,
legend_kwargs=None,
emin=None,
emax=None,
subplot=None,
):
self._n_total = n_total
self._show_legend = show_legend
self._legend_kwargs = legend_kwargs
self._emin = emin
self._emax = emax
self._plot_kwargs = plot_kwargs
self._contour_kwargs = contour_kwargs
if subplot is None:
self._fig, self._ax = plt.subplots()
else:
self._ax = subplot
self._fig = self._ax.get_figure()
self._ax_right = None
self._n_plotted = 0
self._xscale = xscale
self._yscale = yscale
def add_model(
self,
energy_range,
best_fit,
color,
upper_error=None,
lower_error=None,
contour_color=None,
label="model",
):
self._ax.plot(
energy_range, best_fit, color=color, label=label, **self._plot_kwargs
)
if (upper_error is not None) and (lower_error is not None):
self._ax.fill_between(
energy_range,
lower_error,
upper_error,
facecolor=contour_color,
**self._contour_kwargs
)
def add_dimensionless_model(
self,
energy_range,
best_fit,
color,
upper_error=None,
lower_error=None,
contour_color=None,
label="model",
):
if self._n_total > 1:
if self._ax_right is None:
self._ax_right = self._ax.twinx()
self._ax_right.plot(
energy_range, best_fit, color=color, label=label, **self._plot_kwargs
)
if (upper_error is not None) and (lower_error is not None):
self._ax_right.fill_between(
energy_range,
lower_error,
upper_error,
facecolor=contour_color,
**self._contour_kwargs
)
else:
self.add_model(
energy_range,
best_fit,
color,
upper_error,
lower_error,
contour_color,
label,
)
def finalize(self, _defaults):
self._ax.set_xscale(self._xscale)
self._ax.set_yscale(self._yscale)
if self._show_legend:
self._ax.legend(**self._legend_kwargs)
if self._ax_right is not None:
self._ax_right.set_yscale(self._yscale)
self._ax_right.set_ylabel("Arbitrary units")
if self._show_legend:
self._ax_right.legend(**self._legend_kwargs)
self._ax.set_xlim([self._emin, self._emax])
if isinstance(self._emin, u.Quantity) and self._show_legend:
# This workaround is needed because of a bug in astropy that would break the plotting of the legend
# (see issue #7504 in the Astropy github repo)
eemin = self._emin.to(self._ax.xaxis.get_units()).value
eemax = self._emax.to(self._ax.xaxis.get_units()).value
self._ax.set_xlim([eemin, eemax])
self._ax.xaxis.converter = None
return self._fig
|
giacomov/3ML
|
threeML/io/plotting/model_plot.py
|
Python
|
bsd-3-clause
| 26,801
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
"""
This module encapsulates functionality for the online Hierarchical Dirichlet Process algorithm.
It allows both model estimation from a training corpus and inference of topic
distribution on new, unseen documents.
The core estimation code is directly adapted from the `onlinehdp.py` script
by C. Wang; see
**Wang, Paisley, Blei: Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011).**
http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf
The algorithm:
* is **streamed**: training documents come in sequentially, no random access,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint
"""
from __future__ import with_statement
import logging, time
import numpy as np
import scipy.special as sp
from gensim import interfaces, utils, matutils
from six.moves import xrange
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def log_normalize(v):
log_max = 100.0
if len(v.shape) == 1:
max_val = np.max(v)
log_shift = log_max - np.log(len(v) + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift))
log_norm = np.log(tot) - log_shift
v = v - log_norm
else:
max_val = np.max(v, 1)
log_shift = log_max - np.log(v.shape[1] + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift[:, np.newaxis]), 1)
log_norm = np.log(tot) - log_shift
v = v - log_norm[:, np.newaxis]
return (v, log_norm)
def dirichlet_expectation(alpha):
"""
For a vector theta ~ Dir(alpha), compute E[log(theta)] given alpha.
"""
if (len(alpha.shape) == 1):
return(sp.psi(alpha) - sp.psi(np.sum(alpha)))
return(sp.psi(alpha) - sp.psi(np.sum(alpha, 1))[:, np.newaxis])
def expect_log_sticks(sticks):
"""
For stick-breaking hdp, return the E[log(sticks)]
"""
dig_sum = sp.psi(np.sum(sticks, 0))
ElogW = sp.psi(sticks[0]) - dig_sum
Elog1_W = sp.psi(sticks[1]) - dig_sum
n = len(sticks[0]) + 1
Elogsticks = np.zeros(n)
Elogsticks[0: n - 1] = ElogW
Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
return Elogsticks
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
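    # Variational E-step for a single document under a plain LDA model:
    # iterates the standard gamma/phi updates until the mean change in gamma
    # drops below `meanchangethresh`, then returns (variational likelihood, gamma).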
gamma = np.ones(len(alpha))
expElogtheta = np.exp(dirichlet_expectation(gamma))
betad = beta[:, doc_word_ids]
phinorm = np.dot(expElogtheta, betad) + 1e-100
counts = np.array(doc_word_counts)
for _ in xrange(max_iter):
lastgamma = gamma
gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
phinorm = np.dot(expElogtheta, betad) + 1e-100
meanchange = np.mean(abs(gamma - lastgamma))
if (meanchange < meanchangethresh):
break
likelihood = np.sum(counts * np.log(phinorm))
likelihood += np.sum((alpha - gamma) * Elogtheta)
likelihood += np.sum(sp.gammaln(gamma) - sp.gammaln(alpha))
likelihood += sp.gammaln(np.sum(alpha)) - sp.gammaln(np.sum(gamma))
return (likelihood, gamma)
class SuffStats(object):
def __init__(self, T, Wt, Dt):
self.m_chunksize = Dt
self.m_var_sticks_ss = np.zeros(T)
self.m_var_beta_ss = np.zeros((T, Wt))
def set_zero(self):
self.m_var_sticks_ss.fill(0.0)
self.m_var_beta_ss.fill(0.0)
class HdpModel(interfaces.TransformationABC):
"""
    The constructor estimates Hierarchical Dirichlet Process model parameters based
on a training corpus:
>>> hdp = HdpModel(corpus, id2word)
    >>> hdp.print_topics(num_topics=20, num_words=10)
Inference on new documents is based on the approximately LDA-equivalent topics.
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
outputdir=None):
"""
`gamma`: first level concentration
`alpha`: second level concentration
`eta`: the topic Dirichlet
`T`: top level truncation level
`K`: second level truncation level
`kappa`: learning rate
`tau`: slow down parameter
`max_time`: stop training after this many seconds
`max_chunks`: stop after having processed this many chunks (wrap around
corpus beginning in another corpus pass, if there are not enough chunks
in the corpus)
"""
self.corpus = corpus
self.id2word = id2word
self.chunksize = chunksize
self.max_chunks = max_chunks
self.max_time = max_time
self.outputdir = outputdir
self.lda_alpha = None
self.lda_beta = None
self.m_W = len(id2word)
self.m_D = 0
if corpus:
self.m_D = len(corpus)
self.m_T = T
self.m_K = K
self.m_alpha = alpha
self.m_gamma = gamma
self.m_var_sticks = np.zeros((2, T - 1))
self.m_var_sticks[0] = 1.0
self.m_var_sticks[1] = range(T - 1, 0, -1)
self.m_varphi_ss = np.zeros(T)
self.m_lambda = np.random.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
self.m_eta = eta
self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
self.m_tau = tau + 1
self.m_kappa = kappa
self.m_scale = scale
self.m_updatect = 0
self.m_status_up_to_date = True
self.m_num_docs_processed = 0
self.m_timestamp = np.zeros(self.m_W, dtype=int)
self.m_r = [0]
self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
self.m_var_converge = var_converge
if self.outputdir:
self.save_options()
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
self.update(corpus)
def inference(self, chunk):
if self.lda_alpha is None or self.lda_beta is None:
raise RuntimeError("model must be trained to perform inference")
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents" % len(chunk))
gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
for d, doc in enumerate(chunk):
if not doc: # leave gamma at zero for empty documents
continue
ids, counts = zip(*doc)
_, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
gamma[d, :] = gammad
return gamma
def __getitem__(self, bow, eps=0.01):
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
return self._apply(corpus)
gamma = self.inference([bow])[0]
topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= eps]
def update(self, corpus):
save_freq = max(1, int(10000 / self.chunksize)) # save every 10k docs, roughly
chunks_processed = 0
start_time = time.clock()
while True:
for chunk in utils.grouper(corpus, self.chunksize):
self.update_chunk(chunk)
self.m_num_docs_processed += len(chunk)
chunks_processed += 1
if self.update_finished(start_time, chunks_processed, self.m_num_docs_processed):
self.update_expectations()
alpha, beta = self.hdp_to_lda()
self.lda_alpha = alpha
self.lda_beta = beta
self.print_topics(20)
if self.outputdir:
self.save_topics()
return
elif chunks_processed % save_freq == 0:
self.update_expectations()
# self.save_topics(self.m_num_docs_processed)
self.print_topics(20)
logger.info('PROGRESS: finished document %i of %i', self.m_num_docs_processed, self.m_D)
def update_finished(self, start_time, chunks_processed, docs_processed):
return (
# chunk limit reached
(self.max_chunks and chunks_processed == self.max_chunks) or
# time limit reached
(self.max_time and time.clock() - start_time > self.max_time) or
# no limits and whole corpus has been processed once
(not self.max_chunks and not self.max_time and docs_processed >= self.m_D))
def update_chunk(self, chunk, update=True, opt_o=True):
# Find the unique words in this chunk...
unique_words = dict()
word_list = []
for doc in chunk:
for word_id, _ in doc:
if word_id not in unique_words:
unique_words[word_id] = len(unique_words)
word_list.append(word_id)
Wt = len(word_list) # length of words in these documents
# ...and do the lazy updates on the necessary columns of lambda
rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
self.m_Elogbeta[:, word_list] = \
sp.psi(self.m_eta + self.m_lambda[:, word_list]) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
ss = SuffStats(self.m_T, Wt, len(chunk))
Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks
# run variational inference on some new docs
score = 0.0
count = 0
for doc in chunk:
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
doc_score = self.doc_e_step(
doc, ss, Elogsticks_1st,
word_list, unique_words, doc_word_ids,
doc_word_counts, self.m_var_converge)
count += sum(doc_word_counts)
score += doc_score
if update:
self.update_lambda(ss, word_list, opt_o)
return (score, count)
def doc_e_step(self, doc, ss, Elogsticks_1st, word_list,
unique_words, doc_word_ids, doc_word_counts, var_converge):
"""
        E step (variational inference) for a single document
"""
chunkids = [unique_words[id] for id in doc_word_ids]
Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
## very similar to the hdp equations
v = np.zeros((2, self.m_K - 1))
v[0] = 1.0
v[1] = self.m_alpha
# back to the uniform
phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
likelihood = 0.0
old_likelihood = -1e200
converge = 1.0
eps = 1e-100
iter = 0
max_iter = 100
        # second-level optimization is not supported yet; to be done in the future
while iter < max_iter and (converge < 0.0 or converge > var_converge):
### update variational parameters
# var_phi
if iter < 3:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
else:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
# phi
if iter < 3:
phi = np.dot(var_phi, Elogbeta_doc).T
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
else:
phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
# v
phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
Elogsticks_2nd = expect_log_sticks(v)
likelihood = 0.0
# compute likelihood
# var_phi part/ C in john's notation
likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
# v part/ v in john's notation, john's beta is alpha here
log_alpha = np.log(self.m_alpha)
likelihood += (self.m_K - 1) * log_alpha
dig_sum = sp.psi(np.sum(v, 0))
likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (sp.psi(v) - dig_sum))
likelihood -= np.sum(sp.gammaln(np.sum(v, 0))) - np.sum(sp.gammaln(v))
# Z part
likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
# X part, the data part
likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
converge = (likelihood - old_likelihood) / abs(old_likelihood)
old_likelihood = likelihood
if converge < -0.000001:
logger.warning('likelihood is decreasing!')
iter += 1
# update the suff_stat ss
# this time it only contains information from one doc
ss.m_var_sticks_ss += np.sum(var_phi, 0)
ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
return likelihood
def update_lambda(self, sstats, word_list, opt_o):
self.m_status_up_to_date = False
# rhot will be between 0 and 1, and says how much to weight
# the information we got from this mini-chunk.
rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
if rhot < rhot_bound:
rhot = rhot_bound
self.m_rhot = rhot
# Update appropriate columns of lambda based on documents.
self.m_lambda[:, word_list] = self.m_lambda[:, word_list] * (1 - rhot) + \
rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
self.m_updatect += 1
self.m_timestamp[word_list] = self.m_updatect
self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
self.m_varphi_ss = (1.0 - rhot) * self.m_varphi_ss + rhot * \
sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
if opt_o:
self.optimal_ordering()
## update top level sticks
self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
var_phi_sum = np.flipud(self.m_varphi_ss[1:])
self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
def optimal_ordering(self):
"""
        re-order the topics by their total lambda mass, largest first
"""
idx = matutils.argsort(self.m_lambda_sum, reverse=True)
self.m_varphi_ss = self.m_varphi_ss[idx]
self.m_lambda = self.m_lambda[idx, :]
self.m_lambda_sum = self.m_lambda_sum[idx]
self.m_Elogbeta = self.m_Elogbeta[idx, :]
def update_expectations(self):
"""
Since we're doing lazy updates on lambda, at any given moment
the current state of lambda may not be accurate. This function
updates all of the elements of lambda and Elogbeta
so that if (for example) we want to print out the
topics we've learned we'll get the correct behavior.
"""
for w in xrange(self.m_W):
self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
self.m_r[self.m_timestamp[w]])
self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
self.m_timestamp[:] = self.m_updatect
self.m_status_up_to_date = True
def print_topics(self, num_topics=20, num_words=20):
"""Alias for `show_topics()` that prints the `num_words` most
probable words for `topics` number of topics to log.
Set `topics=-1` to print all topics."""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
def show_topics(self, num_topics=20, num_words=20, log=False, formatted=True):
"""
        Print the `num_words` most probable words for `num_topics` topics.
        Set `num_topics=-1` to print all topics.
        Set `formatted=True` to return the topics as a list of strings, or
        `False` to return them as lists of (word, weight) pairs.
"""
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topics(num_topics, num_words, log, formatted)
def save_topics(self, doc_count=None):
"""legacy method; use `self.save()` instead"""
if not self.outputdir:
logger.error("cannot store topics without having specified an output directory")
if doc_count is None:
fname = 'final'
else:
fname = 'doc-%i' % doc_count
fname = '%s/%s.topics' % (self.outputdir, fname)
logger.info("saving topics to %s" % fname)
betas = self.m_lambda + self.m_eta
np.savetxt(fname, betas)
def save_options(self):
"""legacy method; use `self.save()` instead"""
if not self.outputdir:
logger.error("cannot store options without having specified an output directory")
return
fname = '%s/options.dat' % self.outputdir
with utils.smart_open(fname, 'wb') as fout:
fout.write('tau: %s\n' % str(self.m_tau - 1))
fout.write('chunksize: %s\n' % str(self.chunksize))
fout.write('var_converge: %s\n' % str(self.m_var_converge))
fout.write('D: %s\n' % str(self.m_D))
fout.write('K: %s\n' % str(self.m_K))
fout.write('T: %s\n' % str(self.m_T))
fout.write('W: %s\n' % str(self.m_W))
fout.write('alpha: %s\n' % str(self.m_alpha))
fout.write('kappa: %s\n' % str(self.m_kappa))
fout.write('eta: %s\n' % str(self.m_eta))
fout.write('gamma: %s\n' % str(self.m_gamma))
def hdp_to_lda(self):
"""
        Compute the almost-equivalent LDA parameters (alpha, beta) for this HDP.
"""
# alpha
sticks = self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])
alpha = np.zeros(self.m_T)
left = 1.0
for i in xrange(0, self.m_T - 1):
alpha[i] = sticks[i] * left
left = left - alpha[i]
alpha[self.m_T - 1] = left
alpha = alpha * self.m_alpha
# beta
beta = (self.m_lambda + self.m_eta) / (self.m_W * self.m_eta + \
self.m_lambda_sum[:, np.newaxis])
return (alpha, beta)
def evaluate_test_corpus(self, corpus):
logger.info('TEST: evaluating test corpus')
if self.lda_alpha is None or self.lda_beta is None:
self.lda_alpha, self.lda_beta = self.hdp_to_lda()
score = 0.0
total_words = 0
for i, doc in enumerate(corpus):
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
likelihood, gamma = lda_e_step(doc_word_ids, doc_word_counts, self.lda_alpha, self.lda_beta)
theta = gamma / np.sum(gamma)
lda_betad = self.lda_beta[:, doc_word_ids]
log_predicts = np.log(np.dot(theta, lda_betad))
doc_score = sum(log_predicts) / len(doc)
logger.info('TEST: %6d %.5f' % (i, doc_score))
score += likelihood
total_words += sum(doc_word_counts)
logger.info('TEST: average score: %.5f, total score: %.5f, test docs: %d' % (score / total_words, score, len(corpus)))
return score
#endclass HdpModel
class HdpTopicFormatter(object):
(STYLE_GENSIM, STYLE_PRETTY) = (1, 2)
def __init__(self, dictionary=None, topic_data=None, topic_file=None, style=None):
if dictionary is None:
raise ValueError('no dictionary!')
if topic_data is not None:
topics = topic_data
elif topic_file is not None:
topics = np.loadtxt('%s' % topic_file)
else:
raise ValueError('no topic data!')
# sort topics
topics_sums = np.sum(topics, axis=1)
idx = matutils.argsort(topics_sums, reverse=True)
self.data = topics[idx]
self.dictionary = dictionary
if style is None:
style = self.STYLE_GENSIM
self.style = style
def print_topics(self, num_topics=10, num_words=10):
return self.show_topics(num_topics, num_words, True)
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
shown = []
if num_topics < 0:
num_topics = len(self.data)
num_topics = min(num_topics, len(self.data))
for k in xrange(num_topics):
lambdak = list(self.data[k, :])
lambdak = lambdak / sum(lambdak)
temp = zip(lambdak, xrange(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, num_words)
if formatted:
topic = self.format_topic(k, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic)
else:
topic = (k, topic_terms)
shown.append(topic)
return shown
def show_topic_terms(self, topic_data, num_words):
return [(self.dictionary[wid], weight) for (weight, wid) in topic_data[:num_words]]
def format_topic(self, topic_id, topic_terms):
if self.STYLE_GENSIM == self.style:
fmt = ' + '.join(['%.3f*%s' % (weight, word) for (word, weight) in topic_terms])
fmt = 'topic %i: %s' % (topic_id, fmt)
else:
fmt = '\n'.join([' %20s %.8f' % (word, weight) for (word, weight) in topic_terms])
fmt = 'topic %i:\n%s' % (topic_id, fmt)
return fmt
#endclass HdpTopicFormatter
|
tzoiker/gensim
|
gensim/models/hdpmodel.py
|
Python
|
lgpl-2.1
| 22,940
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# from mpl_toolkits.mplot3d import Axes3D
import math
x, y, z = np.genfromtxt('list', unpack=True, skip_header=0)
# find lots of points on the piecewise linear curve defined by x and y
M = 1000
t = np.linspace(0, len(x), M)
print(t)
# x = np.interp(t, np.arange(len(x)), x)
# y = np.interp(t, np.arange(len(y)), y)
# z = np.interp(t, np.arange(len(z)), z)
# tol = 0.25
# i, idx = 0, [0]
# while i < len(x):
# total_dist = 0
# for j in range(i+1, len(x)):
# total_dist += math.sqrt((x[j]-x[j-1])**2 + (y[j]-y[j-1])**2 + ((z[j]-z[j-1])**2))
# if total_dist > tol:
# idx.append(j)
# break
# i = j+1
#
# xn = x[idx]
# yn = y[idx]
# zn = z[idx]
# dx=xn[1]-xn[0]
# dy=yn[1]-yn[0]
# dz=zn[1]-zn[0]
# for i in range(0,2):
# tx=xn[0]-dx
# ty=yn[0]-dy
# tz=zn[0]-dz
# xn=np.insert(xn,0,tx)
# yn=np.insert(yn,0,ty)
# zn=np.insert(zn,0,tz)
#
#
# output = open ("path-equi.dat","w")
# fmt = '{0:14.8f} {1:14.8f} {2:14.8f}\n'
# for i in range(0,len(xn)):
# output.write(fmt.format(xn[i],yn[i],zn[i]))
# output2 = open ("path.xyz","w")
# fmt2= 'C {0:14.8f} {1:14.8f} {2:14.8f}\n'
# output2.write(str(len(xn))+"\n")
# output2.write("\n")
# for i in range(0,len(xn)):
# output2.write(fmt2.format(xn[i],yn[i],zn[i]))
#fig = plt.figure()
#ax = fig.gca(projection='3d')
#ax.plot(xn,yn,zn,label='path')
#ax.legend()
#plt.show()
|
cmayes/md_utils
|
md_utils/path.py
|
Python
|
bsd-3-clause
| 1,474
|
##############################################################################
#
# A simple formatting example using XlsxWriter.
#
# This program demonstrates the indentation cell format.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2022, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
workbook = xlsxwriter.Workbook('cell_indentation.xlsx')
worksheet = workbook.add_worksheet()
indent1 = workbook.add_format({'indent': 1})
indent2 = workbook.add_format({'indent': 2})
worksheet.set_column('A:A', 40)
worksheet.write('A1', "This text is indented 1 level", indent1)
worksheet.write('A2', "This text is indented 2 levels", indent2)
workbook.close()
|
jmcnamara/XlsxWriter
|
examples/cell_indentation.py
|
Python
|
bsd-2-clause
| 674
|
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
|
tliron/sincerity
|
components/skeletons/django/project/manage.py
|
Python
|
lgpl-3.0
| 524
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('neuroelectro', '0004_auto_20150805_2021'),
]
operations = [
migrations.RemoveField(
model_name='uservalidation',
name='user',
),
migrations.RemoveField(
model_name='articlemetadatamap',
name='validated_by',
),
migrations.DeleteModel(
name='UserValidation',
),
]
|
neuroelectro/neuroelectro_org
|
neuroelectro/migrations/0005_auto_20150805_2052.py
|
Python
|
gpl-2.0
| 562
|
from django.apps import AppConfig
class MallsConfig(AppConfig):
name = 'malls'
|
jojoriveraa/titulacion-NFCOW
|
NFCow/malls/apps.py
|
Python
|
apache-2.0
| 85
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
polygonize.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui, QtCore
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.SextanteUtils import SextanteUtils
from sextante.parameters.ParameterRaster import ParameterRaster
from sextante.parameters.ParameterString import ParameterString
from sextante.outputs.OutputVector import OutputVector
from sextante.gdal.GdalUtils import GdalUtils
class polygonize(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
FIELD = "FIELD"
def getIcon(self):
filepath = os.path.dirname(__file__) + "/icons/polygonize.png"
return QtGui.QIcon(filepath)
def defineCharacteristics(self):
self.name = "Polygonize"
self.group = "[GDAL] Conversion"
self.addParameter(ParameterRaster(polygonize.INPUT, "Input layer", False))
self.addParameter(ParameterString(polygonize.FIELD, "Output field name", "DN"))
self.addOutput(OutputVector(polygonize.OUTPUT, "Output layer"))
def processAlgorithm(self, progress):
arguments = []
arguments.append(self.getParameterValue(polygonize.INPUT))
arguments.append('-f')
arguments.append('"ESRI Shapefile"')
output = self.getOutputValue(polygonize.OUTPUT)
arguments.append(output)
arguments.append(QtCore.QFileInfo(output).baseName())
arguments.append(self.getParameterValue(polygonize.FIELD))
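        # The assembled command is effectively (illustrative file names):
        #   gdal_polygonize.py <input.tif> -f "ESRI Shapefile" <out.shp> <out> <DN>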
commands = []
if SextanteUtils.isWindows():
commands = ["cmd.exe", "/C ", "gdal_polygonize.bat", GdalUtils.escapeAndJoin(arguments)]
else:
commands = ["gdal_polygonize.py", GdalUtils.escapeAndJoin(arguments)]
GdalUtils.runGdal(commands, progress)
|
slarosa/QGIS
|
python/plugins/sextante/gdal/polygonize.py
|
Python
|
gpl-2.0
| 2,791
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sio(CMakePackage):
"""SIO is a persistency solution for reading and writing binary data in SIO
    structures called record and block. SIO was originally implemented as the
    persistency layer for LCIO.
"""
url = "https://github.com/iLCSoft/SIO/archive/v00-00-02.tar.gz"
homepage = "https://github.com/iLCSoft/SIO"
git = "https://github.com/iLCSoft/SIO.git"
maintainers = ['vvolkl', 'tmadlener']
version('master', branch='master')
version('0.0.3', sha256='4c8b9c08480fb53cd10abb0e1260071a8c3f68d06a8acfd373f6560a916155cc')
version('0.0.2', sha256='e4cd2aeaeaa23c1da2c20c5c08a9b72a31b16b7a8f5aa6d480dcd561ef667657')
def url_for_version(self, version):
"""Translate version numbers to ilcsoft conventions.
        in spack, the convention is: 0.1 (or 0.1.0), 0.1.1, 0.2, 0.2.1 ...
        in ilcsoft, releases are dashed and padded with a leading zero;
        the patch version is omitted when it is 0,
        so for example: v01-12-01, v01-12 ...
:param self: spack package class that has a url
:type self: class: `spack.PackageBase`
:param version: version
        :type version: str
"""
base_url = self.url.rsplit('/', 1)[0]
if len(version) == 1:
major = version[0]
minor, patch = 0, 0
elif len(version) == 2:
major, minor = version
patch = 0
else:
major, minor, patch = version
# By now the data is normalized enough to handle it easily depending
# on the value of the patch version
if patch == 0:
version_str = 'v%02d-%02d.tar.gz' % (major, minor)
else:
version_str = 'v%02d-%02d-%02d.tar.gz' % (major, minor, patch)
return base_url + '/' + version_str
|
iulian787/spack
|
var/spack/repos/builtin/packages/sio/package.py
|
Python
|
lgpl-2.1
| 2,029
|
from discord.ext.commands import CommandError
class InvalidTime(CommandError):
'Exception raised, raid time invalid'
pass
class RaidDisabled(CommandError):
'Exception raised, raid not enabled'
pass
class TrainDisabled(CommandError):
'Exception raised, train not enabled'
pass
class GroupTooBig(CommandError):
'Exception raised, group too big'
pass
class NotRaidChannel(CommandError):
'Exception raised, not raid channel'
pass
class NotTrainChannel(CommandError):
'Exception raised, not train channel'
pass
class RaidNotActive(CommandError):
'Exception raised, raid not active'
pass
class MeetupDisabled(CommandError):
'Exception raised, meetup not enabled'
pass
|
FoglyOgly/Meowth
|
meowth/exts/raid/errors.py
|
Python
|
gpl-3.0
| 737
|
from __future__ import absolute_import
from __future__ import print_function
import datetime
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from django.conf import settings
from django.db import connection
from django.forms.models import model_to_dict
from django.utils.timezone import make_aware as timezone_make_aware
from django.utils.timezone import utc as timezone_utc
from django.utils.timezone import is_naive as timezone_is_naive
from django.db.models.query import QuerySet
import glob
import logging
import os
import ujson
import shutil
import subprocess
import tempfile
from zerver.lib.avatar_hash import user_avatar_hash
from zerver.lib.create_user import random_api_key
from zerver.models import UserProfile, Realm, Client, Huddle, Stream, \
UserMessage, Subscription, Message, RealmEmoji, RealmFilter, \
RealmDomain, Recipient, DefaultStream, get_user_profile_by_id, \
UserPresence, UserActivity, UserActivityInterval, get_user_profile_by_email, \
get_display_recipient, Attachment
from zerver.lib.parallel import run_parallel
from zerver.lib.utils import mkdir_p
from six.moves import range
from typing import Any, Callable, Dict, List, Set, Tuple
# Custom mypy types follow:
Record = Dict[str, Any]
TableName = str
TableData = Dict[TableName, List[Record]]
Field = str
Path = str
Context = Dict[str, Any]
FilterArgs = Dict[str, Any]
IdSource = Tuple[TableName, Field]
SourceFilter = Callable[[Record], bool]
# These next two types are callbacks, which mypy does not
# support well, because PEP 484 says "using callbacks
# with keyword arguments is not perceived as a common use case."
# CustomFetch = Callable[[TableData, Config, Context], None]
# PostProcessData = Callable[[TableData, Config, Context], None]
CustomFetch = Any # TODO: make more specific, see above
PostProcessData = Any # TODO: make more specific
# The keys of our MessageOutput variables are normally
# List[Record], but when we write partials, we can get
# lists of integers or a single integer.
# TODO: tighten this up with a union.
MessageOutput = Dict[str, Any]
realm_tables = [("zerver_defaultstream", DefaultStream),
("zerver_realmemoji", RealmEmoji),
("zerver_realmdomain", RealmDomain),
("zerver_realmfilter", RealmFilter)] # List[Tuple[TableName, Any]]
ALL_ZERVER_TABLES = [
# TODO: get a linter to ensure that this list is actually complete.
'zerver_attachment',
'zerver_attachment_messages',
'zerver_client',
'zerver_defaultstream',
'zerver_huddle',
'zerver_message',
'zerver_preregistrationuser',
'zerver_preregistrationuser_streams',
'zerver_pushdevicetoken',
'zerver_realm',
'zerver_realmdomain',
'zerver_realmemoji',
'zerver_realmfilter',
'zerver_recipient',
'zerver_referral',
'zerver_scheduledjob',
'zerver_stream',
'zerver_subscription',
'zerver_useractivity',
'zerver_useractivityinterval',
'zerver_usermessage',
'zerver_userpresence',
'zerver_userprofile',
'zerver_userprofile_groups',
'zerver_userprofile_user_permissions',
]
NON_EXPORTED_TABLES = [
# These are known to either be altogether obsolete or
# simply inappropriate for exporting (e.g. contains transient
# data).
'zerver_preregistrationuser',
'zerver_preregistrationuser_streams',
'zerver_pushdevicetoken',
'zerver_referral',
'zerver_scheduledjob',
'zerver_userprofile_groups',
'zerver_userprofile_user_permissions',
]
assert set(NON_EXPORTED_TABLES).issubset(set(ALL_ZERVER_TABLES))
IMPLICIT_TABLES = [
# ManyToMany relationships are exported implicitly.
'zerver_attachment_messages',
]
assert set(IMPLICIT_TABLES).issubset(set(ALL_ZERVER_TABLES))
ATTACHMENT_TABLES = [
'zerver_attachment',
]
assert set(ATTACHMENT_TABLES).issubset(set(ALL_ZERVER_TABLES))
MESSAGE_TABLES = [
# message tables get special treatment, because they're so big
'zerver_message',
'zerver_usermessage',
]
DATE_FIELDS = {
'zerver_attachment': ['create_time'],
'zerver_message': ['last_edit_time', 'pub_date'],
'zerver_realm': ['date_created'],
'zerver_stream': ['date_created'],
'zerver_useractivity': ['last_visit'],
'zerver_useractivityinterval': ['start', 'end'],
'zerver_userpresence': ['timestamp'],
'zerver_userprofile': ['date_joined', 'last_login', 'last_reminder'],
} # type: Dict[TableName, List[Field]]
def sanity_check_output(data):
# type: (TableData) -> None
tables = set(ALL_ZERVER_TABLES)
tables -= set(NON_EXPORTED_TABLES)
tables -= set(IMPLICIT_TABLES)
tables -= set(MESSAGE_TABLES)
tables -= set(ATTACHMENT_TABLES)
for table in tables:
if table not in data:
        logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!' % (table,))
def write_data_to_file(output_file, data):
# type: (Path, Any) -> None
with open(output_file, "w") as f:
f.write(ujson.dumps(data, indent=4))
def make_raw(query, exclude=None):
# type: (Any, List[Field]) -> List[Record]
'''
Takes a Django query and returns a JSONable list
of dictionaries corresponding to the database rows.
'''
rows = []
for instance in query:
data = model_to_dict(instance, exclude=exclude)
"""
In Django 1.10, model_to_dict resolves ManyToManyField as a QuerySet.
        Previously, we used to get primary keys. The following code converts
        the QuerySet into primary keys.
For reference: https://www.mail-archive.com/django-updates@googlegroups.com/msg163020.html
"""
for field in instance._meta.many_to_many:
value = data[field.name]
if isinstance(value, QuerySet):
data[field.name] = [row.pk for row in value]
rows.append(data)
return rows
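# Illustrative usage sketch (not part of the original module): make_raw is
# typically fed a filtered queryset with sensitive columns excluded, as in
# fetch_user_profile below. The helper here is hypothetical.
def _demo_make_raw(realm_id):
    # type: (int) -> List[Record]
    query = UserProfile.objects.filter(realm_id=realm_id)
    # Never let credentials leak into an export dump.
    return make_raw(list(query), exclude=['password', 'api_key'])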
def floatify_datetime_fields(data, table):
# type: (TableData, TableName) -> None
for item in data[table]:
for field in DATE_FIELDS[table]:
orig_dt = item[field]
if orig_dt is None:
continue
if timezone_is_naive(orig_dt):
logging.warning("Naive datetime:", item)
dt = timezone_make_aware(orig_dt)
else:
dt = orig_dt
utc_naive = dt.replace(tzinfo=None) - dt.utcoffset()
item[field] = (utc_naive - datetime.datetime(1970, 1, 1)).total_seconds()
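# Illustrative sketch (hypothetical data, not part of the original module):
# floatify_datetime_fields rewrites each field named in DATE_FIELDS[table]
# as seconds since the UNIX epoch, coercing naive datetimes to UTC first.
def _demo_floatify_datetime_fields():
    # type: () -> float
    data = {'zerver_stream': [
        {'date_created': timezone_make_aware(datetime.datetime(2017, 1, 1))},
    ]}  # type: TableData
    floatify_datetime_fields(data, 'zerver_stream')
    # The field is now a plain float, ready for ujson serialization.
    return data['zerver_stream'][0]['date_created']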
class Config(object):
'''
A Config object configures a single table for exporting (and,
    maybe some day, importing as well).
You should never mutate Config objects as part of the export;
instead use the data to determine how you populate other
data structures.
There are parent/children relationships between Config objects.
The parent should be instantiated first. The child will
append itself to the parent's list of children.
'''
def __init__(self, table=None, model=None,
normal_parent=None, virtual_parent=None,
filter_args=None, custom_fetch=None, custom_tables=None,
post_process_data=None,
concat_and_destroy=None, id_source=None, source_filter=None,
parent_key=None, use_all=False, is_seeded=False, exclude=None):
# type: (str, Any, Config, Config, FilterArgs, CustomFetch, List[TableName], PostProcessData, List[TableName], IdSource, SourceFilter, Field, bool, bool, List[Field]) -> None
assert table or custom_tables
self.table = table
self.model = model
self.normal_parent = normal_parent
self.virtual_parent = virtual_parent
self.filter_args = filter_args
self.parent_key = parent_key
self.use_all = use_all
self.is_seeded = is_seeded
self.exclude = exclude
self.custom_fetch = custom_fetch
self.custom_tables = custom_tables
self.post_process_data = post_process_data
self.concat_and_destroy = concat_and_destroy
self.id_source = id_source
self.source_filter = source_filter
self.children = [] # type: List[Config]
if normal_parent:
self.parent = normal_parent
else:
self.parent = None
if virtual_parent and normal_parent:
raise Exception('''
If you specify a normal_parent, please
do not create a virtual_parent.
''')
if normal_parent:
normal_parent.children.append(self)
elif virtual_parent:
virtual_parent.children.append(self)
elif not is_seeded:
raise Exception('''
You must specify a parent if you are
not using is_seeded.
''')
if self.id_source:
if self.id_source[0] != self.virtual_parent.table:
raise Exception('''
Configuration error. To populate %s, you
want data from %s, but that differs from
the table name of your virtual parent (%s),
                    which suggests you may not have set up
the ordering correctly. You may simply
need to assign a virtual_parent, or there
may be deeper issues going on.''' % (
self.table,
self.id_source[0],
self.virtual_parent.table))
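# Minimal illustration of the parent/child wiring (hypothetical Blog and
# Article models; the real tree is assembled in get_realm_config below):
#
#     blog_config = Config(table='zerver_blog', model=Blog, is_seeded=True)
#     Config(table='zerver_article', model=Article,
#            normal_parent=blog_config, parent_key='blog_id__in')
#
# Instantiating the child appends it to blog_config.children, so
# export_from_config visits blogs first and then their articles.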
def export_from_config(response, config, seed_object=None, context=None):
# type: (TableData, Config, Any, Context) -> None
table = config.table
parent = config.parent
model = config.model
if context is None:
context = {}
if table:
exported_tables = [table]
else:
exported_tables = config.custom_tables
for t in exported_tables:
logging.info('Exporting via export_from_config: %s' % (t,))
rows = None
if config.is_seeded:
rows = [seed_object]
elif config.custom_fetch:
config.custom_fetch(
response=response,
config=config,
context=context
)
if config.custom_tables:
for t in config.custom_tables:
if t not in response:
raise Exception('Custom fetch failed to populate %s' % (t,))
elif config.concat_and_destroy:
# When we concat_and_destroy, we are working with
# temporary "tables" that are lists of records that
# should already be ready to export.
data = [] # type: List[Record]
for t in config.concat_and_destroy:
data += response[t]
del response[t]
logging.info('Deleted temporary %s' % (t,))
response[table] = data
elif config.use_all:
query = model.objects.all()
rows = list(query)
elif config.normal_parent:
# In this mode, our current model is figuratively Article,
# and normal_parent is figuratively Blog, and
# now we just need to get all the articles
# contained by the blogs.
model = config.model
parent_ids = [r['id'] for r in response[parent.table]]
filter_parms = {config.parent_key: parent_ids}
if config.filter_args:
filter_parms.update(config.filter_args)
query = model.objects.filter(**filter_parms)
rows = list(query)
elif config.id_source:
# In this mode, we are the figurative Blog, and we now
# need to look at the current response to get all the
# blog ids from the Article rows we fetched previously.
model = config.model
# This will be a tuple of the form ('zerver_article', 'blog').
(child_table, field) = config.id_source
child_rows = response[child_table]
if config.source_filter:
child_rows = [r for r in child_rows if config.source_filter(r)]
lookup_ids = [r[field] for r in child_rows]
filter_parms = dict(id__in=lookup_ids)
if config.filter_args:
filter_parms.update(config.filter_args)
query = model.objects.filter(**filter_parms)
rows = list(query)
# Post-process rows (which won't apply to custom fetches/concats)
if rows is not None:
response[table] = make_raw(rows, exclude=config.exclude)
if table in DATE_FIELDS:
floatify_datetime_fields(response, table)
if config.post_process_data:
config.post_process_data(
response=response,
config=config,
context=context
)
# Now walk our children. It's extremely important to respect
# the order of children here.
for child_config in config.children:
export_from_config(
response=response,
config=child_config,
context=context,
)
def get_realm_config():
# type: () -> Config
# This is common, public information about the realm that we can share
# with all realm users.
realm_config = Config(
table='zerver_realm',
is_seeded=True
)
Config(
table='zerver_defaultstream',
model=DefaultStream,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmemoji',
model=RealmEmoji,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmdomain',
model=RealmDomain,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_realmfilter',
model=RealmFilter,
normal_parent=realm_config,
parent_key='realm_id__in',
)
Config(
table='zerver_client',
model=Client,
virtual_parent=realm_config,
use_all=True
)
user_profile_config = Config(
custom_tables=[
'zerver_userprofile',
'zerver_userprofile_mirrordummy',
],
# set table for children who treat us as normal parent
table='zerver_userprofile',
virtual_parent=realm_config,
custom_fetch=fetch_user_profile,
)
Config(
custom_tables=[
'zerver_userprofile_crossrealm',
],
virtual_parent=user_profile_config,
custom_fetch=fetch_user_profile_cross_realm,
)
Config(
table='zerver_userpresence',
model=UserPresence,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_useractivity',
model=UserActivity,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
Config(
table='zerver_useractivityinterval',
model=UserActivityInterval,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
# Some of these tables are intermediate "tables" that we
# create only for the export. Think of them as similar to views.
user_subscription_config = Config(
table='_user_subscription',
model=Subscription,
normal_parent=user_profile_config,
filter_args={'recipient__type': Recipient.PERSONAL},
parent_key='user_profile__in',
)
Config(
table='_user_recipient',
model=Recipient,
virtual_parent=user_subscription_config,
id_source=('_user_subscription', 'recipient'),
)
#
stream_subscription_config = Config(
table='_stream_subscription',
model=Subscription,
normal_parent=user_profile_config,
filter_args={'recipient__type': Recipient.STREAM},
parent_key='user_profile__in',
)
stream_recipient_config = Config(
table='_stream_recipient',
model=Recipient,
virtual_parent=stream_subscription_config,
id_source=('_stream_subscription', 'recipient'),
)
Config(
table='zerver_stream',
model=Stream,
virtual_parent=stream_recipient_config,
id_source=('_stream_recipient', 'type_id'),
source_filter=lambda r: r['type'] == Recipient.STREAM,
exclude=['email_token'],
post_process_data=sanity_check_stream_data
)
#
Config(
custom_tables=[
'_huddle_recipient',
'_huddle_subscription',
'zerver_huddle',
],
normal_parent=user_profile_config,
custom_fetch=fetch_huddle_objects,
)
# Now build permanent tables from our temp tables.
Config(
table='zerver_recipient',
virtual_parent=user_profile_config,
concat_and_destroy=[
'_user_recipient',
'_stream_recipient',
'_huddle_recipient',
],
)
Config(
table='zerver_subscription',
virtual_parent=user_profile_config,
concat_and_destroy=[
'_user_subscription',
'_stream_subscription',
'_huddle_subscription',
]
)
return realm_config
def sanity_check_stream_data(response, config, context):
# type: (TableData, Config, Context) -> None
if context['exportable_user_ids'] is not None:
# If we restrict which user ids are exportable,
        # the way that we find streams is a little too
# complex to have a sanity check.
return
actual_streams = set([stream.name for stream in Stream.objects.filter(realm=response["zerver_realm"][0]['id'])])
streams_in_response = set([stream['name'] for stream in response['zerver_stream']])
if streams_in_response != actual_streams:
print(streams_in_response - actual_streams)
print(actual_streams - streams_in_response)
raise Exception('''
zerver_stream data does not match
Stream.objects.all().
Please investigate!
''')
def fetch_user_profile(response, config, context):
# type: (TableData, Config, Context) -> None
realm = context['realm']
exportable_user_ids = context['exportable_user_ids']
query = UserProfile.objects.filter(realm_id=realm.id)
exclude = ['password', 'api_key']
rows = make_raw(list(query), exclude=exclude)
normal_rows = [] # type: List[Record]
dummy_rows = [] # type: List[Record]
for row in rows:
if exportable_user_ids is not None:
if row['id'] in exportable_user_ids:
assert not row['is_mirror_dummy']
else:
# Convert non-exportable users to
# inactive is_mirror_dummy users.
row['is_mirror_dummy'] = True
row['is_active'] = False
if row['is_mirror_dummy']:
dummy_rows.append(row)
else:
normal_rows.append(row)
response['zerver_userprofile'] = normal_rows
response['zerver_userprofile_mirrordummy'] = dummy_rows
def fetch_user_profile_cross_realm(response, config, context):
# type: (TableData, Config, Context) -> None
realm = context['realm']
if realm.string_id == "zulip":
response['zerver_userprofile_crossrealm'] = []
else:
response['zerver_userprofile_crossrealm'] = [dict(email=x.email, id=x.id) for x in [
get_user_profile_by_email(settings.NOTIFICATION_BOT),
get_user_profile_by_email(settings.EMAIL_GATEWAY_BOT),
get_user_profile_by_email(settings.WELCOME_BOT),
]]
def fetch_attachment_data(response, realm_id, message_ids):
# type: (TableData, int, Set[int]) -> None
filter_args = {'realm_id': realm_id}
query = Attachment.objects.filter(**filter_args)
response['zerver_attachment'] = make_raw(list(query))
floatify_datetime_fields(response, 'zerver_attachment')
'''
We usually export most messages for the realm, but not
quite ALL messages for the realm. So, we need to
clean up our attachment data to have correct
values for response['zerver_attachment'][<n>]['messages'].
'''
for row in response['zerver_attachment']:
        filtered_message_ids = set(row['messages']).intersection(message_ids)
        row['messages'] = sorted(list(filtered_message_ids))
'''
Attachments can be connected to multiple messages, although
it's most common to have just one message. Regardless,
if none of those message(s) survived the filtering above
for a particular attachment, then we won't export the
attachment row.
'''
response['zerver_attachment'] = [
row for row in response['zerver_attachment']
if row['messages']]
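# Worked example (hypothetical ids): an attachment row with
# messages=[1, 2, 9] filtered against message_ids={1, 2, 3} becomes
# messages=[1, 2]; a row whose intersection is empty is dropped
# entirely by the list comprehension above.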
def fetch_huddle_objects(response, config, context):
# type: (TableData, Config, Context) -> None
realm = context['realm']
user_profile_ids = set(r['id'] for r in response[config.parent.table])
# First we get all huddles involving someone in the realm.
realm_huddle_subs = Subscription.objects.select_related("recipient").filter(recipient__type=Recipient.HUDDLE,
user_profile__in=user_profile_ids)
realm_huddle_recipient_ids = set(sub.recipient_id for sub in realm_huddle_subs)
# Mark all Huddles whose recipient ID contains a cross-realm user.
unsafe_huddle_recipient_ids = set()
for sub in Subscription.objects.select_related().filter(recipient__in=realm_huddle_recipient_ids):
if sub.user_profile.realm != realm:
# In almost every case the other realm will be zulip.com
unsafe_huddle_recipient_ids.add(sub.recipient_id)
# Now filter down to just those huddles that are entirely within the realm.
#
# This is important for ensuring that the User objects needed
# to import it on the other end exist (since we're only
# exporting the users from this realm), at the cost of losing
# some of these cross-realm messages.
huddle_subs = [sub for sub in realm_huddle_subs if sub.recipient_id not in unsafe_huddle_recipient_ids]
huddle_recipient_ids = set(sub.recipient_id for sub in huddle_subs)
huddle_ids = set(sub.recipient.type_id for sub in huddle_subs)
huddle_subscription_dicts = make_raw(huddle_subs)
huddle_recipients = make_raw(Recipient.objects.filter(id__in=huddle_recipient_ids))
response['_huddle_recipient'] = huddle_recipients
response['_huddle_subscription'] = huddle_subscription_dicts
response['zerver_huddle'] = make_raw(Huddle.objects.filter(id__in=huddle_ids))
def fetch_usermessages(realm, message_ids, user_profile_ids, message_filename):
# type: (Realm, Set[int], Set[int], Path) -> List[Record]
# UserMessage export security rule: You can export UserMessages
# for the messages you exported for the users in your realm.
user_message_query = UserMessage.objects.filter(user_profile__realm=realm,
message_id__in=message_ids)
user_message_chunk = []
for user_message in user_message_query:
if user_message.user_profile_id not in user_profile_ids:
continue
user_message_obj = model_to_dict(user_message)
user_message_obj['flags_mask'] = user_message.flags.mask
del user_message_obj['flags']
user_message_chunk.append(user_message_obj)
logging.info("Fetched UserMessages for %s" % (message_filename,))
return user_message_chunk
def export_usermessages_batch(input_path, output_path):
# type: (Path, Path) -> None
"""As part of the system for doing parallel exports, this runs on one
batch of Message objects and adds the corresponding UserMessage
objects. (This is called by the export_usermessage_batch
management command)."""
with open(input_path, "r") as input_file:
output = ujson.loads(input_file.read())
message_ids = [item['id'] for item in output['zerver_message']]
user_profile_ids = set(output['zerver_userprofile_ids'])
del output['zerver_userprofile_ids']
realm = Realm.objects.get(id=output['realm_id'])
del output['realm_id']
output['zerver_usermessage'] = fetch_usermessages(realm, set(message_ids), user_profile_ids, output_path)
write_message_export(output_path, output)
os.unlink(input_path)
def write_message_export(message_filename, output):
# type: (Path, MessageOutput) -> None
write_data_to_file(output_file=message_filename, data=output)
logging.info("Dumped to %s" % (message_filename,))
def export_partial_message_files(realm, response, chunk_size=1000, output_dir=None):
# type: (Realm, TableData, int, Path) -> Set[int]
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="zulip-export")
def get_ids(records):
# type: (List[Record]) -> Set[int]
return set(x['id'] for x in records)
# Basic security rule: You can export everything either...
# - sent by someone in your exportable_user_ids
# OR
# - received by someone in your exportable_user_ids (which
# equates to a recipient object we are exporting)
#
# TODO: In theory, you should be able to export messages in
# cross-realm PM threads; currently, this only exports cross-realm
# messages received by your realm that were sent by Zulip system
# bots (e.g. emailgateway, notification-bot).
    # Here, "we" and "us" refer to the inner circle of users who
# were specified as being allowed to be exported. "Them"
# refers to other users.
user_ids_for_us = get_ids(
response['zerver_userprofile']
)
recipient_ids_for_us = get_ids(response['zerver_recipient'])
ids_of_our_possible_senders = get_ids(
response['zerver_userprofile'] +
response['zerver_userprofile_mirrordummy'] +
response['zerver_userprofile_crossrealm'])
ids_of_non_exported_possible_recipients = ids_of_our_possible_senders - user_ids_for_us
recipients_for_them = Recipient.objects.filter(
type=Recipient.PERSONAL,
type_id__in=ids_of_non_exported_possible_recipients).values("id")
recipient_ids_for_them = get_ids(recipients_for_them)
# We capture most messages here, since the
# recipients we subscribe to are also the
# recipients of most messages we send.
messages_we_received = Message.objects.filter(
sender__in=ids_of_our_possible_senders,
recipient__in=recipient_ids_for_us,
).order_by('id')
    # This should pick up stragglers: messages we sent
    # where the recipient wasn't subscribed to by any of
    # us (such as PMs to "them").
messages_we_sent_to_them = Message.objects.filter(
sender__in=user_ids_for_us,
recipient__in=recipient_ids_for_them,
).order_by('id')
message_queries = [
messages_we_received,
messages_we_sent_to_them
]
all_message_ids = set() # type: Set[int]
dump_file_id = 1
for message_query in message_queries:
dump_file_id = write_message_partial_for_query(
realm=realm,
message_query=message_query,
dump_file_id=dump_file_id,
all_message_ids=all_message_ids,
output_dir=output_dir,
chunk_size=chunk_size,
user_profile_ids=user_ids_for_us,
)
return all_message_ids
def write_message_partial_for_query(realm, message_query, dump_file_id,
all_message_ids, output_dir,
chunk_size, user_profile_ids):
# type: (Realm, Any, int, Set[int], Path, int, Set[int]) -> int
min_id = -1
while True:
actual_query = message_query.filter(id__gt=min_id)[0:chunk_size]
message_chunk = make_raw(actual_query)
message_ids = set(m['id'] for m in message_chunk)
assert len(message_ids.intersection(all_message_ids)) == 0
all_message_ids.update(message_ids)
if len(message_chunk) == 0:
break
# Figure out the name of our shard file.
message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
message_filename += '.partial'
logging.info("Fetched Messages for %s" % (message_filename,))
# Clean up our messages.
table_data = {} # type: TableData
table_data['zerver_message'] = message_chunk
floatify_datetime_fields(table_data, 'zerver_message')
# Build up our output for the .partial file, which needs
# a list of user_profile_ids to search for (as well as
# the realm id).
output = {} # type: MessageOutput
output['zerver_message'] = table_data['zerver_message']
output['zerver_userprofile_ids'] = list(user_profile_ids)
output['realm_id'] = realm.id
# And write the data.
write_message_export(message_filename, output)
min_id = max(message_ids)
dump_file_id += 1
return dump_file_id
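# The id__gt loop above is keyset pagination: instead of OFFSET, each
# chunk resumes from the largest id seen so far, which stays fast even
# on tables with millions of rows. A minimal sketch of the pattern:
#
#     min_id = -1
#     while True:
#         chunk = list(query.filter(id__gt=min_id)[0:1000])
#         if not chunk:
#             break
#         min_id = chunk[-1].id
#
# (assuming query is already ordered by id, as the callers above ensure).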
def export_uploads_and_avatars(realm, output_dir):
# type: (Realm, Path) -> None
uploads_output_dir = os.path.join(output_dir, 'uploads')
avatars_output_dir = os.path.join(output_dir, 'avatars')
for output_dir in (uploads_output_dir, avatars_output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if settings.LOCAL_UPLOADS_DIR:
# Small installations and developers will usually just store files locally.
export_uploads_from_local(realm,
local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "files"),
output_dir=uploads_output_dir)
export_avatars_from_local(realm,
local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars"),
output_dir=avatars_output_dir)
else:
# Some bigger installations will have their data stored on S3.
export_files_from_s3(realm,
settings.S3_AVATAR_BUCKET,
output_dir=avatars_output_dir,
processing_avatars=True)
export_files_from_s3(realm,
settings.S3_AUTH_UPLOADS_BUCKET,
output_dir=uploads_output_dir)
def export_files_from_s3(realm, bucket_name, output_dir, processing_avatars=False):
# type: (Realm, str, Path, bool) -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.get_bucket(bucket_name, validate=True)
records = []
logging.info("Downloading uploaded files from %s" % (bucket_name))
avatar_hash_values = set()
user_ids = set()
if processing_avatars:
bucket_list = bucket.list()
for user_profile in UserProfile.objects.filter(realm=realm):
avatar_hash = user_avatar_hash(user_profile.email)
avatar_hash_values.add(avatar_hash)
avatar_hash_values.add(avatar_hash + ".original")
user_ids.add(user_profile.id)
else:
bucket_list = bucket.list(prefix="%s/" % (realm.id,))
if settings.EMAIL_GATEWAY_BOT is not None:
email_gateway_bot = get_user_profile_by_email(settings.EMAIL_GATEWAY_BOT)
else:
email_gateway_bot = None
count = 0
for bkey in bucket_list:
if processing_avatars and bkey.name not in avatar_hash_values:
continue
key = bucket.get_key(bkey.name)
# This can happen if an email address has moved realms
if 'realm_id' in key.metadata and key.metadata['realm_id'] != str(realm.id):
if email_gateway_bot is None or key.metadata['user_profile_id'] != str(email_gateway_bot.id):
raise Exception("Key metadata problem: %s %s / %s" % (key.name, key.metadata, realm.id))
# Email gateway bot sends messages, potentially including attachments, cross-realm.
print("File uploaded by email gateway bot: %s / %s" % (key.name, key.metadata))
elif processing_avatars:
if 'user_profile_id' not in key.metadata:
raise Exception("Missing user_profile_id in key metadata: %s" % (key.metadata,))
if int(key.metadata['user_profile_id']) not in user_ids:
raise Exception("Wrong user_profile_id in key metadata: %s" % (key.metadata,))
elif 'realm_id' not in key.metadata:
raise Exception("Missing realm_id in key metadata: %s" % (key.metadata,))
record = dict(s3_path=key.name, bucket=bucket_name,
size=key.size, last_modified=key.last_modified,
content_type=key.content_type, md5=key.md5)
record.update(key.metadata)
# A few early avatars don't have 'realm_id' on the object; fix their metadata
user_profile = get_user_profile_by_id(record['user_profile_id'])
if 'realm_id' not in record:
record['realm_id'] = user_profile.realm_id
record['user_profile_email'] = user_profile.email
if processing_avatars:
dirname = output_dir
filename = os.path.join(dirname, key.name)
record['path'] = key.name
else:
fields = key.name.split('/')
if len(fields) != 3:
raise Exception("Suspicious key %s" % (key.name))
dirname = os.path.join(output_dir, fields[1])
filename = os.path.join(dirname, fields[2])
record['path'] = os.path.join(fields[1], fields[2])
if not os.path.exists(dirname):
os.makedirs(dirname)
key.get_contents_to_filename(filename)
records.append(record)
count += 1
if (count % 100 == 0):
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def export_uploads_from_local(realm, local_dir, output_dir):
# type: (Realm, Path, Path) -> None
count = 0
records = []
for attachment in Attachment.objects.filter(realm_id=realm.id):
local_path = os.path.join(local_dir, attachment.path_id)
output_path = os.path.join(output_dir, attachment.path_id)
mkdir_p(os.path.dirname(output_path))
subprocess.check_call(["cp", "-a", local_path, output_path])
stat = os.stat(local_path)
record = dict(realm_id=attachment.realm_id,
user_profile_id=attachment.owner.id,
user_profile_email=attachment.owner.email,
s3_path=attachment.path_id,
path=attachment.path_id,
size=stat.st_size,
last_modified=stat.st_mtime,
content_type=None)
records.append(record)
count += 1
if (count % 100 == 0):
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def export_avatars_from_local(realm, local_dir, output_dir):
# type: (Realm, Path, Path) -> None
count = 0
records = []
users = list(UserProfile.objects.filter(realm=realm))
users += [
get_user_profile_by_email(settings.NOTIFICATION_BOT),
get_user_profile_by_email(settings.EMAIL_GATEWAY_BOT),
get_user_profile_by_email(settings.WELCOME_BOT),
]
for user in users:
if user.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
continue
avatar_hash = user_avatar_hash(user.email)
wildcard = os.path.join(local_dir, avatar_hash + '.*')
for local_path in glob.glob(wildcard):
logging.info('Copying avatar file for user %s from %s' % (
user.email, local_path))
fn = os.path.basename(local_path)
output_path = os.path.join(output_dir, fn)
mkdir_p(str(os.path.dirname(output_path)))
subprocess.check_call(["cp", "-a", str(local_path), str(output_path)])
stat = os.stat(local_path)
record = dict(realm_id=realm.id,
user_profile_id=user.id,
user_profile_email=user.email,
s3_path=fn,
path=fn,
size=stat.st_size,
last_modified=stat.st_mtime,
content_type=None)
records.append(record)
count += 1
if (count % 100 == 0):
logging.info("Finished %s" % (count,))
with open(os.path.join(output_dir, "records.json"), "w") as records_file:
ujson.dump(records, records_file, indent=4)
def do_write_stats_file_for_realm_export(output_dir):
# type: (Path) -> None
stats_file = os.path.join(output_dir, 'stats.txt')
realm_file = os.path.join(output_dir, 'realm.json')
attachment_file = os.path.join(output_dir, 'attachment.json')
message_files = glob.glob(os.path.join(output_dir, 'messages-*.json'))
fns = sorted([attachment_file] + message_files + [realm_file])
logging.info('Writing stats file: %s\n' % (stats_file,))
with open(stats_file, 'w') as f:
for fn in fns:
f.write(os.path.basename(fn) + '\n')
payload = open(fn).read()
data = ujson.loads(payload)
for k in sorted(data):
f.write('%5d %s\n' % (len(data[k]), k))
f.write('\n')
avatar_file = os.path.join(output_dir, 'avatars/records.json')
uploads_file = os.path.join(output_dir, 'uploads/records.json')
for fn in [avatar_file, uploads_file]:
f.write(fn+'\n')
payload = open(fn).read()
data = ujson.loads(payload)
f.write('%5d records\n' % len(data))
f.write('\n')
def do_export_realm(realm, output_dir, threads, exportable_user_ids=None):
# type: (Realm, Path, int, Set[int]) -> None
response = {} # type: TableData
# We need at least one thread running to export
# UserMessage rows. The management command should
# enforce this for us.
if not settings.TEST_SUITE:
assert threads >= 1
assert os.path.exists("./manage.py")
realm_config = get_realm_config()
create_soft_link(source=output_dir, in_progress=True)
logging.info("Exporting data from get_realm_config()...")
export_from_config(
response=response,
config=realm_config,
seed_object=realm,
context=dict(realm=realm, exportable_user_ids=exportable_user_ids)
)
logging.info('...DONE with get_realm_config() data')
export_file = os.path.join(output_dir, "realm.json")
write_data_to_file(output_file=export_file, data=response)
sanity_check_output(response)
logging.info("Exporting uploaded files and avatars")
export_uploads_and_avatars(realm, output_dir)
# We (sort of) export zerver_message rows here. We write
# them to .partial files that are subsequently fleshed out
# by parallel processes to add in zerver_usermessage data.
# This is for performance reasons, of course. Some installations
# have millions of messages.
logging.info("Exporting .partial files messages")
message_ids = export_partial_message_files(realm, response, output_dir=output_dir)
logging.info('%d messages were exported' % (len(message_ids)))
# zerver_attachment
export_attachment_table(realm=realm, output_dir=output_dir, message_ids=message_ids)
# Start parallel jobs to export the UserMessage objects.
launch_user_message_subprocesses(threads=threads, output_dir=output_dir)
logging.info("Finished exporting %s" % (realm.string_id))
create_soft_link(source=output_dir, in_progress=False)
def export_attachment_table(realm, output_dir, message_ids):
# type: (Realm, Path, Set[int]) -> None
response = {} # type: TableData
fetch_attachment_data(response=response, realm_id=realm.id, message_ids=message_ids)
output_file = os.path.join(output_dir, "attachment.json")
logging.info('Writing attachment table data to %s' % (output_file,))
write_data_to_file(output_file=output_file, data=response)
def create_soft_link(source, in_progress=True):
# type: (Path, bool) -> None
is_done = not in_progress
in_progress_link = '/tmp/zulip-export-in-progress'
done_link = '/tmp/zulip-export-most-recent'
if in_progress:
new_target = in_progress_link
else:
subprocess.check_call(['rm', '-f', in_progress_link])
new_target = done_link
subprocess.check_call(["ln", "-nsf", source, new_target])
if is_done:
logging.info('See %s for output files' % (new_target,))
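# In effect, /tmp/zulip-export-in-progress points at the working directory
# while an export runs, and /tmp/zulip-export-most-recent is re-pointed on
# completion. The -n flag makes ln replace an existing symlink rather than
# descend into it, so repeated exports keep exactly one level of link.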
def launch_user_message_subprocesses(threads, output_dir):
# type: (int, Path) -> None
logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows' % (threads,))
def run_job(shard):
# type: (str) -> int
subprocess.call(["./manage.py", 'export_usermessage_batch', '--path',
str(output_dir), '--thread', shard])
return 0
for (status, job) in run_parallel(run_job,
[str(x) for x in range(0, threads)],
threads=threads):
print("Shard %s finished, status %s" % (job, status))
def do_export_user(user_profile, output_dir):
# type: (UserProfile, Path) -> None
response = {} # type: TableData
export_single_user(user_profile, response)
export_file = os.path.join(output_dir, "user.json")
write_data_to_file(output_file=export_file, data=response)
logging.info("Exporting messages")
export_messages_single_user(user_profile, output_dir=output_dir)
def export_single_user(user_profile, response):
# type: (UserProfile, TableData) -> None
config = get_single_user_config()
export_from_config(
response=response,
config=config,
seed_object=user_profile,
)
def get_single_user_config():
# type: () -> Config
# zerver_userprofile
user_profile_config = Config(
table='zerver_userprofile',
is_seeded=True,
exclude=['password', 'api_key'],
)
# zerver_subscription
subscription_config = Config(
table='zerver_subscription',
model=Subscription,
normal_parent=user_profile_config,
parent_key='user_profile__in',
)
# zerver_recipient
recipient_config = Config(
table='zerver_recipient',
model=Recipient,
virtual_parent=subscription_config,
id_source=('zerver_subscription', 'recipient'),
)
# zerver_stream
Config(
table='zerver_stream',
model=Stream,
virtual_parent=recipient_config,
id_source=('zerver_recipient', 'type_id'),
source_filter=lambda r: r['type'] == Recipient.STREAM,
exclude=['email_token'],
)
return user_profile_config
def export_messages_single_user(user_profile, chunk_size=1000, output_dir=None):
# type: (UserProfile, int, Path) -> None
user_message_query = UserMessage.objects.filter(user_profile=user_profile).order_by("id")
min_id = -1
dump_file_id = 1
while True:
actual_query = user_message_query.select_related("message", "message__sending_client").filter(id__gt=min_id)[0:chunk_size]
user_message_chunk = [um for um in actual_query]
user_message_ids = set(um.id for um in user_message_chunk)
if len(user_message_chunk) == 0:
break
message_chunk = []
for user_message in user_message_chunk:
item = model_to_dict(user_message.message)
item['flags'] = user_message.flags_list()
item['flags_mask'] = user_message.flags.mask
# Add a few nice, human-readable details
item['sending_client_name'] = user_message.message.sending_client.name
item['display_recipient'] = get_display_recipient(user_message.message.recipient)
message_chunk.append(item)
message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
logging.info("Fetched Messages for %s" % (message_filename,))
output = {'zerver_message': message_chunk}
floatify_datetime_fields(output, 'zerver_message')
write_message_export(message_filename, output)
min_id = max(user_message_ids)
dump_file_id += 1
# Code from here is the realm import code path
# id_maps is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize id_maps with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
id_maps = {
'client': {},
'user_profile': {},
} # type: Dict[str, Dict[int, int]]
def update_id_map(table, old_id, new_id):
# type: (TableName, int, int) -> None
if table not in id_maps:
raise Exception('''
Table %s is not initialized in id_maps, which could
mean that we have not thought through circular
dependencies.
''' % (table,))
id_maps[table][old_id] = new_id
def fix_datetime_fields(data, table):
# type: (TableData, TableName) -> None
for item in data[table]:
for field_name in DATE_FIELDS[table]:
if item[field_name] is not None:
item[field_name] = datetime.datetime.fromtimestamp(item[field_name], tz=timezone_utc)
def convert_to_id_fields(data, table, field_name):
# type: (TableData, TableName, Field) -> None
'''
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
This function handles the simple case where we simply rename
the fields. For cases where we need to munge ids in the
database, see re_map_foreign_keys.
'''
for item in data[table]:
item[field_name + "_id"] = item[field_name]
del item[field_name]
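# Worked example (hypothetical row): given
#     data = {'zerver_message': [{'id': 7, 'recipient': 3}]}
# convert_to_id_fields(data, 'zerver_message', 'recipient') leaves
#     {'id': 7, 'recipient_id': 3}
# which matches the column name that bulk_create expects.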
def re_map_foreign_keys(data, table, field_name, related_table, verbose=False):
# type: (TableData, TableName, Field, TableName, bool) -> None
'''
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.)
'''
lookup_table = id_maps[related_table]
for item in data[table]:
old_id = item[field_name]
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s%s from %s to %s' % (table,
field_name + '_id',
old_id,
new_id))
else:
new_id = old_id
item[field_name + "_id"] = new_id
del item[field_name]
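# Worked example (hypothetical ids): with
#     id_maps['client'] == {1: 42}
#     data = {'zerver_useractivity': [{'client': 1}, {'client': 5}]}
# re_map_foreign_keys(data, 'zerver_useractivity', 'client', 'client')
# rewrites the rows to {'client_id': 42} and {'client_id': 5}; mapped
# ids are translated, unmapped ids pass through unchanged.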
def fix_bitfield_keys(data, table, field_name):
# type: (TableData, TableName, Field) -> None
for item in data[table]:
item[field_name] = item[field_name + '_mask']
del item[field_name + '_mask']
def bulk_import_model(data, model, table, dump_file_id=None):
# type: (TableData, Any, TableName, str) -> None
# TODO, deprecate dump_file_id
model.objects.bulk_create(model(**item) for item in data[table])
if dump_file_id is None:
logging.info("Successfully imported %s from %s." % (model, table))
else:
logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check if a Client object already exists, and so we need to
# remap all Client IDs to the values in the new DB.
def bulk_import_client(data, model, table):
# type: (TableData, Any, TableName) -> None
for item in data[table]:
try:
client = Client.objects.get(name=item['name'])
except Client.DoesNotExist:
client = Client.objects.create(name=item['name'])
update_id_map(table='client', old_id=item['id'], new_id=client.id)
def import_uploads_local(import_dir, processing_avatars=False):
# type: (Path, bool) -> None
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename) as records_file:
records = ujson.loads(records_file.read())
for record in records:
if processing_avatars:
# For avatars, we need to rehash the user's email with the
# new server's avatar salt
avatar_hash = user_avatar_hash(record['user_profile_email'])
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_hash)
if record['s3_path'].endswith('.original'):
file_path += '.original'
else:
file_path += '.png'
else:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", record['s3_path'])
orig_file_path = os.path.join(import_dir, record['path'])
if not os.path.exists(os.path.dirname(file_path)):
subprocess.check_call(["mkdir", "-p", os.path.dirname(file_path)])
shutil.copy(orig_file_path, file_path)
def import_uploads_s3(bucket_name, import_dir, processing_avatars=False):
# type: (str, Path, bool) -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.get_bucket(bucket_name, validate=True)
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename) as records_file:
records = ujson.loads(records_file.read())
for record in records:
key = Key(bucket)
if processing_avatars:
# For avatars, we need to rehash the user's email with the
# new server's avatar salt
avatar_hash = user_avatar_hash(record['user_profile_email'])
key.key = avatar_hash
if record['s3_path'].endswith('.original'):
key.key += '.original'
else:
key.key = record['s3_path']
user_profile_id = int(record['user_profile_id'])
# Support email gateway bot and other cross-realm messages
if user_profile_id in id_maps["user_profile"]:
logging.info("Uploaded by ID mapped user: %s!" % (user_profile_id,))
user_profile_id = id_maps["user_profile"][user_profile_id]
user_profile = get_user_profile_by_id(user_profile_id)
key.set_metadata("user_profile_id", str(user_profile.id))
key.set_metadata("realm_id", str(user_profile.realm_id))
key.set_metadata("orig_last_modified", record['last_modified'])
headers = {u'Content-Type': record['content_type']}
key.set_contents_from_filename(os.path.join(import_dir, record['path']), headers=headers)
def import_uploads(import_dir, processing_avatars=False):
# type: (Path, bool) -> None
if processing_avatars:
logging.info("Importing avatars")
else:
logging.info("Importing uploaded files")
if settings.LOCAL_UPLOADS_DIR:
import_uploads_local(import_dir, processing_avatars=processing_avatars)
else:
if processing_avatars:
bucket_name = settings.S3_AVATAR_BUCKET
else:
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
import_uploads_s3(bucket_name, import_dir, processing_avatars=processing_avatars)
# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly. Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
def do_import_realm(import_dir):
# type: (Path) -> None
logging.info("Importing realm dump %s" % (import_dir,))
if not os.path.exists(import_dir):
raise Exception("Missing import directory!")
realm_data_filename = os.path.join(import_dir, "realm.json")
if not os.path.exists(realm_data_filename):
raise Exception("Missing realm.json file!")
logging.info("Importing realm data from %s" % (realm_data_filename,))
with open(realm_data_filename) as f:
data = ujson.load(f)
convert_to_id_fields(data, 'zerver_realm', 'notifications_stream')
fix_datetime_fields(data, 'zerver_realm')
realm = Realm(**data['zerver_realm'][0])
if realm.notifications_stream_id is not None:
notifications_stream_id = int(realm.notifications_stream_id)
else:
notifications_stream_id = None
realm.notifications_stream_id = None
realm.save()
bulk_import_client(data, Client, 'zerver_client')
# Email tokens will automatically be randomly generated when the
# Stream objects are created by Django.
fix_datetime_fields(data, 'zerver_stream')
convert_to_id_fields(data, 'zerver_stream', 'realm')
bulk_import_model(data, Stream, 'zerver_stream')
realm.notifications_stream_id = notifications_stream_id
realm.save()
convert_to_id_fields(data, "zerver_defaultstream", 'stream')
for (table, model) in realm_tables:
convert_to_id_fields(data, table, 'realm')
bulk_import_model(data, model, table)
# Remap the user IDs for notification_bot and friends to their
# appropriate IDs on this server
for item in data['zerver_userprofile_crossrealm']:
logging.info("Adding to ID map: %s %s" % (item['id'], get_user_profile_by_email(item['email']).id))
new_user_id = get_user_profile_by_email(item['email']).id
update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
# Merge in zerver_userprofile_mirrordummy
data['zerver_userprofile'] = data['zerver_userprofile'] + data['zerver_userprofile_mirrordummy']
del data['zerver_userprofile_mirrordummy']
data['zerver_userprofile'].sort(key=lambda r: r['id'])
fix_datetime_fields(data, 'zerver_userprofile')
convert_to_id_fields(data, 'zerver_userprofile', 'realm')
re_map_foreign_keys(data, 'zerver_userprofile', 'bot_owner', related_table="user_profile")
convert_to_id_fields(data, 'zerver_userprofile', 'default_sending_stream')
convert_to_id_fields(data, 'zerver_userprofile', 'default_events_register_stream')
for user_profile_dict in data['zerver_userprofile']:
user_profile_dict['password'] = None
user_profile_dict['api_key'] = random_api_key()
# Since Zulip doesn't use these permissions, drop them
del user_profile_dict['user_permissions']
del user_profile_dict['groups']
user_profiles = [UserProfile(**item) for item in data['zerver_userprofile']]
for user_profile in user_profiles:
user_profile.set_unusable_password()
UserProfile.objects.bulk_create(user_profiles)
if 'zerver_huddle' in data:
bulk_import_model(data, Huddle, 'zerver_huddle')
bulk_import_model(data, Recipient, 'zerver_recipient')
re_map_foreign_keys(data, 'zerver_subscription', 'user_profile', related_table="user_profile")
convert_to_id_fields(data, 'zerver_subscription', 'recipient')
bulk_import_model(data, Subscription, 'zerver_subscription')
fix_datetime_fields(data, 'zerver_userpresence')
re_map_foreign_keys(data, 'zerver_userpresence', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userpresence', 'client', related_table='client')
bulk_import_model(data, UserPresence, 'zerver_userpresence')
fix_datetime_fields(data, 'zerver_useractivity')
re_map_foreign_keys(data, 'zerver_useractivity', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_useractivity', 'client', related_table='client')
bulk_import_model(data, UserActivity, 'zerver_useractivity')
fix_datetime_fields(data, 'zerver_useractivityinterval')
re_map_foreign_keys(data, 'zerver_useractivityinterval', 'user_profile', related_table="user_profile")
bulk_import_model(data, UserActivityInterval, 'zerver_useractivityinterval')
# Import uploaded files and avatars
import_uploads(os.path.join(import_dir, "avatars"), processing_avatars=True)
import_uploads(os.path.join(import_dir, "uploads"))
# Import zerver_message and zerver_usermessage
import_message_data(import_dir)
# Do attachments AFTER message data is loaded.
# TODO: de-dup how we read these json files.
fn = os.path.join(import_dir, "attachment.json")
if not os.path.exists(fn):
raise Exception("Missing attachment.json file!")
logging.info("Importing attachment data from %s" % (fn,))
with open(fn) as f:
data = ujson.load(f)
import_attachments(data)
def import_message_data(import_dir):
# type: (Path) -> None
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, "messages-%06d.json" % (dump_file_id,))
if not os.path.exists(message_filename):
break
with open(message_filename) as f:
data = ujson.load(f)
logging.info("Importing message dump %s" % (message_filename,))
re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
convert_to_id_fields(data, 'zerver_message', 'recipient')
re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
fix_datetime_fields(data, 'zerver_message')
bulk_import_model(data, Message, 'zerver_message')
# Due to the structure of these message chunks, we're
# guaranteed to have already imported all the Message objects
# for this batch of UserMessage objects.
convert_to_id_fields(data, 'zerver_usermessage', 'message')
re_map_foreign_keys(data, 'zerver_usermessage', 'user_profile', related_table="user_profile")
fix_bitfield_keys(data, 'zerver_usermessage', 'flags')
bulk_import_model(data, UserMessage, 'zerver_usermessage')
dump_file_id += 1
def import_attachments(data):
# type: (TableData) -> None
# Clean up the data in zerver_attachment that is not
# relevant to our many-to-many import.
fix_datetime_fields(data, 'zerver_attachment')
re_map_foreign_keys(data, 'zerver_attachment', 'owner', related_table="user_profile")
convert_to_id_fields(data, 'zerver_attachment', 'realm')
# Configure ourselves. Django models many-to-many (m2m)
# relations asymmetrically. The parent here refers to the
    # Model that has the ManyToManyField. It is assumed here that
    # the child models have been loaded, but we are in turn
# responsible for loading the parents and the m2m rows.
parent_model = Attachment
parent_db_table_name = 'zerver_attachment'
parent_singular = 'attachment'
child_singular = 'message'
child_plural = 'messages'
m2m_table_name = 'zerver_attachment_messages'
parent_id = 'attachment_id'
child_id = 'message_id'
# First, build our list of many-to-many (m2m) rows.
# We do this in a slightly convoluted way to anticipate
# a future where we may need to call re_map_foreign_keys.
m2m_rows = [] # type: List[Record]
for parent_row in data[parent_db_table_name]:
for fk_id in parent_row[child_plural]:
m2m_row = {} # type: Record
m2m_row[parent_singular] = parent_row['id']
m2m_row[child_singular] = fk_id
m2m_rows.append(m2m_row)
# Create our table data for insert.
m2m_data = {m2m_table_name: m2m_rows} # type: TableData
convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
m2m_rows = m2m_data[m2m_table_name]
# Next, delete out our child data from the parent rows.
for parent_row in data[parent_db_table_name]:
del parent_row[child_plural]
# Next, load the parent rows.
bulk_import_model(data, parent_model, parent_db_table_name)
# Now, go back to our m2m rows.
# TODO: Do this the kosher Django way. We may find a
# better way to do this in Django 1.9 particularly.
with connection.cursor() as cursor:
sql_template = '''
insert into %s (%s, %s) values(%%s, %%s);''' % (m2m_table_name,
parent_id,
child_id)
tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
cursor.executemany(sql_template, tups)
logging.info('Successfully imported M2M table %s' % (m2m_table_name,))
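# With the names above, the statement rendered from sql_template is,
# in effect:
#
#     insert into zerver_attachment_messages
#         (attachment_id, message_id) values (%s, %s);
#
# and executemany binds one (attachment_id, message_id) tuple per m2m row.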
| SmartPeople/zulip | zerver/lib/export.py | Python | apache-2.0 | 61,414 |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import unittest
from shutil import rmtree
from StringIO import StringIO
import simplejson
import xml.dom.minidom
from webob import Request
from swift.account.server import AccountController, ACCOUNT_LISTING_LIMIT
from swift.common.utils import normalize_timestamp
class TestAccountController(unittest.TestCase):
""" Test swift.account_server.AccountController """
def setUp(self):
""" Set up for testing swift.account_server.AccountController """
self.testdir = os.path.join(os.path.dirname(__file__), 'account_server')
self.controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
""" Tear down for testing swift.account_server.AccountController """
try:
rmtree(self.testdir)
except OSError, err:
if err.errno != errno.ENOENT:
raise
def test_DELETE_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '0'})
resp = self.controller.DELETE(req)
self.assertEquals(resp.status_int, 404)
def test_DELETE_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = self.controller.DELETE(req)
self.assertEquals(resp.status_int, 204)
def test_DELETE_not_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = self.controller.DELETE(req)
# We now allow deleting non-empty accounts
self.assertEquals(resp.status_int, 204)
def test_DELETE_now_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '2',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = self.controller.DELETE(req)
self.assertEquals(resp.status_int, 204)
def test_HEAD_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 404)
def test_HEAD_empty_account(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers['x-account-container-count'], 0)
self.assertEquals(resp.headers['x-account-object-count'], 0)
self.assertEquals(resp.headers['x-account-bytes-used'], 0)
def test_HEAD_with_containers(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers['x-account-container-count'], 2)
self.assertEquals(resp.headers['x-account-object-count'], 0)
self.assertEquals(resp.headers['x-account-bytes-used'], 0)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '5'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers['x-account-container-count'], 2)
self.assertEquals(resp.headers['x-account-object-count'], 4)
self.assertEquals(resp.headers['x-account-bytes-used'], 6)
def test_PUT_not_found(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-PUT-Timestamp': normalize_timestamp(1),
'X-DELETE-Timestamp': normalize_timestamp(0),
'X-Object-Count': '1',
'X-Bytes-Used': '1',
'X-Timestamp': normalize_timestamp(0)})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 404)
def test_PUT(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 202)
def test_PUT_after_DELETE(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = self.controller.DELETE(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2)})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 403)
self.assertEquals(resp.body, 'Recently deleted')
def test_PUT_GET_metadata(self):
# Set metadata header
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test': 'Value'})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-account-meta-test'), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test2': 'Value2'})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-account-meta-test'), 'Value')
self.assertEquals(resp.headers.get('x-account-meta-test2'), 'Value2')
# Update metadata header
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Account-Meta-Test': 'New Value'})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-account-meta-test'), 'New Value')
# Send old update to metadata header
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Account-Meta-Test': 'Old Value'})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-account-meta-test'), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Account-Meta-Test': ''})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assert_('x-account-meta-test' not in resp.headers)
def test_POST_HEAD_metadata(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 201)
# Set metadata header
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test': 'Value'})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-account-meta-test'), 'Value')
# Update metadata header
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Account-Meta-Test': 'New Value'})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-account-meta-test'), 'New Value')
# Send old update to metadata header
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Account-Meta-Test': 'Old Value'})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-account-meta-test'), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Account-Meta-Test': ''})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assert_('x-account-meta-test' not in resp.headers)
def test_GET_not_found_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 404)
def test_GET_not_found_json(self):
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 404)
def test_GET_not_found_xml(self):
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 404)
def test_GET_empty_account_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
def test_GET_empty_account_json(self):
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
def test_GET_empty_account_xml(self):
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
def test_GET_over_limit(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?limit=%d' %
(ACCOUNT_LISTING_LIMIT + 1), environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 412)
def test_GET_with_containers_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body.strip().split('\n'), ['c1', 'c2'])
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body.strip().split('\n'), ['c1', 'c2'])
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.charset, 'utf-8')
def test_GET_with_containers_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(simplejson.loads(resp.body),
[{'count': 0, 'bytes': 0, 'name': 'c1'},
{'count': 0, 'bytes': 0, 'name': 'c2'}])
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(simplejson.loads(resp.body),
[{'count': 1, 'bytes': 2, 'name': 'c1'},
{'count': 3, 'bytes': 4, 'name': 'c2'}])
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(resp.charset, 'utf-8')
def test_GET_with_containers_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEquals(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEquals(len(listing), 2)
self.assertEquals(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEquals(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEquals(node.firstChild.nodeValue, 'c1')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEquals(node.firstChild.nodeValue, '0')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEquals(node.firstChild.nodeValue, '0')
self.assertEquals(listing[-1].nodeName, 'container')
container = \
[n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEquals(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEquals(node.firstChild.nodeValue, 'c2')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEquals(node.firstChild.nodeValue, '0')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEquals(node.firstChild.nodeValue, '0')
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEquals(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEquals(len(listing), 2)
self.assertEquals(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEquals(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEquals(node.firstChild.nodeValue, 'c1')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEquals(node.firstChild.nodeValue, '1')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEquals(node.firstChild.nodeValue, '2')
self.assertEquals(listing[-1].nodeName, 'container')
container = [n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEquals(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEquals(node.firstChild.nodeValue, 'c2')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEquals(node.firstChild.nodeValue, '3')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEquals(node.firstChild.nodeValue, '4')
self.assertEquals(resp.charset, 'utf-8')
def test_GET_limit_marker_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
for c in xrange(5):
req = Request.blank('/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?limit=3',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body.strip().split('\n'), ['c0', 'c1', 'c2'])
req = Request.blank('/sda1/p/a?limit=3&marker=c2',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body.strip().split('\n'), ['c3', 'c4'])
def test_GET_limit_marker_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
for c in xrange(5):
req = Request.blank('/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?limit=3&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(simplejson.loads(resp.body),
[{'count': 2, 'bytes': 3, 'name': 'c0'},
{'count': 2, 'bytes': 3, 'name': 'c1'},
{'count': 2, 'bytes': 3, 'name': 'c2'}])
req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(simplejson.loads(resp.body),
[{'count': 2, 'bytes': 3, 'name': 'c3'},
{'count': 2, 'bytes': 3, 'name': 'c4'}])
def test_GET_limit_marker_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
for c in xrange(5):
req = Request.blank('/sda1/p/a/c%d' % c,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': str(c + 1),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '3',
'X-Timestamp': normalize_timestamp(c)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?limit=3&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEquals(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEquals(len(listing), 3)
self.assertEquals(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEquals(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEquals(node.firstChild.nodeValue, 'c0')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEquals(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEquals(node.firstChild.nodeValue, '3')
self.assertEquals(listing[-1].nodeName, 'container')
container = [n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEquals(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEquals(node.firstChild.nodeValue, 'c2')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEquals(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEquals(node.firstChild.nodeValue, '3')
req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEquals(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEquals(len(listing), 2)
self.assertEquals(listing[0].nodeName, 'container')
container = [n for n in listing[0].childNodes if n.nodeName != '#text']
self.assertEquals(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEquals(node.firstChild.nodeValue, 'c3')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEquals(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEquals(node.firstChild.nodeValue, '3')
self.assertEquals(listing[-1].nodeName, 'container')
container = [n for n in listing[-1].childNodes if n.nodeName != '#text']
self.assertEquals(sorted([n.nodeName for n in container]),
['bytes', 'count', 'name'])
node = [n for n in container if n.nodeName == 'name'][0]
self.assertEquals(node.firstChild.nodeValue, 'c4')
node = [n for n in container if n.nodeName == 'count'][0]
self.assertEquals(node.firstChild.nodeValue, '2')
node = [n for n in container if n.nodeName == 'bytes'][0]
self.assertEquals(node.firstChild.nodeValue, '3')
def test_GET_accept_wildcard(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = '*/*'
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body, 'c1\n')
def test_GET_accept_application_wildcard(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/*'
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(len(simplejson.loads(resp.body)), 1)
def test_GET_accept_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(len(simplejson.loads(resp.body)), 1)
def test_GET_accept_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/xml'
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
self.assertEquals(dom.firstChild.nodeName, 'account')
listing = \
[n for n in dom.firstChild.childNodes if n.nodeName != '#text']
self.assertEquals(len(listing), 1)
def test_GET_accept_conflicting(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?format=plain',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body, 'c1\n')
def test_GET_accept_not_valid(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/xml*'
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 400)
self.assertEquals(resp.body, 'bad accept header: application/xml*')
    def test_GET_prefix_delimiter_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = self.controller.PUT(req)
for first in range(3):
req = Request.blank('/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
for second in range(3):
req = Request.blank('/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body.strip().split('\n'), ['sub.'])
req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body.strip().split('\n'),
['sub.0', 'sub.0.', 'sub.1', 'sub.1.', 'sub.2', 'sub.2.'])
req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.body.strip().split('\n'),
['sub.1.0', 'sub.1.1', 'sub.1.2'])
    def test_GET_prefix_delimiter_json(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = self.controller.PUT(req)
for first in range(3):
req = Request.blank('/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
for second in range(3):
req = Request.blank('/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals([n.get('name', 's:' + n.get('subdir', 'error'))
for n in simplejson.loads(resp.body)], ['s:sub.'])
req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals([n.get('name', 's:' + n.get('subdir', 'error'))
for n in simplejson.loads(resp.body)],
['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
self.assertEquals([n.get('name', 's:' + n.get('subdir', 'error'))
for n in simplejson.loads(resp.body)],
['sub.1.0', 'sub.1.1', 'sub.1.2'])
    def test_GET_prefix_delimiter_xml(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = self.controller.PUT(req)
for first in range(3):
req = Request.blank('/sda1/p/a/sub.%s' % first,
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
for second in range(3):
req = Request.blank('/sda1/p/a/sub.%s.%s' % (first, second),
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
self.controller.PUT(req)
req = Request.blank('/sda1/p/a?delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEquals(listing, ['s:sub.'])
req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEquals(listing,
['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 200)
dom = xml.dom.minidom.parseString(resp.body)
listing = []
for node1 in dom.firstChild.childNodes:
if node1.nodeName == 'subdir':
listing.append('s:' + node1.attributes['name'].value)
elif node1.nodeName == 'container':
for node2 in node1.childNodes:
if node2.nodeName == 'name':
listing.append(node2.firstChild.nodeValue)
self.assertEquals(listing, ['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_through_call(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '404 ')
def test_through_call_invalid_path(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/bob',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '400 ')
def test_params_utf8(self):
self.controller.PUT(Request.blank('/sda1/p/a',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}))
for param in ('delimiter', 'format', 'limit', 'marker', 'prefix'):
req = Request.blank('/sda1/p/a?%s=\xce' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 400)
req = Request.blank('/sda1/p/a?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = self.controller.GET(req)
self.assert_(resp.status_int in (204, 412), resp.status_int)
def test_put_auto_create(self):
headers = {'x-put-timestamp': normalize_timestamp(1),
'x-delete-timestamp': normalize_timestamp(0),
'x-object-count': '0',
'x-bytes-used': '0'}
resp = self.controller.PUT(Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)))
self.assertEquals(resp.status_int, 404)
resp = self.controller.PUT(Request.blank('/sda1/p/.a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)))
self.assertEquals(resp.status_int, 201)
resp = self.controller.PUT(Request.blank('/sda1/p/a/.c',
environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers)))
self.assertEquals(resp.status_int, 404)
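        # Accounts with a leading '.' (like '.a' above) are reserved system
        # accounts that the server auto-creates on container PUT, which is
        # why only the '/sda1/p/.a/c' request succeeds without a prior
        # account PUT; '.c' is just a container name, so it gets no such pass.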
if __name__ == '__main__':
unittest.main()
|
pvo/swift
|
test/unit/account/test_server.py
|
Python
|
apache-2.0
| 51,979
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Tasks related to user actions."""
from __future__ import absolute_import, division, print_function
from functools import wraps
from flask import current_app
from werkzeug import secure_filename
from timeout_decorator import TimeoutError
from inspire_utils.record import get_value
from inspirehep.modules.workflows.utils import (
get_pdf_in_workflow,
log_workflows_action,
)
from inspirehep.utils.record import get_arxiv_id
from inspirehep.utils.url import is_pdf_link
from .refextract import extract_references
from ..utils import download_file_to_workflow, with_debug_logging
def mark(key, value):
"""Mark the workflow object by putting a value in a key in extra_data."""
@with_debug_logging
@wraps(mark)
def _mark(obj, eng):
obj.extra_data[key] = value
_mark.__doc__ = 'Mark the workflow object with %s:%s.' % (key, value)
return _mark
def is_marked(key):
    """Check if the workflow object has a specific mark."""
    @with_debug_logging
    @wraps(is_marked)
    def _is_marked(obj, eng):
        return key in obj.extra_data and obj.extra_data[key]
    _is_marked.__doc__ = 'Check if the workflow object has the mark %s.' % key
    return _is_marked
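# Illustrative pairing of the two factories above (a sketch; the mark key
# is hypothetical, not taken from a real INSPIRE workflow definition):
#
#     workflow = [
#         mark('auto-approved', True),   # task that stamps extra_data
#         ...
#         is_marked('auto-approved'),    # task usable as a branch condition
#     ]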
@with_debug_logging
def is_record_accepted(obj, *args, **kwargs):
"""Check if the record was approved."""
return obj.extra_data.get("approved", False)
@with_debug_logging
def shall_halt_workflow(obj, *args, **kwargs):
"""Check if the workflow shall be halted."""
return obj.extra_data.get("halt_workflow", False)
def in_production_mode(*args, **kwargs):
"""Check if we are in production mode"""
return current_app.config.get(
"PRODUCTION_MODE", False
)
@with_debug_logging
def add_core(obj, eng):
"""Mark a record as CORE if it was approved as CORE."""
if 'core' in obj.extra_data:
obj.data['core'] = obj.extra_data['core']
def halt_record(action=None, message=None):
"""Halt the workflow for approval with optional action."""
@with_debug_logging
@wraps(halt_record)
def _halt_record(obj, eng):
eng.halt(
action=obj.extra_data.get("halt_action") or action,
msg=obj.extra_data.get("halt_message") or message,
)
_halt_record.__doc__ = (
'Halt the workflow object, action=%s, message=%s' % (action, message)
)
_halt_record.description = '"%s"' % (message or 'unspecified')
return _halt_record
@with_debug_logging
def update_note(metadata):
"""Check if the record was approved as CORE."""
new_notes = []
for note in metadata.get("public_notes", []):
if note.get("value", "") == "*Brief entry*":
note = {"value": "*Temporary entry*"}
new_notes.append(note)
if new_notes:
metadata["public_notes"] = new_notes
return metadata
def reject_record(message):
"""Reject record with message."""
@with_debug_logging
@wraps(reject_record)
def _reject_record(obj, *args, **kwargs):
relevance_prediction = obj.extra_data.get("relevance_prediction")
log_workflows_action(
action="reject_record",
relevance_prediction=relevance_prediction,
object_id=obj.id,
user_id=None,
source="workflow",
)
obj.extra_data["approved"] = False
obj.extra_data["reason"] = message
obj.log.info(message)
_reject_record.__doc__ = 'Reject the record, message=%s' % message
return _reject_record
def _is_auto_rejected(workflow_obj):
relevance_prediction = workflow_obj.extra_data.get('relevance_prediction')
classification_results = workflow_obj.extra_data.get('classifier_results')
if not relevance_prediction or not classification_results:
return False
decision = relevance_prediction.get('decision')
all_class_results = classification_results.get('complete_output')
core_keywords = all_class_results.get('core_keywords')
return decision.lower() == 'rejected' and len(core_keywords) == 0
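# _is_auto_rejected expects extra_data shaped roughly like this (shape
# inferred from the accessors above, not a documented schema):
#
#     {'relevance_prediction': {'decision': 'Rejected', ...},
#      'classifier_results': {'complete_output': {'core_keywords': []}}}
#
# i.e. auto-reject only when the relevance model said "rejected" AND the
# classifier found no CORE keywords.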
@with_debug_logging
def is_record_relevant(obj, eng):
"""Shall we halt this workflow for potential acceptance or just reject?"""
# We do not auto-reject any user submissions
if is_submission(obj, eng):
return True
if _is_auto_rejected(workflow_obj=obj):
return False
return True
@with_debug_logging
def is_experimental_paper(obj, eng):
"""Check if the record is an experimental paper."""
categories = list(
get_value(obj.data, "arxiv_eprints.categories", [[]])[0]
) + list(get_value(obj.data, "inspire_categories.term", []))
categories_to_check = [
"hep-ex", "nucl-ex", "astro-ph", "astro-ph.IM", "astro-ph.CO",
"astro-ph.EP", "astro-ph.GA", "astro-ph.HE", "astro-ph.SR",
"physics.ins-det", "Experiment-HEP", "Experiment-Nucl",
"Astrophysics", "Instrumentation"
]
for experimental_category in categories_to_check:
if experimental_category in categories:
return True
return False
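# (The membership loop above is equivalent to
#     bool(set(categories_to_check) & set(categories))
# for the same category lists.)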
@with_debug_logging
def is_arxiv_paper(obj, *args, **kwargs):
"""Check if the record is from arXiv."""
arxiv_id = get_arxiv_id(obj.data)
categories = get_value(obj.data, 'arxiv_eprints.categories')
if arxiv_id or categories:
return True
return False
@with_debug_logging
def is_submission(obj, eng):
"""Is this a submission?"""
source = obj.data.get('acquisition_source')
if source:
return source.get('method') == "submitter"
return False
@with_debug_logging
def submission_fulltext_download(obj, eng):
    """Download the user-provided fulltext PDF into the workflow, if any."""
submission_pdf = obj.extra_data.get('submission_pdf')
if submission_pdf and is_pdf_link(submission_pdf):
filename = secure_filename('fulltext.pdf')
pdf = download_file_to_workflow(
workflow=obj,
name=filename,
url=submission_pdf,
)
if pdf:
obj.log.info('PDF provided by user from %s', submission_pdf)
return obj.files[filename].file.uri
else:
obj.log.info('Cannot fetch PDF provided by user from %s', submission_pdf)
def prepare_update_payload(extra_data_key="update_payload"):
@with_debug_logging
@wraps(prepare_update_payload)
def _prepare_update_payload(obj, eng):
# TODO: Perform auto-merge if possible and update only necessary data
# See obj.extra_data["record_matches"] for data on matches
# FIXME: Just update entire record for now
obj.extra_data[extra_data_key] = obj.data
_prepare_update_payload.__doc__ = (
'Prepare the update payload, extra_data_key=%s.' % extra_data_key)
return _prepare_update_payload
@with_debug_logging
def refextract(obj, eng):
uri = get_pdf_in_workflow(obj)
source = get_value(obj.data, 'acquisition_source.source')
if uri:
try:
journal_kb_path = current_app.config.get('REFEXTRACT_JOURNAL_KB_PATH', None)
if journal_kb_path:
references = extract_references(uri, source, {'journals': journal_kb_path})
else:
references = extract_references(uri, source)
if references:
obj.data['references'] = references
obj.log.info('Extracted %d references', len(references))
else:
obj.log.info('No references extracted')
except TimeoutError:
obj.log.error('Timeout when extracting references from the PDF')
else:
obj.log.error('Not able to download and process the PDF')
|
kaplun/inspire-next
|
inspirehep/modules/workflows/tasks/actions.py
|
Python
|
gpl-3.0
| 8,467
|
# coding: utf-8
import tweepy
import urllib2
import json
import time
import datetime
import random
consumer_key = 'jNSA6i7eIA564BRMRdMVCDrkF'
consumer_secret = 'L3908LNoYSh8bwIIhgtEJ6PuaFdXGBENaZdsVrG0iRJFMpWa4u'
access_token = '3306456200-8Y9UUXPxM3t6B6KCL9eJxjC1lREmEtQ8zdkBR28'
access_token_secret = 'nd8L1PWQ4CxaBP07N1U548uWz1ei5Q7Bck65MhjsRsRIW'
wu_api_key = 'c925e374af6c3fc2'
wu_api_url = 'http://api.wunderground.com/api/'
wu_api_query = '/conditions/q/NY/Brooklyn.json'
#tweepy code to handle authorizing the API access
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#Get the weather information from Wunderground API in JSON format
def check_weather():
f = urllib2.urlopen(wu_api_url + wu_api_key + wu_api_query)
json_string = f.read()
parsed_json = json.loads(json_string)
#print json_string
#print parsed_json
current_temp = parsed_json['current_observation']['temp_f']
current_icon = parsed_json['current_observation']['icon_url']
observation_time = parsed_json['current_observation']['local_epoch']
observation_time = datetime.datetime.fromtimestamp(float(observation_time))
fmt = '%-I:%M' #format the time
observation_time = observation_time.strftime(fmt)
current_time = datetime.datetime.now().time()
tt_list = [current_temp, current_time, current_icon]
return tt_list
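# The parsed Wunderground 'conditions' payload looks roughly like this
# (abridged; the values shown are illustrative):
#
#     {"current_observation": {"temp_f": 71.2,
#                              "icon_url": "http://icons.wxug.com/i/c/k/clear.gif",
#                              "local_epoch": "1438020180", ...}}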
#save the weather icon and upload it to the profile
def icon_update(current_icon):
icon_file = urllib2.urlopen(current_icon)
icon_file = icon_file.read()
with open("icon.gif", "wb") as code:
code.write(icon_file)
api.update_profile_image('icon.gif')
def tweet_time_temp():
secs = random.randint(2700,5400) #selects a random number of seconds between 45mins and 90mins
tt_list = check_weather() #gets all the new info from Wunderground
current_temp = tt_list[0]
current_time = tt_list[1]
time_fmt = '%-I:%M %p'
current_time = current_time.strftime(time_fmt)
current_status = "The time is %s and the temperature is %s°F." % (str(current_time), current_temp)
current_icon = tt_list[2]
print current_status
print secs
icon_update(current_icon)
api.update_status(status=current_status) #updates the tweet feed via the API
time.sleep(secs) #pauses for the random number of seconds
while True:
tweet_time_temp()
|
tomleger/time_and_temp
|
tweepytext.py
|
Python
|
mit
| 2,337
|
# Copyright 2017 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import fixtures as fx
import testtools
from nova.tests import fixtures
from nova.tests.unit import conf_fixture
"""Test request logging middleware under various conditions.
The request logging middleware is needed when running under something
other than eventlet. Because Nova grew up on eventlet and its wsgi
server, our user-facing data (the log stream) was a mix of what Nova
was emitting and what eventlet.wsgi was emitting on our behalf. When
running under uwsgi we want to make sure that we have equivalent
coverage.
All these tests use GET / to hit an endpoint that doesn't require the
database setup. We have to do a bit of mocking to make that work.
"""
class TestRequestLogMiddleware(testtools.TestCase):
def setUp(self):
super(TestRequestLogMiddleware, self).setUp()
# this is the minimal set of magic mocks needed to convince
        # the API service it can start on its own without a database.
mocks = ['nova.objects.Service.get_by_host_and_binary',
'nova.objects.Service.create']
self.stdlog = fixtures.StandardLogging()
self.useFixture(self.stdlog)
for m in mocks:
p = mock.patch(m)
self.addCleanup(p.stop)
p.start()
@mock.patch('nova.api.openstack.requestlog.RequestLog._should_emit')
def test_logs_requests(self, emit):
"""Ensure requests are logged.
Make a standard request for / and ensure there is a log entry.
"""
emit.return_value = True
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.RPCFixture('nova.test'))
api = self.useFixture(fixtures.OSAPIFixture()).api
resp = api.api_request('/', strip_version=True)
# the content length might vary, but the important part is
        # that what we log is what we return to the user (which,
        # excitingly, turns out not to be the case with eventlet!)
content_length = resp.headers['content-length']
log1 = ('INFO [nova.api.openstack.requestlog] 127.0.0.1 '
'"GET /" status: 200 len: %s' % content_length)
self.assertIn(log1, self.stdlog.logger.output)
@mock.patch('nova.api.openstack.requestlog.RequestLog._should_emit')
def test_logs_mv(self, emit):
"""Ensure logs register microversion if passed.
This makes sure that microversion logging actually shows up
when appropriate.
"""
emit.return_value = True
self.useFixture(conf_fixture.ConfFixture())
        # Monkey-patch the versions resource to advertise microversion
        # support so that a microversion shows up in the logged line.
self.useFixture(
fx.MonkeyPatch(
'nova.api.openstack.compute.versions.'
'Versions.support_api_request_version',
True))
self.useFixture(fixtures.RPCFixture('nova.test'))
api = self.useFixture(fixtures.OSAPIFixture()).api
api.microversion = '2.25'
resp = api.api_request('/', strip_version=True)
content_length = resp.headers['content-length']
log1 = ('INFO [nova.api.openstack.requestlog] 127.0.0.1 '
'"GET /" status: 200 len: %s microversion: 2.25 time:' %
content_length)
self.assertIn(log1, self.stdlog.logger.output)
@mock.patch('nova.api.openstack.compute.versions.Versions.index')
@mock.patch('nova.api.openstack.requestlog.RequestLog._should_emit')
def test_logs_under_exception(self, emit, v_index):
"""Ensure that logs still emit under unexpected failure.
If we get an unexpected failure all the way up to the top, we should
still have a record of that request via the except block.
"""
emit.return_value = True
v_index.side_effect = Exception("Unexpected Error")
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.RPCFixture('nova.test'))
api = self.useFixture(fixtures.OSAPIFixture()).api
api.api_request('/', strip_version=True)
log1 = ('INFO [nova.api.openstack.requestlog] 127.0.0.1 "GET /"'
' status: 500 len: 0 microversion: - time:')
self.assertIn(log1, self.stdlog.logger.output)
@mock.patch('nova.api.openstack.requestlog.RequestLog._should_emit')
def test_no_log_under_eventlet(self, emit):
"""Ensure that logs don't end up under eventlet.
        We still set the _should_emit return value directly so that this
        test keeps passing even if eventlet is later removed from the
        test environment.
NOTE(sdague): this test can be deleted when eventlet is no
longer supported for the wsgi stack in Nova.
"""
emit.return_value = False
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.RPCFixture('nova.test'))
api = self.useFixture(fixtures.OSAPIFixture()).api
api.api_request('/', strip_version=True)
self.assertNotIn("nova.api.openstack.requestlog",
self.stdlog.logger.output)
|
rahulunair/nova
|
nova/tests/unit/api/openstack/test_requestlog.py
|
Python
|
apache-2.0
| 5,673
|
#!/usr/bin/env python
from app import telomere
telomere.run(host='0.0.0.0', debug=True)
|
rabramley/telomere
|
run.py
|
Python
|
mit
| 89
|
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as pmod
from . import templater
from django.core.mail import send_mail
# This view displays the repairs
def process_request(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/manager/login')
    if not request.user.is_staff:
return HttpResponseRedirect('/manager/')
if request.urlparams[0] == "email":
completed = pmod.RepairStatus.objects.get(status="Completed")
repairs = pmod.Repair.objects.filter(status=completed, paid=False)
for i in repairs:
message = i.user.first_name + " " + i.user.last_name + ":\r\n" + "The repair for the " + i.description + " with the " + i.problem + " has been completed.\r\n Please return to the store to pick it up when you can.\r\nThank you!\r\n\r\nHexPhotos"
send_mail('HexPhotos Repair Completed', message, 'no-reply@hexphotos.com', [i.user.email], fail_silently=True)
    # Show the list of repairs, paid or unpaid depending on the URL parameter.
if request.urlparams[0] == "paid":
Objects = pmod.Repair.objects.filter(paid=True)
else:
Objects = pmod.Repair.objects.filter(paid=False)
tvars = {
'Objects': Objects,
}
return templater.render_to_response(request, 'repairs.html', tvars)
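# NOTE: request.urlparams and templater are helpers from the surrounding
# project's routing/templating layer (django-mako-plus style), not stock
# Django; urlparams[0] is the first URL segment after the view name.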
|
odrolliv13/Hex-Photos
|
manager/views/repairs.py
|
Python
|
apache-2.0
| 1,353
|
import sys
import yaml
import RPi.GPIO as GPIO
import time
import datetime
import structlog
from pathlib import Path
import csv
import os
import glob
from daemon import Daemon
from bmp183 import bmp183
import Adafruit_DHT
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import psycopg2
# from RPi_AS3935 import RPi_AS3935 # for lightning, not working
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def windEventHandler(pin):
# print("handling wind speed event")
data.windspeed += 1
def rainEventHandler(pin):
# print("handling rain event")
global lastrainevent
now = datetime.datetime.now()
diff = (now-lastrainevent).total_seconds()
if (diff > 0.25):
data.rain += 1
lastrainevent = datetime.datetime.now()
def handle_interrupt(channel):
time.sleep(0.003)
global sensor
reason = sensor.get_interrupt()
if reason == 0x01:
print("Noise level too high - adjusting")
sensor.raise_noise_floor()
elif reason == 0x04:
print("Disturber detected - masking")
sensor.set_mask_disturber(True)
elif reason == 0x08:
        now = datetime.datetime.now().strftime('%H:%M:%S - %Y/%m/%d')
distance = sensor.get_distance()
print("We sensed lightning!")
print("It was " + str(distance) + "km away. (%s)" % now)
print("")
class CSVWriter:
def __init__(self, filename='test.csv', header=[]):
print("CSV init")
newfile = False
self._header = header
self._fileday = None
self._filename = filename
outfile = Path(filename)
if not outfile.is_file():
# File doesn't exist, will be created, print header too
print("CSV init file not exist")
newfile = True
self.fn = open(filename, 'a')
self.fileday = int(datetime.datetime.utcnow().day)
# print("CSV Date:" + str(self._day))
self.csv = csv.writer(self.fn, delimiter=',')
# print("Size" + str(len(header)))
if newfile:
# new file so print header
print("CSV init print header")
self.csv.writerow(self._header)
def writedata(self, dataobj):
print("CSV write")
        day = int(dataobj[0][8:10])  # parse day-of-month from the timestamp string (first item)
if (int(self.fileday) != int(day)):
print("CSV not equal ")
# usually at 11:59 so UTCnow will get next day.
dt = datetime.datetime.utcnow()
filename = "data-"+dt.strftime("%Y-%m-%d") + ".csv"
print("Newfile: " + filename)
print("Self Day vs Arg Day:" + str(self.fileday) + "-" + str(day))
self.fn.close()
print("CSV after close")
self = CSVWriter(filename, self._header)
print("CSV new self")
self.fileday = int(day)
# self.csv.writerow(self._header)
# self.fileday = int(datetime.datetime.utcnow().day)
# self._day = int(datetime.datetime.utcnow().day)
print("CSV writerow")
self.csv.writerow(dataobj)
print("After:" + str(self.fileday))
self.fn.flush()
# print("csv write")
return self
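    # NOTE: writedata() rotates to a new day's file by constructing a new
    # CSVWriter and returning it, so callers must rebind the result, e.g.
    #     dataout = dataout.writedata(data.exportdata())
    # as the main loop below does; discarding the return value would keep
    # appending to the previous day's file.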
def __del__(self):
print("CSV Del")
self.fn.close()
@property
def fileday(self):
# Day of Month for rotating CSV file
"""getting"""
print("Fileday get")
return int(self._fileday)
@fileday.setter
def fileday(self, value):
print("Fileday setter: "+str(value))
        if value is not None and 0 <= int(value) <= 31:
self._fileday = int(value)
else:
self._fileday = None
class StationConfig:
"""Configuration parser"""
def __init__(self, file='config.yaml'):
with open(file, 'r') as ymlfile:
try:
                self.configs = yaml.safe_load(ymlfile)  # safe_load avoids executing arbitrary YAML tags
except yaml.YAMLError as exc:
sys.exit("Fatal: Config file cannot be parsed as correct YAML")
class WeatherDaemon(Daemon):
def run(self):
while True:
time.sleep(.001)
class WeatherData:
def __init__(self):
self._timestamp = datetime.datetime.utcnow()
self._tdelta = 0.00
self._temp = 0
self._pressure = 0
self._humidityDHT = None
self._temperatureDHT = None
self._ds18b20temp = None
self._light = 0
self._winddir = 0
self._windspeed = 0
self._rain = 0
self._air1 = 0
self._air2 = 0
self._soilmoisture = 0
self._dataformatversion = 1
self._DHTsuccess = 0
@property
def timeUTC(self):
s = self._timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')
return s[:-3]
@property
def pressureMillibar(self):
# Raw is Pascals, divide by 100 to get millibar
"""getting"""
return "%0.1f" % ((self._pressure/100),)
@property
def pressureInchesHG(self):
# Raw is Pascals, divide by 3389.39 you'll get inches-Hg.
"""getting"""
return "%0.2f" % ((self._pressure/3389.39),)
@property
def pressure(self):
# Raw is Pascals
"""getting"""
return "%0.2f" % (self._pressure)
@pressure.setter
def pressure(self, value):
if value is not None and 0.0 <= value <= 100000.0:
self._pressure = value
else:
self._pressure = 0.0
@property
def air1(self):
"""getting"""
return self._air1
@air1.setter
def air1(self, value):
if value is not None and 0.0 <= value <= 1024.0:
self._air1 = value
else:
self._air1 = 0.0
@property
def windspeed(self):
"""getting"""
return self._windspeed
@windspeed.setter
def windspeed(self, throwaway):
self._windspeed += 1
@property
def rain(self):
"""getting"""
return self._rain
@rain.setter
def rain(self, throwaway):
self._rain += 1
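    # NOTE: the windspeed and rain setters ignore the value being assigned
    # and just bump an internal counter, so the GPIO callbacks' statements
    # like "data.windspeed += 1" mean "count one more pulse", not ordinary
    # assignment.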
@property
def light(self):
"""getting"""
return self._light
@light.setter
def light(self, value):
if value is not None and 0.0 <= value <= 1024.0:
self._light = value
else:
self._light = 0.0
@property
def winddir(self):
"""getting"""
return self._winddir
@winddir.setter
def winddir(self, value):
if value is not None and 0.0 <= value <= 1024.0:
self._winddir = value
else:
self._winddir = 0.0
@property
def air2(self):
"""getting"""
return self._air2
@air2.setter
def air2(self, value):
if value is not None and 0.0 <= value <= 1024.0:
self._air2 = value
else:
self._air2 = 0.0
@property
def soilmoisture(self):
"""getting"""
return self._soilmoisture
@soilmoisture.setter
def soilmoisture(self, value):
if value is not None and 0.0 <= value <= 1024.0:
self._soilmoisture = value
else:
self._soilmoisture = 0.0
@property
def dataformatversion(self):
"""getting"""
return self._dataformatversion
@property
def ds18b20temp(self):
"""getting"""
return self._ds18b20temp
@ds18b20temp.setter
def ds18b20temp(self, value):
if value is not None and -40.0 <= value <= 200.0:
self._ds18b20temp = "%0.2f" % (value,)
else:
self._ds18b20temp = 0.0
@property
def humidityDHT(self):
"""getting"""
return self._humidityDHT
@property
def temperatureDHT(self):
"""getting"""
return self._temperatureDHT
@humidityDHT.setter
def humidityDHT(self, value):
if value is not None and 0.0 <= value <= 100.0:
self._humidityDHT = value
else:
self._humidityDHT = 0.0
@temperatureDHT.setter
def temperatureDHT(self, value):
# Store in F, so convert from C:
if value is not None and 0.0 <= value <= 200.0:
self._temperatureDHT = "%0.2f" % ((value*1.8)+32,)
else:
self._temperatureDHT = 0.0
@property
def temp(self):
"""getting"""
return "%0.1f" % (self._temp)
@property
def tempF(self):
"""getting"""
return "%0.1f" % ((self._temp*1.8)+32,)
@property
def tempC(self):
"""getting"""
return "%0.1f" % (self._temp)
@temp.setter
def temp(self, value):
        # Store raw Celsius; the tempF property converts on read.
if value is not None and 0.0 <= value <= 200.0:
self._temp = value
else:
self._temp = 0.0
@property
def tdelta(self):
"""getting"""
return "%0.2f" % (self._tdelta)
@tdelta.setter
def tdelta(self, value):
        # Seconds elapsed since the previous sample.
if value is not None and 0.00 <= value <= 200.00:
self._tdelta = value
else:
self._tdelta = 0.00
def exportdata(self):
return [self.timeUTC, self.tdelta, self.tempF, self.pressureMillibar,
self.temperatureDHT, self.humidityDHT, self.light,
self.winddir, self.getwinddir(self.winddir), self.windspeed,
self.rain, self.ds18b20temp, self.soilmoisture, self.air1,
self.air2, self.dataformatversion]
def day(self):
s = self._timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')
return int(s[8:10])
def describedata(self, v=None):
v = v or self._dataformatversion
if v == 1:
return ["Timestamp UTC", "Timedelta sec", "BMP Temp F",
"BMP Pressure Millibar", "DHT Temp C", "DHT Humidity %",
"Light Count", "Winddir ", "Winddir Lookup",
"Windspeed Count", "Rain Count", "Soil Temp",
"Soil Moisture", "Air Sensor 1", "Air Sensor 2",
"Data Format Version"]
else:
return [v]
    def getwinddir(self, windreading):
        # Inclusive ADC ranges mapped onto the 16-point compass rose.
        winddirtable = {
            (867, 917): 'W',
            (816, 866): 'NW',
            (770, 815): 'WNW',
            (714, 769): 'N',
            (644, 713): 'NNW',
            (598, 643): 'SW',
            (521, 597): 'WSW',
            (431, 520): 'NE',
            (348, 430): 'NNE',
            (269, 347): 'S',
            (218, 268): 'SSW',
            (159, 217): 'SE',
            (112, 158): 'SSE',
            (90, 111): 'E',
            (76, 89): 'ENE',
            (33, 75): 'ESE'
        }
        for low, high in winddirtable:
            # Inclusive bounds so a reading that lands exactly on a range
            # boundary (e.g. 867) still resolves instead of returning None.
            if low <= windreading <= high:
                return winddirtable[(low, high)]
        return None  # outside all calibrated ranges
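    # Example: a raw ADC reading of 700 falls in the inclusive range
    # (644, 713) above and maps to 'NNW'.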
@staticmethod
def ds18b20_read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
@staticmethod
def ds18b20_read_temp():
lines = WeatherData.ds18b20_read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.001)
lines = WeatherData.ds18b20_read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c, temp_f
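    # The w1_slave file parsed above contains two lines like (illustrative):
    #   72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
    #   72 01 4b 46 7f ff 0e 10 57 t=23125
    # 'YES' means the CRC check passed; 't=' is the temperature in
    # millidegrees Celsius (23.125 C here), converted to C and F above.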
if __name__ == '__main__':
logger = structlog.get_logger()
GPIO.setwarnings(False)
logger.info("Loading Configs...")
cfg = StationConfig('config.yaml')
# cfg.dumpcfg()
logger.info("Getting Data Ready:")
# Get our CSV Writer ready, pass in the Data Header for 1st line
dataout = CSVWriter("data-" +
datetime.datetime.utcnow().strftime("%Y-%m-%d") +
".csv", WeatherData().describedata())
try:
connect_str = "dbname='weatherstation' user='weatherstation' " + \
"host='127.0.0.1' password='weatherstation'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
# create a psycopg2 cursor that can execute queries
cursor = conn.cursor()
# create a new table with a single column called "name"
# cursor.execute("""CREATE TABLE tutorials (name char(40));""")
# run a SELECT statement - no data in there, but we can try it
# cursor.execute("""SELECT * from weatherdata""")
# rows = cursor.fetchall()
# print("fetch all: " + str(rows))
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# Sensors
logger.info("Getting Sensors Ready:")
# print(cfg.configs['bmp183']['pin-sck'])
bmp = bmp183(cfg.configs['bmp183']['pin-sck'],
cfg.configs['bmp183']['pin-sdo'],
cfg.configs['bmp183']['pin-sdi'],
cfg.configs['bmp183']['pin-cs'])
# mcp = Adafruit_MCP3008.MCP3008(clk=cfg.configs['mcp3008']['pin-clk'],
# cs=cfg.configs['mcp3008']['pin-cs'],
# miso=cfg.configs['mcp3008']['pin-miso'],
# mosi=cfg.configs['mcp3008']['pin-mosi'])
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
# sensor = RPi_AS3935(address=0x00, bus=1)
# sensor.set_indoors(True)
# sensor.set_noise_floor(0)
# sensor.calibrate(tun_cap=0x0F)
data = WeatherData()
lastrainevent = datetime.datetime.now()
GPIO.setup(cfg.configs['windspeed']['pin'], GPIO.IN,
pull_up_down=GPIO.PUD_UP)
GPIO.setup(cfg.configs['raingauge']['pin'], GPIO.IN,
pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(cfg.configs['windspeed']['pin'], GPIO.FALLING)
GPIO.add_event_callback(cfg.configs['windspeed']['pin'], windEventHandler)
GPIO.add_event_detect(cfg.configs['raingauge']['pin'], GPIO.FALLING)
GPIO.add_event_callback(cfg.configs['raingauge']['pin'], rainEventHandler)
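    # Both gauges are presumably simple reed-switch pulse sources: each
    # falling edge marks one anemometer rotation or one rain-bucket tip,
    # which the two event handlers tally between samples.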
# GPIO.setup(cfg.configs['lightning']['pin'], GPIO.IN)
# GPIO.add_event_detect(cfg.configs['lightning']['pin'],
# GPIO.RISING, callback=handle_interrupt)
while True:
start = datetime.datetime.now()
# print(sorted(list(data._dumprow)))
bmp.measure_pressure()
data.temp = bmp.temperature
data.pressure = bmp.pressure
# print("Temperature: " + data.tempF + " deg F")
# print("Temperature: " + data.tempC + " deg C")
# print("Pressure : " + data.pressureMillibar + " millibar")
# print("Pressure : " + data.pressureInchesHG + " inches-Hg")
(tempc, data.ds18b20temp) = data.ds18b20_read_temp()
# print("1-wire temp: " + str(data.ds18b20temp))
humidity, temperature = Adafruit_DHT.read(11, 5)
if humidity is not None and temperature is not None:
data.humidityDHT = humidity
data.temperatureDHT = temperature
# print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(
# temperature, humidity))
else:
# if datalast is not None:
# humidity = datalast.humidityDHT
# temperature = datalast.temperatureDHT
print('Failed to get reading. Try again!')
# Analog Readings
values = [0]*5
for i in range(5):
# The read_adc function will get the value
# of the specified channel (0-7).
values[i] = mcp.read_adc(i)
# Print the ADC values.
# print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} |\
# {4:>4} '.format(*range(8)))
# print('-' * 57)
# print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} |\
# {4:>4} '.format(*values))
data.air1 = values[cfg.configs['air1']['analog-ch']]
data.air2 = values[cfg.configs['air2']['analog-ch']]
data.light = values[cfg.configs['lightresistor']['analog-ch']]
data.soilmoisture = values[cfg.configs['soilmoisture']['analog-ch']]
data.winddir = values[cfg.configs['winddir']['analog-ch']]
# print("Wind: " + data.getwinddir(data.winddir))
# Output:
now = datetime.datetime.utcnow()
earlier = datetime.datetime.strptime(data.timeUTC,
'%Y-%m-%d %H:%M:%S.%f')
# print("St Sec: " + str(e.second) + " " + str(e.microsecond))
# print("Now Sec: " + str(now.second) + " " + str(now.microsecond))
data.tdelta = (now - earlier).total_seconds()
print("Delta:" + str(data.tdelta))
print("Data:")
print(list(data.exportdata()))
sql = """INSERT into weatherdata (measurement, tdelta, bmp_temp_f,
bmp_pressuer_millibar, dht_temp_f, dht_humidity_perc,
light_reading, wind_dir_value, wind_dir_lookup,
wind_speed_count, rain_count, soil_temp, soil_humidity,
air_1, air_2, data_version) values
(%s,%s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s)"""
insertdata = list(data.exportdata())
try:
cursor.execute(sql, insertdata)
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
print("SQL statement:" + sql)
print("Data: " + str(insertdata))
conn.rollback()
dataout = dataout.writedata(data.exportdata())
# dataout.fileday = now.day
# print ("Desc: ")
# print(data.describedata())
# datalast = data
data = WeatherData()
# time.sleep(0.15)
exit(0)
daemon = WeatherDaemon('/tmp/weatherdaemon.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
logger.info("Starting up...")
daemon.start()
elif 'stop' == sys.argv[1]:
logger.info("Shutting down...")
daemon.stop()
elif 'restart' == sys.argv[1]:
logger.info("Restarting...")
daemon.restart()
else:
print("Unknown command")
sys.exit(2)
# sys.exit(0)
else:
print("usage: %s start|stop|restart" % sys.argv[0])
sys.exit(2)
# Date Calcs
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
logger.debug("Tomorrow: " + str(tomorrow.month) + " " + str(tomorrow.day))
|
kmkingsbury/raspberrypi-weather-station
|
station-code/collectweather.py
|
Python
|
apache-2.0
| 18,855
|
"""
Copyright 2015
FOSSEE, IIT Bombay
Use is subject to license terms.
@version 0.2, 15-Aug-2015
@author Manoj G
@email manoj.p.gudi@gmail.com
A generic block to call scilab functions which uses sciscipy wrapper
"""
import gras
import numpy
from scilab import Scilab
class Generic(gras.Block):
'''
Scilab Generic class
- inherits gras.Block
- used to call scilab functions
window_size is size of inputs the function will be called
'''
def __init__(self, func_name, window_size):
gras.Block.__init__(self,
name="generic",
in_sig=[numpy.float32],
out_sig=[numpy.float32])
self.window_size = window_size
self.func_name = func_name
self.scilab_instance = Scilab()
    def is_window_integral(self, input_item, window):
        """
        Check that the window size evenly divides the length of the input
        source vector. For example, input = [3, 4, 5, 6] with window = 3
        fails, since 3 does not divide a length of 4.
        """
        if len(input_item) % window:
            raise Exception("Window size must evenly divide the number of input items")
def work(self, input_items, output_items):
# Limit output_items to just the size of window
output_items[0][:] = output_items[0][:self.window_size]
# Check number of input_instances
n_input_items = len(input_items)
# Create output string instance which will be evaluated
out_eval_string = 'eval("self.scilab_instance.'+self.func_name+'('
# Iterate for n_input_items
for i in range(n_input_items):
# Check window condition
self.is_window_integral(input_items[i][:], self.window_size)
            # str() of a numpy slice with more than one element looks like
            # "[1 2 3 4 5]", which is not valid Python because list literals
            # need commas; .tolist() yields the comma-separated form instead.
            if self.window_size == 1:
                # A single-element slice such as "[ 1.5]" is already valid
                # list syntax, so it can go into the eval string as-is.
                out_eval_string = out_eval_string + str(input_items[i][:self.window_size]) + ","
            else:
                print 'IN', str(input_items[i][:self.window_size])
                out_eval_string = out_eval_string + str(input_items[i][:self.window_size].tolist()) + ","
out_eval_string = out_eval_string.rstrip(",") + ')")'
print "From Scilab",str(out_eval_string)
# for functions like sin
if n_input_items == 1 and self.window_size == 1:
output_items[0][:self.window_size] = eval(out_eval_string)
else:
output_items[0] = eval(out_eval_string)
print "OUT ",output_items[0]
#Write a for loop for n_inputs
for i in range(n_input_items):
self.consume(i,self.window_size) # Consume from port 0 input_items
self.produce(0,self.window_size) # Produce from port 0 output_items
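# Illustrative usage (assuming a gras flow graph wires this block up):
#   blk = Generic("sin", window_size=1)
# would apply Scilab's sin() to each incoming sample through sciscipy.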
|
manojgudi/sandhi
|
modules/gr36/gr-scigen/python/scigen.py
|
Python
|
gpl-3.0
| 3,017
|
# Copyright 2010 Google Inc.
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Handles authentication required to AWS and GS
"""
import base64
import boto
import boto.auth_handler
import boto.exception
import boto.plugin
import boto.utils
import copy
import datetime
from email.utils import formatdate
import hmac
import os
import posixpath
from boto.compat import urllib, encodebytes
from boto.auth_handler import AuthHandler
from boto.exception import BotoClientError
try:
from hashlib import sha1 as sha
from hashlib import sha256 as sha256
except ImportError:
import sha
sha256 = None
# Region detection strings to determine if SigV4 should be used
# by default.
SIGV4_DETECT = [
'.cn-',
# In eu-central we support both host styles for S3
'.eu-central',
'-eu-central',
]
class HmacKeys(object):
"""Key based Auth handler helper."""
def __init__(self, host, config, provider):
if provider.access_key is None or provider.secret_key is None:
raise boto.auth_handler.NotReadyToAuthenticate()
self.host = host
self.update_provider(provider)
def update_provider(self, provider):
self._provider = provider
self._hmac = hmac.new(self._provider.secret_key.encode('utf-8'),
digestmod=sha)
if sha256:
self._hmac_256 = hmac.new(self._provider.secret_key.encode('utf-8'),
digestmod=sha256)
else:
self._hmac_256 = None
def algorithm(self):
if self._hmac_256:
return 'HmacSHA256'
else:
return 'HmacSHA1'
def _get_hmac(self):
if self._hmac_256:
digestmod = sha256
else:
digestmod = sha
return hmac.new(self._provider.secret_key.encode('utf-8'),
digestmod=digestmod)
def sign_string(self, string_to_sign):
new_hmac = self._get_hmac()
new_hmac.update(string_to_sign.encode('utf-8'))
return encodebytes(new_hmac.digest()).decode('utf-8').strip()
def __getstate__(self):
pickled_dict = copy.copy(self.__dict__)
del pickled_dict['_hmac']
del pickled_dict['_hmac_256']
return pickled_dict
def __setstate__(self, dct):
self.__dict__ = dct
self.update_provider(self._provider)
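# A minimal sketch (not part of boto) of what HmacKeys.sign_string computes;
# the key and message below are made-up examples, and SHA-256 is assumed to
# be available.
def _example_sign_string(secret_key=b'example-secret', message=b'example'):
    mac = hmac.new(secret_key, message, digestmod=sha256)
    return encodebytes(mac.digest()).decode('utf-8').strip()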
class AnonAuthHandler(AuthHandler, HmacKeys):
"""
Implements Anonymous requests.
"""
capability = ['anon']
def __init__(self, host, config, provider):
super(AnonAuthHandler, self).__init__(host, config, provider)
def add_auth(self, http_request, **kwargs):
pass
class HmacAuthV1Handler(AuthHandler, HmacKeys):
""" Implements the HMAC request signing used by S3 and GS."""
capability = ['hmac-v1', 's3']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
self._hmac_256 = None
def update_provider(self, provider):
super(HmacAuthV1Handler, self).update_provider(provider)
self._hmac_256 = None
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
method = http_request.method
auth_path = http_request.auth_path
if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
if self._provider.security_token:
key = self._provider.security_token_header
headers[key] = self._provider.security_token
string_to_sign = boto.utils.canonical_string(method, auth_path,
headers, None,
self._provider)
boto.log.debug('StringToSign:\n%s' % string_to_sign)
b64_hmac = self.sign_string(string_to_sign)
auth_hdr = self._provider.auth_header
auth = ("%s %s:%s" % (auth_hdr, self._provider.access_key, b64_hmac))
boto.log.debug('Signature:\n%s' % auth)
headers['Authorization'] = auth
class HmacAuthV2Handler(AuthHandler, HmacKeys):
"""
Implements the simplified HMAC authorization used by CloudFront.
"""
capability = ['hmac-v2', 'cloudfront']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
self._hmac_256 = None
def update_provider(self, provider):
super(HmacAuthV2Handler, self).update_provider(provider)
self._hmac_256 = None
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
if self._provider.security_token:
key = self._provider.security_token_header
headers[key] = self._provider.security_token
b64_hmac = self.sign_string(headers['Date'])
auth_hdr = self._provider.auth_header
headers['Authorization'] = ("%s %s:%s" %
(auth_hdr,
self._provider.access_key, b64_hmac))
class HmacAuthV3Handler(AuthHandler, HmacKeys):
"""Implements the new Version 3 HMAC authorization used by Route53."""
capability = ['hmac-v3', 'route53', 'ses']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
if self._provider.security_token:
key = self._provider.security_token_header
headers[key] = self._provider.security_token
b64_hmac = self.sign_string(headers['Date'])
s = "AWS3-HTTPS AWSAccessKeyId=%s," % self._provider.access_key
s += "Algorithm=%s,Signature=%s" % (self.algorithm(), b64_hmac)
headers['X-Amzn-Authorization'] = s
class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys):
"""
Implements the new Version 3 HMAC authorization used by DynamoDB.
"""
capability = ['hmac-v3-http']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
headers_to_sign = {'Host': self.host}
for name, value in http_request.headers.items():
lname = name.lower()
if lname.startswith('x-amz'):
headers_to_sign[name] = value
return headers_to_sign
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
l = sorted(['%s:%s' % (n.lower().strip(),
headers_to_sign[n].strip()) for n in headers_to_sign])
return '\n'.join(l)
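    # For example, {'Host': 'dynamodb.us-east-1.amazonaws.com',
    #               'X-Amz-Date': 'Thu, 01 Jan 2015 00:00:00 GMT'}
    # canonicalizes to (names lowercased, sorted, newline-joined):
    #   host:dynamodb.us-east-1.amazonaws.com
    #   x-amz-date:Thu, 01 Jan 2015 00:00:00 GMT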
def string_to_sign(self, http_request):
"""
Return the canonical StringToSign as well as a dict
containing the original version of all headers that
were included in the StringToSign.
"""
headers_to_sign = self.headers_to_sign(http_request)
canonical_headers = self.canonical_headers(headers_to_sign)
string_to_sign = '\n'.join([http_request.method,
http_request.auth_path,
'',
canonical_headers,
'',
http_request.body])
return string_to_sign, headers_to_sign
def add_auth(self, req, **kwargs):
"""
Add AWS3 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object.
"""
# This could be a retry. Make sure the previous
# authorization header is removed first.
if 'X-Amzn-Authorization' in req.headers:
del req.headers['X-Amzn-Authorization']
req.headers['X-Amz-Date'] = formatdate(usegmt=True)
if self._provider.security_token:
req.headers['X-Amz-Security-Token'] = self._provider.security_token
string_to_sign, headers_to_sign = self.string_to_sign(req)
boto.log.debug('StringToSign:\n%s' % string_to_sign)
hash_value = sha256(string_to_sign.encode('utf-8')).digest()
b64_hmac = self.sign_string(hash_value)
s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key
s += "Algorithm=%s," % self.algorithm()
s += "SignedHeaders=%s," % ';'.join(headers_to_sign)
s += "Signature=%s" % b64_hmac
req.headers['X-Amzn-Authorization'] = s
class HmacAuthV4Handler(AuthHandler, HmacKeys):
"""
Implements the new Version 4 HMAC authorization.
"""
capability = ['hmac-v4']
def __init__(self, host, config, provider,
service_name=None, region_name=None):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
# You can set the service_name and region_name to override the
# values which would otherwise come from the endpoint, e.g.
# <service>.<region>.amazonaws.com.
self.service_name = service_name
self.region_name = region_name
def _sign(self, key, msg, hex=False):
if not isinstance(key, bytes):
key = key.encode('utf-8')
if hex:
sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
else:
sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
return sig
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
host_header_value = self.host_header(self.host, http_request)
headers_to_sign = {'Host': host_header_value}
for name, value in http_request.headers.items():
lname = name.lower()
if lname.startswith('x-amz'):
if isinstance(value, bytes):
value = value.decode('utf-8')
headers_to_sign[name] = value
return headers_to_sign
def host_header(self, host, http_request):
port = http_request.port
secure = http_request.protocol == 'https'
if ((port == 80 and not secure) or (port == 443 and secure)):
return host
return '%s:%s' % (host, port)
def query_string(self, http_request):
parameter_names = sorted(http_request.params.keys())
pairs = []
for pname in parameter_names:
pval = boto.utils.get_utf8_value(http_request.params[pname])
pairs.append(urllib.parse.quote(pname, safe='') + '=' +
urllib.parse.quote(pval, safe='-_~'))
return '&'.join(pairs)
def canonical_query_string(self, http_request):
# POST requests pass parameters in through the
# http_request.body field.
if http_request.method == 'POST':
return ""
l = []
for param in sorted(http_request.params):
value = boto.utils.get_utf8_value(http_request.params[param])
l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'),
urllib.parse.quote(value, safe='-_.~')))
return '&'.join(l)
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
canonical = []
for header in headers_to_sign:
c_name = header.lower().strip()
raw_value = str(headers_to_sign[header])
if '"' in raw_value:
c_value = raw_value.strip()
else:
c_value = ' '.join(raw_value.strip().split())
canonical.append('%s:%s' % (c_name, c_value))
return '\n'.join(sorted(canonical))
def signed_headers(self, headers_to_sign):
l = ['%s' % n.lower().strip() for n in headers_to_sign]
l = sorted(l)
return ';'.join(l)
def canonical_uri(self, http_request):
path = http_request.auth_path
# Normalize the path
        # in windows normpath('/') will be '\\' so we change it back to '/'
normalized = posixpath.normpath(path).replace('\\', '/')
# Then urlencode whatever's left.
encoded = urllib.parse.quote(normalized)
if len(path) > 1 and path.endswith('/'):
encoded += '/'
return encoded
def payload(self, http_request):
body = http_request.body
# If the body is a file like object, we can use
# boto.utils.compute_hash, which will avoid reading
# the entire body into memory.
if hasattr(body, 'seek') and hasattr(body, 'read'):
return boto.utils.compute_hash(body, hash_algorithm=sha256)[0]
elif not isinstance(body, bytes):
body = body.encode('utf-8')
return sha256(body).hexdigest()
def canonical_request(self, http_request):
cr = [http_request.method.upper()]
cr.append(self.canonical_uri(http_request))
cr.append(self.canonical_query_string(http_request))
headers_to_sign = self.headers_to_sign(http_request)
cr.append(self.canonical_headers(headers_to_sign) + '\n')
cr.append(self.signed_headers(headers_to_sign))
cr.append(self.payload(http_request))
return '\n'.join(cr)
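    # The joined canonical request has the documented SigV4 shape:
    #   HTTPRequestMethod
    #   CanonicalURI
    #   CanonicalQueryString
    #   CanonicalHeaders + '\n'
    #   SignedHeaders
    #   HexEncode(SHA256(RequestPayload))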
def scope(self, http_request):
scope = [self._provider.access_key]
scope.append(http_request.timestamp)
scope.append(http_request.region_name)
scope.append(http_request.service_name)
scope.append('aws4_request')
return '/'.join(scope)
def split_host_parts(self, host):
return host.split('.')
def determine_region_name(self, host):
parts = self.split_host_parts(host)
if self.region_name is not None:
region_name = self.region_name
elif len(parts) > 1:
if parts[1] == 'us-gov':
region_name = 'us-gov-west-1'
else:
if len(parts) == 3:
region_name = 'us-east-1'
else:
region_name = parts[1]
else:
region_name = parts[0]
return region_name
def determine_service_name(self, host):
parts = self.split_host_parts(host)
if self.service_name is not None:
service_name = self.service_name
else:
service_name = parts[0]
return service_name
def credential_scope(self, http_request):
scope = []
http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
scope.append(http_request.timestamp)
# The service_name and region_name either come from:
# * The service_name/region_name attrs or (if these values are None)
# * parsed from the endpoint <service>.<region>.amazonaws.com.
region_name = self.determine_region_name(http_request.host)
service_name = self.determine_service_name(http_request.host)
http_request.service_name = service_name
http_request.region_name = region_name
scope.append(http_request.region_name)
scope.append(http_request.service_name)
scope.append('aws4_request')
return '/'.join(scope)
def string_to_sign(self, http_request, canonical_request):
"""
Return the canonical StringToSign as well as a dict
containing the original version of all headers that
were included in the StringToSign.
"""
sts = ['AWS4-HMAC-SHA256']
sts.append(http_request.headers['X-Amz-Date'])
sts.append(self.credential_scope(http_request))
sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
return '\n'.join(sts)
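    # i.e. the string to sign reads:
    #   AWS4-HMAC-SHA256
    #   <X-Amz-Date timestamp>
    #   <date>/<region>/<service>/aws4_request
    #   HexEncode(SHA256(canonical_request))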
def signature(self, http_request, string_to_sign):
key = self._provider.secret_key
k_date = self._sign(('AWS4' + key).encode('utf-8'),
http_request.timestamp)
k_region = self._sign(k_date, http_request.region_name)
k_service = self._sign(k_region, http_request.service_name)
k_signing = self._sign(k_service, 'aws4_request')
return self._sign(k_signing, string_to_sign, hex=True)
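    # This is the standard SigV4 key-derivation chain; e.g. for the scope
    # 20150830/us-east-1/iam/aws4_request:
    #   kDate    = HMAC('AWS4' + secret_key, '20150830')
    #   kRegion  = HMAC(kDate, 'us-east-1')
    #   kService = HMAC(kRegion, 'iam')
    #   kSigning = HMAC(kService, 'aws4_request')
    #   signature = HexEncode(HMAC(kSigning, string_to_sign))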
def add_auth(self, req, **kwargs):
"""
Add AWS4 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object.
"""
# This could be a retry. Make sure the previous
# authorization header is removed first.
if 'X-Amzn-Authorization' in req.headers:
del req.headers['X-Amzn-Authorization']
now = datetime.datetime.utcnow()
req.headers['X-Amz-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
if self._provider.security_token:
req.headers['X-Amz-Security-Token'] = self._provider.security_token
qs = self.query_string(req)
qs_to_post = qs
        # We do not want to include any params that were mangled into
        # the params if performing s3-sigv4, since they do not
        # belong in the body of a POST for some requests. "Mangled"
        # refers to items from the query string of the URL being added
        # to the request params. Those params would then get added to
        # the body of the request, even though the query string of the
        # URL does not belong in the body. ``unmangled_req`` is the
        # request as it was prior to the mangling; this kwarg will
        # only appear for s3-sigv4.
if 'unmangled_req' in kwargs:
qs_to_post = self.query_string(kwargs['unmangled_req'])
if qs_to_post and req.method == 'POST':
# Stash request parameters into post body
# before we generate the signature.
req.body = qs_to_post
req.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
req.headers['Content-Length'] = str(len(req.body))
else:
# Safe to modify req.path here since
# the signature will use req.auth_path.
req.path = req.path.split('?')[0]
if qs:
# Don't insert the '?' unless there's actually a query string
req.path = req.path + '?' + qs
canonical_request = self.canonical_request(req)
boto.log.debug('CanonicalRequest:\n%s' % canonical_request)
string_to_sign = self.string_to_sign(req, canonical_request)
boto.log.debug('StringToSign:\n%s' % string_to_sign)
signature = self.signature(req, string_to_sign)
boto.log.debug('Signature:\n%s' % signature)
headers_to_sign = self.headers_to_sign(req)
l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(req)]
l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
l.append('Signature=%s' % signature)
req.headers['Authorization'] = ','.join(l)
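        # The finished header looks like (values illustrative):
        #   Authorization: AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/
        #       us-east-1/iam/aws4_request,SignedHeaders=host;x-amz-date,
        #       Signature=<64 hex chars>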
class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
"""
Implements a variant of Version 4 HMAC authorization specific to S3.
"""
capability = ['hmac-v4-s3']
def __init__(self, *args, **kwargs):
super(S3HmacAuthV4Handler, self).__init__(*args, **kwargs)
if self.region_name:
self.region_name = self.clean_region_name(self.region_name)
def clean_region_name(self, region_name):
if region_name.startswith('s3-'):
return region_name[3:]
return region_name
def canonical_uri(self, http_request):
# S3 does **NOT** do path normalization that SigV4 typically does.
# Urlencode the path, **NOT** ``auth_path`` (because vhosting).
path = urllib.parse.urlparse(http_request.path)
# Because some quoting may have already been applied, let's back it out.
unquoted = urllib.parse.unquote(path.path)
# Requote, this time addressing all characters.
encoded = urllib.parse.quote(unquoted)
return encoded
def canonical_query_string(self, http_request):
        # Note that unlike the parent class we do not return an empty
        # string for POST requests; query strings in the URL are included
        # in the canonical query string.
l = []
for param in sorted(http_request.params):
value = boto.utils.get_utf8_value(http_request.params[param])
l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'),
urllib.parse.quote(value, safe='-_.~')))
return '&'.join(l)
def host_header(self, host, http_request):
port = http_request.port
secure = http_request.protocol == 'https'
if ((port == 80 and not secure) or (port == 443 and secure)):
return http_request.host
return '%s:%s' % (http_request.host, port)
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
host_header_value = self.host_header(self.host, http_request)
headers_to_sign = {'Host': host_header_value}
for name, value in http_request.headers.items():
lname = name.lower()
# Hooray for the only difference! The main SigV4 signer only does
# ``Host`` + ``x-amz-*``. But S3 wants pretty much everything
# signed, except for authorization itself.
if lname not in ['authorization']:
headers_to_sign[name] = value
return headers_to_sign
def determine_region_name(self, host):
# S3's different format(s) of representing region/service from the
# rest of AWS makes this hurt too.
#
# Possible domain formats:
# - s3.amazonaws.com (Classic)
# - s3-us-west-2.amazonaws.com (Specific region)
# - bukkit.s3.amazonaws.com (Vhosted Classic)
# - bukkit.s3-ap-northeast-1.amazonaws.com (Vhosted specific region)
# - s3.cn-north-1.amazonaws.com.cn - (Beijing region)
# - bukkit.s3.cn-north-1.amazonaws.com.cn - (Vhosted Beijing region)
parts = self.split_host_parts(host)
if self.region_name is not None:
region_name = self.region_name
else:
# Classic URLs - s3-us-west-2.amazonaws.com
if len(parts) == 3:
region_name = self.clean_region_name(parts[0])
# Special-case for Classic.
if region_name == 's3':
region_name = 'us-east-1'
else:
# Iterate over the parts in reverse order.
for offset, part in enumerate(reversed(parts)):
part = part.lower()
# Look for the first thing starting with 's3'.
# Until there's a ``.s3`` TLD, we should be OK. :P
if part == 's3':
# If it's by itself, the region is the previous part.
region_name = parts[-offset]
# Unless it's Vhosted classic
if region_name == 'amazonaws':
region_name = 'us-east-1'
break
elif part.startswith('s3-'):
region_name = self.clean_region_name(part)
break
return region_name
def determine_service_name(self, host):
# Should this signing mechanism ever be used for anything else, this
# will fail. Consider utilizing the logic from the parent class should
# you find yourself here.
return 's3'
def mangle_path_and_params(self, req):
"""
Returns a copy of the request object with fixed ``auth_path/params``
attributes from the original.
"""
modified_req = copy.copy(req)
# Unlike the most other services, in S3, ``req.params`` isn't the only
# source of query string parameters.
# Because of the ``query_args``, we may already have a query string
# **ON** the ``path/auth_path``.
# Rip them apart, so the ``auth_path/params`` can be signed
# appropriately.
parsed_path = urllib.parse.urlparse(modified_req.auth_path)
modified_req.auth_path = parsed_path.path
if modified_req.params is None:
modified_req.params = {}
else:
            # To keep the original request object untouched, we must make
            # a copy of the params dictionary, because the shallow copy of
            # the request still refers to the original request's params.
copy_params = req.params.copy()
modified_req.params = copy_params
raw_qs = parsed_path.query
existing_qs = urllib.parse.parse_qs(
raw_qs,
keep_blank_values=True
)
# ``parse_qs`` will return lists. Don't do that unless there's a real,
# live list provided.
for key, value in existing_qs.items():
if isinstance(value, (list, tuple)):
if len(value) == 1:
existing_qs[key] = value[0]
modified_req.params.update(existing_qs)
return modified_req
def payload(self, http_request):
if http_request.headers.get('x-amz-content-sha256'):
return http_request.headers['x-amz-content-sha256']
return super(S3HmacAuthV4Handler, self).payload(http_request)
def add_auth(self, req, **kwargs):
if 'x-amz-content-sha256' not in req.headers:
if '_sha256' in req.headers:
req.headers['x-amz-content-sha256'] = req.headers.pop('_sha256')
else:
req.headers['x-amz-content-sha256'] = self.payload(req)
updated_req = self.mangle_path_and_params(req)
return super(S3HmacAuthV4Handler, self).add_auth(updated_req,
unmangled_req=req,
**kwargs)
def presign(self, req, expires, iso_date=None):
"""
Presign a request using SigV4 query params. Takes in an HTTP request
and an expiration time in seconds and returns a URL.
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
if iso_date is None:
iso_date = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
region = self.determine_region_name(req.host)
service = self.determine_service_name(req.host)
params = {
'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': '%s/%s/%s/%s/aws4_request' % (
self._provider.access_key,
iso_date[:8],
region,
service
),
'X-Amz-Date': iso_date,
'X-Amz-Expires': expires,
'X-Amz-SignedHeaders': 'host'
}
if self._provider.security_token:
params['X-Amz-Security-Token'] = self._provider.security_token
headers_to_sign = self.headers_to_sign(req)
l = sorted(['%s' % n.lower().strip() for n in headers_to_sign])
params['X-Amz-SignedHeaders'] = ';'.join(l)
req.params.update(params)
cr = self.canonical_request(req)
# We need to replace the payload SHA with a constant
cr = '\n'.join(cr.split('\n')[:-1]) + '\nUNSIGNED-PAYLOAD'
# Date header is expected for string_to_sign, but unused otherwise
req.headers['X-Amz-Date'] = iso_date
sts = self.string_to_sign(req, cr)
signature = self.signature(req, sts)
# Add signature to params now that we have it
req.params['X-Amz-Signature'] = signature
return 'https://%s%s?%s' % (req.host, req.path,
urllib.parse.urlencode(req.params))
class STSAnonHandler(AuthHandler):
"""
Provides pure query construction (no actual signing).
Used for making anonymous STS request for operations like
``assume_role_with_web_identity``.
"""
capability = ['sts-anon']
def _escape_value(self, value):
# This is changed from a previous version because this string is
# being passed to the query string and query strings must
# be url encoded. In particular STS requires the saml_response to
# be urlencoded when calling assume_role_with_saml.
return urllib.parse.quote(value)
def _build_query_string(self, params):
keys = list(params.keys())
keys.sort(key=lambda x: x.lower())
pairs = []
for key in keys:
val = boto.utils.get_utf8_value(params[key])
pairs.append(key + '=' + self._escape_value(val.decode('utf-8')))
return '&'.join(pairs)
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
qs = self._build_query_string(
http_request.params
)
boto.log.debug('query_string in body: %s' % qs)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
# This will be a POST so the query string should go into the body
# as opposed to being in the uri
http_request.body = qs
class QuerySignatureHelper(HmacKeys):
"""
    Helper for query-signature-based auth handlers.
    Concrete subclasses need to implement the _calc_signature method.
"""
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
params = http_request.params
params['AWSAccessKeyId'] = self._provider.access_key
params['SignatureVersion'] = self.SignatureVersion
params['Timestamp'] = boto.utils.get_ts()
qs, signature = self._calc_signature(
http_request.params, http_request.method,
http_request.auth_path, http_request.host)
boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
if http_request.method == 'POST':
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
http_request.body = qs + '&Signature=' + urllib.parse.quote_plus(signature)
http_request.headers['Content-Length'] = str(len(http_request.body))
else:
http_request.body = ''
# if this is a retried request, the qs from the previous try will
# already be there, we need to get rid of that and rebuild it
http_request.path = http_request.path.split('?')[0]
http_request.path = (http_request.path + '?' + qs +
'&Signature=' + urllib.parse.quote_plus(signature))
class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
"""Provides Signature V0 Signing"""
SignatureVersion = 0
capability = ['sign-v0']
def _calc_signature(self, params, *args):
boto.log.debug('using _calc_signature_0')
hmac = self._get_hmac()
s = params['Action'] + params['Timestamp']
hmac.update(s.encode('utf-8'))
        keys = sorted(params, key=lambda k: k.lower())
pairs = []
for key in keys:
val = boto.utils.get_utf8_value(params[key])
pairs.append(key + '=' + urllib.parse.quote(val))
qs = '&'.join(pairs)
return (qs, base64.b64encode(hmac.digest()))
class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
"""
Provides Query Signature V1 Authentication.
"""
SignatureVersion = 1
capability = ['sign-v1', 'mturk']
def __init__(self, *args, **kw):
QuerySignatureHelper.__init__(self, *args, **kw)
AuthHandler.__init__(self, *args, **kw)
self._hmac_256 = None
def _calc_signature(self, params, *args):
boto.log.debug('using _calc_signature_1')
hmac = self._get_hmac()
        keys = sorted(params, key=lambda k: k.lower())
pairs = []
for key in keys:
hmac.update(key.encode('utf-8'))
val = boto.utils.get_utf8_value(params[key])
hmac.update(val)
pairs.append(key + '=' + urllib.parse.quote(val))
qs = '&'.join(pairs)
return (qs, base64.b64encode(hmac.digest()))
class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler):
"""Provides Query Signature V2 Authentication."""
SignatureVersion = 2
    capability = ['sign-v2', 'ec2', 'emr', 'fps', 'ecs',
                  'sdb', 'iam', 'rds', 'sns', 'sqs', 'cloudformation']
def _calc_signature(self, params, verb, path, server_name):
boto.log.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_name.lower(), path)
hmac = self._get_hmac()
params['SignatureMethod'] = self.algorithm()
if self._provider.security_token:
params['SecurityToken'] = self._provider.security_token
keys = sorted(params.keys())
pairs = []
for key in keys:
val = boto.utils.get_utf8_value(params[key])
pairs.append(urllib.parse.quote(key, safe='') + '=' +
urllib.parse.quote(val, safe='-_~'))
qs = '&'.join(pairs)
boto.log.debug('query string: %s' % qs)
string_to_sign += qs
boto.log.debug('string_to_sign: %s' % string_to_sign)
hmac.update(string_to_sign.encode('utf-8'))
b64 = base64.b64encode(hmac.digest())
boto.log.debug('len(b64)=%d' % len(b64))
boto.log.debug('base64 encoded digest: %s' % b64)
return (qs, b64)
class POSTPathQSV2AuthHandler(QuerySignatureV2AuthHandler, AuthHandler):
"""
Query Signature V2 Authentication relocating signed query
into the path and allowing POST requests with Content-Types.
"""
capability = ['mws']
def add_auth(self, req, **kwargs):
req.params['AWSAccessKeyId'] = self._provider.access_key
req.params['SignatureVersion'] = self.SignatureVersion
req.params['Timestamp'] = boto.utils.get_ts()
qs, signature = self._calc_signature(req.params, req.method,
req.auth_path, req.host)
boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
if req.method == 'POST':
req.headers['Content-Length'] = str(len(req.body))
req.headers['Content-Type'] = req.headers.get('Content-Type',
'text/plain')
else:
req.body = ''
# if this is a retried req, the qs from the previous try will
# already be there, we need to get rid of that and rebuild it
req.path = req.path.split('?')[0]
req.path = (req.path + '?' + qs +
'&Signature=' + urllib.parse.quote_plus(signature))
def get_auth_handler(host, config, provider, requested_capability=None):
"""Finds an AuthHandler that is ready to authenticate.
    Iterates through all the registered AuthHandlers to find one that is
    willing to handle the requested capabilities, config and provider.
:type host: string
:param host: The name of the host
    :type config: boto.pyami.config.Config
    :param config: Boto configuration object
    :type provider: boto.provider.Provider
    :param provider: Provider object holding the credentials to use
Returns:
An implementation of AuthHandler.
Raises:
boto.exception.NoAuthHandlerFound
"""
ready_handlers = []
auth_handlers = boto.plugin.get_plugin(AuthHandler, requested_capability)
for handler in auth_handlers:
try:
ready_handlers.append(handler(host, config, provider))
except boto.auth_handler.NotReadyToAuthenticate:
pass
if not ready_handlers:
checked_handlers = auth_handlers
names = [handler.__name__ for handler in checked_handlers]
raise boto.exception.NoAuthHandlerFound(
'No handler was ready to authenticate. %d handlers were checked.'
' %s '
'Check your credentials' % (len(names), str(names)))
# We select the last ready auth handler that was loaded, to allow users to
# customize how auth works in environments where there are shared boto
# config files (e.g., /etc/boto.cfg and ~/.boto): The more general,
# system-wide shared configs should be loaded first, and the user's
# customizations loaded last. That way, for example, the system-wide
# config might include a plugin_directory that includes a service account
# auth plugin shared by all users of a Google Compute Engine instance
# (allowing sharing of non-user data between various services), and the
# user could override this with a .boto config that includes user-specific
# credentials (for access to user data).
return ready_handlers[-1]
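# Illustrative only: connection classes call this helper roughly as
#   handler = get_auth_handler(host, config, provider, ['hmac-v4'])
#   handler.add_auth(http_request)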
def detect_potential_sigv4(func):
def _wrapper(self):
if os.environ.get('EC2_USE_SIGV4', False):
return ['hmac-v4']
if boto.config.get('ec2', 'use-sigv4', False):
return ['hmac-v4']
if hasattr(self, 'region'):
# If you're making changes here, you should also check
# ``boto/iam/connection.py``, as several things there are also
# endpoint-related.
if getattr(self.region, 'endpoint', ''):
for test in SIGV4_DETECT:
if test in self.region.endpoint:
return ['hmac-v4']
return func(self)
return _wrapper
def detect_potential_s3sigv4(func):
def _wrapper(self):
if os.environ.get('S3_USE_SIGV4', False):
return ['hmac-v4-s3']
if boto.config.get('s3', 'use-sigv4', False):
return ['hmac-v4-s3']
if hasattr(self, 'host'):
# If you're making changes here, you should also check
# ``boto/iam/connection.py``, as several things there are also
# endpoint-related.
for test in SIGV4_DETECT:
if test in self.host:
return ['hmac-v4-s3']
return func(self)
return _wrapper
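# Illustrative: connection classes decorate their capability hooks with these
# helpers, roughly as
#   @detect_potential_sigv4
#   def _required_auth_capability(self):
#       return ['hmac-v1']
# so SigV4 can be forced via the environment, boto config, or endpoint match.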
|
tpodowd/boto
|
boto/auth.py
|
Python
|
mit
| 40,139
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2016 DennyZhang.com
## Licensed under MIT
## https://www.dennyzhang.com/wp-content/mit_license.txt
##
## File : ufw_add_or_remove.py
## Author : Denny <contact@dennyzhang.com>
## Description :
## Here we assume firewall should allow all traffic within the Intranet.
## Running a cluster of nodes in certain public cloud,
## like Linode, we don't have a private subnet.
##
## Thus to add one node, we need:
## 1. Properly configure firewall in the new node
## 2. Change firewall rules in existing node, to allow incoming traffic
##
## Thus to remove an existing node, we need:
## Go to all existing nodes, and remove firewall rules related to current node
## --
## Created : <2016-12-13>
## Updated: Time-stamp: <2016-12-26 12:09:01>
##-------------------------------------------------------------------
import os, sys
import argparse
import subprocess
################################################################################
## TODO: move to common library
def check_variable_is_set(val, msg):
if val is None:
sys.exit("%s" % (msg))
def remove_comment_in_str(string):
l = []
for line in string.split("\n"):
line = line.strip()
if line.startswith("#") or line == "":
continue
l.append(line)
return "\n".join(l)
################################################################################
def generate_ansible_host(string, fname):
    with open(fname, 'w') as f:
for row in string.split("\n"):
f.write("%s\n" % row)
def initialize_ufw_status(ssh_ip, ssh_username, ssh_key, ssh_port, \
allow_ip_list, allow_port_list):
# Sample: ansible all -m script -a \
# "/root/ufw_add_node_to_cluster.sh 192.168.0.2,192.168.0.3 2702,80,443"
# TODO: use a temporary host file
tmp_host_fname = '/tmp/hosts_initialize'
generate_ansible_host("\n".join(allow_ip_list), tmp_host_fname)
command = "ansible all -i %s -m script -a '/root/ufw_add_node_to_cluster.sh' %s %s" % \
(tmp_host_fname, ",".join(allow_ip_list), ",".join(allow_port_list))
print("Initialize ufw status, command: %s" % (command))
# TODO: quit, if the command fails
p = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE)
while True:
out = p.stderr.read(1)
        if out == '' and p.poll() is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
# TODO: get status and remove file
def allow_src_ip(ssh_ip, ssh_username, ssh_key, ssh_port, src_ip):
# ansible all -m command -a "ufw allow from 192.168.0.3"
# TODO: use a temporary host file
tmp_host_fname = '/tmp/hosts_allow_ip'
generate_ansible_host(src_ip, tmp_host_fname)
command = "ansible all -i %s -m command -a 'ufw allow from %s'" \
% (tmp_host_fname, src_ip)
print("allow_src_ip. ssh_ip: %s, command: %s" % (ssh_ip, command))
# TODO: quit, if the command fails
p = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE)
while True:
out = p.stderr.read(1)
        if out == '' and p.poll() is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
# TODO: get status and remove file
def disallow_src_ip(ssh_ip, ssh_username, ssh_key, ssh_port, src_ip):
# ansible all -m command -a "ufw delete allow from 192.168.0.3"
command = "ansible all -m command -a 'ufw delete allow from %s'" % (src_ip)
print("disallow_src_ip. ssh_ip: %s, command: %s" % (ssh_ip, command))
# TODO: quit, if the command fails
p = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE)
while True:
out = p.stderr.read(1)
        if out == '' and p.poll() is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
# TODO: get status and remove file
################################################################################
# How To Test:
# export server_ip="192.168.0.5"
# export server_list="192.168.0.2 192.168.0.3 192.168.0.4"
# python ./ufw_add_or_remove.py --action add
# python ./ufw_add_or_remove.py --action remove
# Install dependency packages
# http://docs.ansible.com/ansible/intro_configuration.html
# https://serversforhackers.com/running-ansible-programmatically
# apt-get install -y python-pip
# pip install ansible
# ANSIBLE_CONFIG: ~/.ansible.cfg
##################################################
# [defaults]
# log_path = /var/log/ansible.log
# callback_plugins = /path/to/our/ansible/plugins/callback_plugins:~/.ansible/plugins/callback_plugins/:/usr/share/ansible_plugins/callback_plugins
#
# [ssh_connection]
# ssh_args = -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -o ControlMaster=auto -o ControlPersist=60s
# control_path = ~/.ansible/cp/ansible-ssh-%%h-%%p-%%r
##################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--action', default='',
required=True, help="Supported action: add or remove", type=str)
l = parser.parse_args()
if l.action not in ['add', 'remove']:
print("Error: supported action is either add or remove")
sys.exit(1)
script_fname="/root/ufw_add_node_to_cluster.sh"
# TODO: specify ssh private key file
ssh_key = "/var/lib/jenkins/.ssh"
ssh_username = "root"
ssh_port = "2702"
server_ip = os.environ.get('server_ip')
check_variable_is_set(server_ip, "ERROR: server_ip is not configured")
server_list_existing = os.environ.get('server_list')
check_variable_is_set(server_list_existing, "ERROR: server_list is not configured")
server_list_existing = server_list_existing.replace(" ", "\n")
server_list_existing = remove_comment_in_str(server_list_existing)
ssh_ip = server_ip
# TODO: get action from user input
if l.action == "add":
allow_port_list = ["2702", "80", "443", "22"]
print("Update ufw rules in new server: %s" % (ssh_ip))
initialize_ufw_status(ssh_ip, ssh_username, ssh_key, ssh_port, \
server_list_existing.split("\n"), allow_port_list)
for src_ip_tmp in server_list_existing.split("\n"):
allow_src_ip(ssh_ip, ssh_username, ssh_key, ssh_port, src_ip_tmp)
print("Update ufw rules in existing servers")
        # TODO: use the ansible python module to speed up the logic
for ssh_ip_tmp in server_list_existing.split("\n"):
allow_src_ip(ssh_ip_tmp, ssh_username, ssh_key, ssh_port, ssh_ip)
if l.action == "remove":
        # TODO: use the ansible python module to speed up the logic
for ssh_ip_tmp in server_list_existing.split("\n"):
disallow_src_ip(ssh_ip_tmp, ssh_username, ssh_key, ssh_port, ssh_ip)
## File : ufw_add_or_remove.py ends
|
TOTVS/mdmpublic
|
bash/ufw/ufw_add_or_remove.py
|
Python
|
bsd-2-clause
| 7,150
|
from model.group import Group
import pytest
def test_add_group(app, db, json_groups):
group = json_groups
with pytest.allure.step('Given a group list'):
old_groups = db.get_group_list()
with pytest.allure.step('When I add a group %s to the list' % group):
app.group.create(group)
#assert len (old_groups) + 1 == app.group.count()
with pytest.allure.step('Then the new group list is equal to the old list with the added group'):
new_groups = db.get_group_list()
old_groups.append(group)
        assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
|
potolock/proverca
|
test/test_add_group.py
|
Python
|
apache-2.0
| 690
|
# LibreShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of LibreShot Video Editor (http://launchpad.net/libreshot/).
#
# LibreShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibreShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
import os
import gtk
import time
from windows.SimpleGtkBuilderApp import SimpleGtkBuilderApp
from windows import preferences
from classes import project, messagebox
# init the foreign language
from language import Language_Init
class frmAddFiles(SimpleGtkBuilderApp):
def __init__(self, path="AddFiles.ui", root="frmAddFiles", domain="LibreShot", form=None, project=None, **kwargs):
SimpleGtkBuilderApp.__init__(self, os.path.join(project.UI_DIR, path), root, domain, **kwargs)
# Add language support
_ = Language_Init.Translator(project).lang.gettext
self.frmAddFiles.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
self.frmAddFiles.set_select_multiple(True)
self.frmAddFiles.set_local_only(False)
self.form = form
self.project = project
#open the last used folder
default_folder = preferences.Settings.app_state["import_folder"]
if default_folder != "None":
self.frmAddFiles.set_current_folder(preferences.Settings.app_state["import_folder"])
self.frmAddFiles.show_all()
def on_btnCancel_clicked(self, widget, *args):
self.frmAddFiles.destroy()
def on_btnAdd_clicked(self, widget, *args):
files_to_add = self.frmAddFiles.get_filenames()
# get a reference to the language translate method
_ = self.project.translate
# create a unique session id, to prevent duplicate prompts
session = str(time.time())
# The total number of ok files selected (not folders)
total_ok_files = 0
# The total number of broken files selected (could not be imported)
total_broken_files = 0
# The total number of files already imported selected
total_duplicate_files = 0
# The total number of folders selected
total_folders = 0
try:
for file in files_to_add:
# add each file
result = self.project.project_folder.AddFile(file, session=session)
# parse the results and add to the total
total_ok_files += result[0]
total_broken_files += result[1]
total_duplicate_files += result[2]
total_folders += result[3]
# The number of total selected files, not including folders
total_files = total_ok_files + total_broken_files + total_duplicate_files
# print error messages (if needed)
if total_files == 0:
if total_folders == 1:
messagebox.show(_("Empty Folder "), _("The selected folder was empty."))
else:
messagebox.show(_("Empty Folders"), _("The selected folders were empty."))
else:
if total_files == total_broken_files:
if total_files == 1:
messagebox.show(_("Unsupported File Type"), _("LibreShot does not support this file type."))
else:
messagebox.show(_("Unsupported File Types"), _("LibreShot supports none of the file types of the selected files."))
elif total_files == total_duplicate_files:
if total_files == 1:
messagebox.show(_("Already Imported File"), _("The selected file has already been imported to the project."))
else:
messagebox.show(_("Already Imported Files"), _("All of the selected files have already been imported to the project."))
elif total_ok_files == 0:
messagebox.show(_("File Import Error"), _("The selected files either have an unsupported file type or have already been imported to the project."))
# set the project as modified
self.project.set_project_modified(is_modified=True, refresh_xml=False)
# refresh the main form
self.form.refresh_files()
except:
messagebox.show(_("Error"), _("There was an error importing the selected file(s)."))
#set the last used folder
current_folder = self.frmAddFiles.get_current_folder()
if current_folder is None:
current_folder = "None"
preferences.Settings.app_state["import_folder"] = current_folder
# clear and destroy this dialog
self.form.import_files_dialog = None
self.frmAddFiles.destroy()
def on_frmAddFiles_file_activated(self, widget, *args):
#call the open project method when a file is double clicked
self.on_btnAdd_clicked(widget, *args)
class frmReplaceFiles(SimpleGtkBuilderApp):
def __init__(self, path="AddFiles.ui", root="frmAddFiles", domain="LibreShot", form=None, project=None,clip=None, **kwargs):
SimpleGtkBuilderApp.__init__(self, os.path.join(project.UI_DIR, path), root, domain, **kwargs)
# Add language support
_ = Language_Init.Translator(project).lang.gettext
self.frmAddFiles.set_title("LibreShot")
self.frmAddFiles.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
self.frmAddFiles.set_select_multiple(False)
self.form = form
self.project = project
self.clip = clip
self.frmAddFiles.show_all()
def on_btnCancel_clicked(self, widget, *args):
self.frmAddFiles.destroy()
def on_btnAdd_clicked(self, widget, *args):
        self.replace_clip_with = replace_clip_with = self.frmAddFiles.get_filename()
try:
#does the new file already exist in the project?
file_object = self.project.project_folder.FindFile(replace_clip_with)
if not file_object:
#add the file to the project
self.project.project_folder.AddFile(replace_clip_with)
#this method does the actual replacement and modifies the project
self.form.replace_clip(self.clip,replace_clip_with)
except:
messagebox.show(_("Error"), _("There was an error importing the selected file(s)."))
#set the last used folder
current_folder = self.frmAddFiles.get_current_folder()
if current_folder is None:
current_folder = "None"
preferences.Settings.app_state["import_folder"] = current_folder
self.frmAddFiles.destroy()
def on_frmAddFiles_file_activated(self, widget, *args):
#call the open project method when a file is double clicked
self.on_btnAdd_clicked(widget, *args)
def get_replace_clip_with(self):
return self.replace_clip_with
def main():
frm_add_files = frmAddFiles()
frm_add_files.run()
if __name__ == "__main__":
main()
|
XXLRay/libreshot
|
libreshot/windows/AddFiles.py
|
Python
|
gpl-3.0
| 6,712
|
#!/usr/bin/env python
"""
Grid Tables Extension for Python-Markdown
=========================================
Add parsing of grid tables to Python-Markdown. These differ from simple tables
in that they can contain multi-line text and (in my opinion) are cleaner
looking than simple tables. They were inspired by reStructuredText's grid table
syntax. This extension was loosely based on the 'table' extension for
Python-Markdown by Waylan Limberg.
An example of a grid table:
+---------------+---------------+-----------------+
| First Header | Second Header | Third Header |
+===============+===============+=================+
| A cell that | A cell that spans multiple |
| spans | columns. |
| multiple rows +---------------+-----------------+
| | One, two cell | Red & blue cell |
+---------------+---------------+-----------------+
This should be generated as (but colspans and rowspans may not work at the moment):
<table>
<thead>
<tr>
<td>First Header</td>
<td>Second Header</td>
<td>Third Header</td>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="2">A cell that spans multiple rows</td>
<td colspan="2">A cell that spans multiple columns</td>
</tr>
<tr>
<td>One, two cell</td>
<td>Red & blue cell</td>
</tr>
</tbody>
</table>
Licensed under GPLv3 by [Alexander Abbott aka Smartboy](http://smartboyssite.net)
Links referenced during creation of this plugin:
https://gist.github.com/1855764
http://packages.python.org/Markdown/extensions/api.html
https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/tables.py
http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#grid-tables
"""
import markdown
from markdown.util import etree
import re
class GridTableExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
md.parser.blockprocessors.add('grid-table',
GridTableProcessor(md.parser),
'<hashheader')
def makeExtension(configs={}):
return GridTableExtension(configs=configs)
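# Illustrative usage with the old-style Python-Markdown extension API this
# module targets ('text' is assumed to hold a document with a grid table):
#   import markdown
#   html = markdown.markdown(text, extensions=[GridTableExtension()])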
class GridTableCell(object):
"""
A single cell in a grid table. A cell's boundaries are determined by a
starting point in the top left (start_row and start_col), as well as a
width and a height. It also has a colspan and rowspan count for cells that
span multiple rows or columns.
"""
def __init__(self, start_row, start_col, width=1, height=1, colspan=1,
rowspan=1, text=""):
self.text = text
self._start_row = max(0, start_row)
self._start_col = max(0, start_col)
self._width = max(1, width)
self._height = max(1, height)
self._colspan = max(1, colspan)
self._rowspan = max(1, rowspan)
def __str__(self):
"""
For simplicity, the string representation is also the python code
representation.
"""
return self.__repr__()
def __repr__(self):
"""
This is the python representation of the cell. If ran with eval, the
output from this function would create a duplicate instance of this
class.
"""
retval = "GridTableCell(start_row={}, start_col={}, width={}, "
retval += "height={}, colspan={}, rowspan={}, text={})"
return retval.format(repr(self._start_row), repr(self._start_col),
repr(self._width), repr(self._height),
repr(self._colspan), repr(self._rowspan),
repr(self.text))
def __eq__(self, other):
"""
Checks if another cell is equivalent to this one.
"""
return (self.start_row == other.start_row and
self.start_col == other.start_col and
self.width == other.width and
self.height == other.height and
self.colspan == other.colspan and
self.rowspan == other.rowspan)
@property
def start_row(self):
"""
Returns the starting row for the cell.
"""
return self._start_row
@start_row.setter
def start_row(self, value):
"""
Sets the starting row of the cell to either 0 or the value passed in,
depending on which is larger.
"""
self._start_row = max(0, value)
@property
def start_col(self):
"""
Returns the starting column for the cell.
"""
return self._start_col
@start_col.setter
def start_col(self, value):
"""
Sets the starting column of the cell to either 0 or the value passed
in, depending on which is larger.
"""
self._start_col = max(0, value)
@property
def width(self):
"""
Returns the width (in number of characters) of the cell.
"""
return self._width
@width.setter
def width(self, value):
"""
Sets the width (in number of characters) of the cell to either 1 or the
value passed in, depending on which is larger.
"""
self._width = max(1, value)
@property
def height(self):
"""
Returns the height (in number of characters) of the cell.
"""
return self._height
@height.setter
def height(self, value):
"""
Sets the height (in number of characters) of the cell to either 1 or
the value passed in, depending on which is larger.
"""
self._height = max(1, value)
@property
def colspan(self):
"""
Returns the number of columns that this cell spans.
"""
return self._colspan
@colspan.setter
def colspan(self, value):
"""
Sets the number of columns that this cell spans to either 1 or the
value passed in, depending on which is larger.
"""
self._colspan = max(1, value)
@property
def rowspan(self):
"""
Returns the number of rows that this cell spans.
"""
return self._rowspan
@rowspan.setter
def rowspan(self, value):
"""
Sets the number of rows that this cell spans to either 1 or the value
passed in, depending on which is larger.
"""
self._rowspan = max(1, value)
@property
def end_row(self):
"""
Returns the index of which row the cell ends at within the block. This
is changed by modifying either the starting row or the height of this
cell.
"""
return self._start_row + self._height
@property
def end_col(self):
"""
Returns the index of which column the cell ends at within the block.
This is changed by modifying either the starting column or the width of
this cell.
"""
return self._start_col + self._width
class GridTableRow(object):
"""
A single row in a grid table, which can contain any number of cells. Cells
within a row may not start at the same index as where the row starts, since
they may span multiple columns.
"""
def __init__(self, start_row, is_header=False):
self._cells = []
self._start_row = start_row
self._height = None
self.is_header = is_header
def add_cell(self, cell):
"""
Adds a cell to the appropriate position in the row based on where its
left and right edges are. This returns false if a cell overlaps with
another cell in the row, otherwise it returns true.
"""
for i in range(0, len(self._cells)):
if cell.start_col + cell.width <= self._cells[i].start_col:
if i > 0 and not self._cells[i - 1].start_col + self._cells[i - 1].width <= cell.start_col:
return False
self._cells.insert(i, cell)
break
else:
if len(self._cells) > 0 and self._cells[-1].start_col + self._cells[-1].width > cell.start_col:
return False
self._cells.append(cell)
relative_height = cell.start_row + cell.height - self._start_row
if self._height is None or relative_height < self._height:
self._height = relative_height
return True
def get_all_cells(self):
"""
A generator which yields all cells within the row. A generator is used
because this is mostly consumed in for loops, where it is more memory-
and code-efficient.
"""
for cell in self._cells:
yield cell
def get_all_cells_taller_than_this_row(self):
"""
A generator that gets all cells that are taller than this row (which
means they span multiple rows).
"""
for cell in self._cells:
if cell.start_row + cell.height > self._start_row + self._height:
yield cell
def get_all_cells_starting_at_this_row(self):
"""
A generator that gets all cells that start at this row (which means
they are not spanning from another row).
"""
for cell in self._cells:
if cell.start_row == self._start_row:
yield cell
def get_cell_starting_at_this_row_at_column(self, column):
"""
Returns the cell (or None if no cell is found) that starts in this row,
at the column specified.
"""
for cell in self.get_all_cells_starting_at_this_row():
if cell.start_col == column:
return cell
elif cell.start_col > column:
break
return None
@property
def height(self):
"""
Returns the height (in number of characters) of this row. The height is
equal to the height of the shortest cell in this row.
"""
return self._height
@property
def start_row(self):
"""
The index of the line in the block at which this row starts.
"""
return self._start_row
@property
def end_row(self):
"""
The index of the line in the block at which this row ends. This is
equal to the starting row plus the height.
"""
return self._start_row + self._height
@property
def start_col(self):
"""
The column in the block at which this row starts. If a cell starts at
this row, that cell's start column is returned. Otherwise, the end
column of the last cell in the leftmost run of connected cells is
returned.
"""
if len(self._cells) == 0:
return 0
left_cell = None
for cell in self._cells:
if cell.start_row == self.start_row:
return cell.start_col
if left_cell is None or left_cell.end_col == cell.start_col:
left_cell = cell
else:
break
return left_cell.end_col
@property
def end_col(self):
"""
Returns the ending column for this row. The ending column is equal to
the last cell's ending column.
"""
if len(self._cells) == 0:
return 0
return self._cells[-1].end_col
class GridTable(object):
"""
A grid table in its entirety. The start row and start column should be 0, 0
but can be set differently depending on the block. The width and height are
how many characters wide and high the table is.
"""
def __init__(self, start_row, start_col, height, width, first_row_header=False):
self._rows = [GridTableRow(start_row, is_header=first_row_header)]
self._start_row = start_row
self._start_col = start_col
self._width = width
self._height = height
def new_row(self, is_header=False, header_location=-1):
"""
Creates a new row which starts at the end of the previous row. Any
cells that are larger than the height of this row are added to the new
row.
"""
self._rows.append(GridTableRow(self._rows[-1].end_row, is_header))
for cell in self._rows[-2].get_all_cells_taller_than_this_row():
cell.rowspan += 1
self._rows[-1].add_cell(cell)
return self._rows[-1].start_row, self._rows[-1].start_col
def add_cell(self, cell):
"""
Adds a cell to the last row in the table.
"""
return self._rows[-1].add_cell(cell)
def get_all_rows(self):
"""
A generator that returns all rows in the table.
"""
for row in self._rows:
yield row
def get_all_cells_starting_at_column(self, column):
"""
A generator which yields all cells in all rows that start at a specific
column.
"""
for row in self._rows:
cell = row.get_cell_starting_at_this_row_at_column(column)
if cell is not None:
yield cell
def calculate_colspans(self):
"""
After all cells are added to the table, this function will calculate
all colspans for all cells in the array. It does this by walking
through all cells and finding each column in which a cell ends, and
increasing the colspans of all rows that start before that column and
ends after that column by one.
"""
start_col = self._start_col
end_col = self.end_col
cells = []
while start_col < end_col:
new_cells = list(self.get_all_cells_starting_at_column(start_col))
for cell in new_cells:
if cell not in cells:
cells.append(cell)
for cell in cells:
if cell.end_col < end_col:
end_col = cell.end_col
for i in range(len(cells) - 1, -1, -1):
if cells[i].end_col > end_col:
cells[i].colspan += 1
else:
del cells[i]
start_col = end_col
end_col = self.end_col
@property
def start_row(self):
"""
Returns the index of the row (in number of characters) that the table
starts at.
"""
return self._start_row
@property
def start_col(self):
"""
Returns the index of the column (in number of characters) that the
table starts at.
"""
return self._start_col
@property
def width(self):
"""
Returns the width (in number of characters) of the table.
"""
return self._width
@property
def height(self):
"""
Returns the height (in number of characters) of the table.
"""
return self._height
@property
def end_row(self):
"""
Returns the index of the row (in number of characters) that the table
ends at. It is equal to the starting row plus the height.
"""
return self._start_row + self._height
@property
def end_col(self):
"""
Returns the index of the column (in number of characters) that the
table ends at. It is equal to the starting column plus the width.
"""
return self._start_col + self._width
@property
def has_header(self):
return self._rows[0].is_header
class GridTableProcessor(markdown.blockprocessors.BlockProcessor):
"""
The markdown block processor used to parse a grid table. A malformed grid
table is generated as a block of text instead of being removed.
"""
_header_regex = r'\+=+(\+=+)*\+'
def test(self, parent, block):
"""
This function tests whether the block of text passed in is a table. A
table is defined as a block of text which has more than two lines, has
a '+-' on both the top and bottom left corners, has a '-+' on both the
top and bottom right corners, and has a '|' at the beginning and end of
the first and last content rows.
"""
rows = []
Started = False
InTable = True
for r in block.split('\n'):
if not Started and (r.startswith('+') or r.startswith('|')):
Started = True
if Started and InTable and (r.startswith('+') or r.startswith('|')):
rows.append(r.strip())
elif Started:
InTable = False
val = (len(rows) > 2 and
rows[0][:2] == "+-" and rows[0][-2:] == "-+" and
rows[1][0] == '|' and rows[1][-1] == '|' and
rows[-2][0] == '|' and rows[-2][-1] == '|' and
rows[-1][:2] == "+-" and rows[-1][-2:] == "-+")
return val
def run(self, parent, blocks):
"""
Starts parsing the block of text which contains the table. It first
finds the header (if one exists) as ended by a row of '=' characters.
It then gets all the cells in the body (as a separate table from the
header; this needs to be changed). If getting either the header or the
body fails, the table is instead rendered as a block of text.
Otherwise, it is rendered as a table with the appropriate row and
column spans.
"""
block = blocks.pop(0)
before = []
rows = []
after = []
Started = False
InTable = True
for r in block.split('\n'):
if not Started:
if (r.startswith('+') or r.startswith('|')):
Started = True
else:
before.append(r)
if Started and InTable and (r.startswith('+') or r.startswith('|')):
rows.append(r.strip())
elif Started:
InTable = False
after.append(r)
if len(before) > 0:
self.parser.parseBlocks(parent, ["\n".join(before)])
try:
orig_block = rows
body_block = orig_block[:]
success, body = self._get_all_cells(body_block)
if not success:
self._render_as_block(parent, '\n'.join(orig_block))
return
pr = etree.SubElement(parent, 'div')
pr.set('class', "table-wrapper")
table = etree.SubElement(pr, 'table')
self._render_rows(body, table)
if len(after) > 0:
blocks.insert(0, "\n".join(after))
except Exception:
blocks.insert(0, block)
return False
def _render_as_block(self, parent, text):
"""
Renders a table as a block of text instead of a table. This isn't done
correctly, since the serialized items are serialized again, but I'll
fix this later.
"""
# Escape '&' first so the later entity substitutions are not double-escaped.
trans_table = [('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), (' ', '&nbsp;')]
for from_char, to_char in trans_table:
text = text.replace(from_char, to_char)
div = etree.SubElement(parent, 'div')
div.set('class', 'grid-table-error')
div.text = text
def _header_exists(self, block):
"""
Checks if a header exists. A header is defined by a row of '='
characters.
"""
for i, row in enumerate(block):
if re.match(self._header_regex, row):
return True, i, self._get_header(block)
return False, -1, block
def _get_header(self, block):
"""
Separates the header of the table from the body, putting them both into
their own separate blocks and replacing the header separator with a
normal separator.
"""
block = block[:]
for i in range(0, len(block)):
if re.match(self._header_regex, block[i]):
block[i] = block[i].replace('=', '-')
break
return block
def _render_rows(self, table, parent):
"""
Renders all rows in a table into 'tr' elements, and all cells into all
'td' elements.
"""
header_cell_tag = 'th'
body_cell_tag = 'td'
rendered = []
if table.has_header:
header_subparent = etree.SubElement(parent, 'thead')
body_subparent = etree.SubElement(parent, 'tbody')
else:
header_subparent = body_subparent = etree.SubElement(parent, 'tbody')
for row in table.get_all_rows():
if table.has_header and row.is_header:
subparent = header_subparent
else:
subparent = body_subparent
if len(list(row.get_all_cells())) != 0:
tr = etree.SubElement(subparent, 'tr')
for cell in row.get_all_cells():
if cell not in rendered:
if row.is_header:
cell_element = etree.SubElement(tr, header_cell_tag)
else:
cell_element = etree.SubElement(tr, body_cell_tag)
rendered.append(cell)
self.parser.parseBlocks(cell_element, cell.text.split('\n\n'))
cell_element.set('rowspan', str(cell.rowspan))
cell_element.set('colspan', str(cell.colspan))
def _get_all_cells(self, block):
"""
Finds all cells within the block and assembles them into a table
object. Not all rows in this table will have the same length due to
the possibility that cells span multiple rows. Returns the success or
failure of finding all cells and the table object itself. If this
fails, that means that the input was malformed.
"""
start_row = start_col = 0
header_exists, header_location, block = self._header_exists(block)
table = GridTable(start_row, start_col, len(block) - 1, len(block[0]) - 1, header_exists)
while start_row < len(block) - 1:
new_cell = self._scan_cell(block, start_row, start_col)
if new_cell is None or not table.add_cell(new_cell):
return False, table
if start_col + new_cell.width >= len(block[start_row]) - 1:
is_header = header_exists and table._rows[-1].end_row < header_location
start_row, start_col = table.new_row(is_header=is_header)
else:
start_col += new_cell.width
table.calculate_colspans()
return True, table
def _scan_cell(self, block, start_row, start_col):
"""
Starts scanning for a specific cell by checking the starting character
to make sure it's valid. It scans in the order right, down, left, up
to see if it can get back to its starting position. If it can, a new
GridTableCell is returned, and if it can't, None is returned.
"""
if block[start_row][start_col] != '+':
return None
return self._scan_right(block, start_row, start_col)
def _scan_right(self, block, start_row, start_col):
"""
Scans right until it gets to a '+' sign. It then starts scanning down
to see if it can find a complete path, if it can't, it continues
scanning right. Otherwise, it returns the cell it found.
"""
width = 1
while start_col + width < len(block[start_row]):
cur_col = start_col + width
if block[start_row][cur_col] == '+':
result = self._scan_down(block, start_row, start_col, cur_col)
if result is None:
width += 1
continue
return result
elif block[start_row][cur_col] == '-':
width += 1
else:
break
return None
def _scan_down(self, block, start_row, start_col, cur_col):
"""
Scans down until it gets to a '+' sign. It then starts scanning left
to see if it can find a complete path back to the starting position.
If it can, then it returns the cell it found, otherwise, it returns
None.
"""
height = 1
while start_row + height < len(block):
cur_row = start_row + height
if block[cur_row][cur_col] == '+':
result = self._scan_left(block, start_row, start_col, cur_col, cur_row)
if result is None:
height += 1
continue
return result
elif block[cur_row][cur_col] == '|':
height += 1
else:
break
return None
def _scan_left(self, block, start_row, start_col, cur_col, cur_row):
"""
Scans left until it gets to a '+' sign. It then starts scanning up to
verify that the path found is a complete cell and that it gets back to
the starting position. If it does, it returns the cell; otherwise it
returns None.
"""
width = 1
while cur_col - width >= 0:
check_col = cur_col - width
if block[cur_row][check_col] == '+':
result = self._scan_up(block, start_row, start_col, cur_col, cur_row, check_col)
if result is None:
width += 1
continue
return result
elif block[cur_row][check_col] == '-':
width += 1
else:
break
return None
def _scan_up(self, block, start_row, start_col, cur_col, cur_row, check_col):
"""
Scans up until it gets to a '+' sign. If the '+' sign is in the
starting position, it returns a new GridTableCell. Otherwise, it
scans right again to verify it doesn't connect to any new paths and
create a new cell. If it does, it returns None, since the table is
malformed. Otherwise it continues scanning up.
"""
height = 1
while cur_row - height >= 0:
check_row = cur_row - height
if block[check_row][check_col] == '+':
if start_row == check_row and start_col == check_col:
cell = GridTableCell(start_row, start_col, cur_col - start_col, height)
cell.text = self._gather_text(block, cell.start_row, cell.start_col, cell.end_row, cell.end_col)
return cell
result = self._scan_right(block, check_row, check_col)
if result is not None:
return None
height += 1
continue
elif block[check_row][check_col] == '|':
height += 1
else:
break
return None
def _gather_text(self, block, start_row, start_col, end_row, end_col):
"""
Gathers the text within the cell defined by the start row, start
column, end row, and end column and returns them as one string.
"""
text = []
for i in range(start_row + 1, end_row):
text.append(block[i][start_col + 1:end_col].rstrip())
return '\n'.join(self._unindent_one_level(text))
def _unindent_one_level(self, text):
"""
Unindents the text one level, up to the index of the farthest-left
non-blank character in the text.
"""
chars = 0
for i in range(0, len(max(text, key=len))):
for line in text:
if i < len(line) and line[i] != ' ':
break
else:
chars += 1
continue # This skips the break below
break
for i in range(0, len(text)):
text[i] = text[i][chars:]
return text
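# Hedged usage sketch (not part of the original module): this assumes the
# pre-3.0 Python-Markdown extension API that the code above targets, where
# extendMarkdown() still receives md_globals. The sample table is
# illustrative input only.
def _demo_grid_table():
    sample = ("+------+------+\n"
              "| a    | b    |\n"
              "+======+======+\n"
              "| 1    | 2    |\n"
              "+------+------+")
    return markdown.markdown(sample, extensions=[GridTableExtension()])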
|
Situphen/Python-ZMarkdown
|
markdown/extensions/grid_tables.py
|
Python
|
bsd-3-clause
| 27,960
|
"""
This module holds the AuthKey class.
"""
import struct
from hashlib import sha1
from .._misc.binaryreader import BinaryReader
class AuthKey:
"""
Represents an authorization key, used to encrypt and decrypt
messages sent to Telegram's data centers.
"""
def __init__(self, data):
"""
Initializes a new authorization key.
:param data: the data in bytes that represent this auth key.
"""
self.key = data
@property
def key(self):
return self._key
@key.setter
def key(self, value):
if not value:
self._key = self.aux_hash = self.key_id = None
return
if isinstance(value, type(self)):
self._key, self.aux_hash, self.key_id = \
value._key, value.aux_hash, value.key_id
return
self._key = value
with BinaryReader(sha1(self._key).digest()) as reader:
self.aux_hash = reader.read_long(signed=False)
reader.read(4)
self.key_id = reader.read_long(signed=False)
# TODO This doesn't really fit here, it's only used in authentication
def calc_new_nonce_hash(self, new_nonce, number):
"""
Calculates the new nonce hash based on the current attributes.
:param new_nonce: the new nonce to be hashed.
:param number: number to prepend before the hash.
:return: the hash for the given new nonce.
"""
new_nonce = new_nonce.to_bytes(32, 'little', signed=True)
data = new_nonce + struct.pack('<BQ', number, self.aux_hash)
# Calculates the message key from the given data
return int.from_bytes(sha1(data).digest()[4:20], 'little', signed=True)
def __bool__(self):
return bool(self._key)
def __eq__(self, other):
return isinstance(other, type(self)) and other.key == self._key
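# Hedged usage sketch (illustrative, not part of Telethon): MTProto auth
# keys are 256 bytes; key_id and aux_hash are derived from the SHA1 digest
# computed in the key setter above.
def _demo_auth_key():
    import os
    auth_key = AuthKey(os.urandom(256))
    assert auth_key  # truthy once key data is set
    return auth_key.key_id, auth_key.aux_hash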
|
LonamiWebs/Telethon
|
telethon/_crypto/authkey.py
|
Python
|
mit
| 1,895
|
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from charmhelpers.core import host
from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
The `target` path should be absolute. It can also be `None`, in which
case no file will be written.
The context should be a dict containing the values to be replaced in the
template.
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
The rendered template will be written to the file as well as being returned
as a string.
Note: Using this requires python-jinja2; if it is not installed, calling
this will attempt to use charmhelpers.fetch.apt_install to install it.
"""
try:
from jinja2 import FileSystemLoader, Environment, exceptions
except ImportError:
try:
from charmhelpers.fetch import apt_install
except ImportError:
hookenv.log('Could not import jinja2, and could not import '
'charmhelpers.fetch to install it',
level=hookenv.ERROR)
raise
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if template_loader:
template_env = Environment(loader=template_loader)
else:
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
template_env = Environment(loader=FileSystemLoader(templates_dir))
try:
template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
if target is not None:
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
# This is a terrible default directory permission, as the file
# or its siblings will often contain secrets.
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
host.write_file(target, content.encode(encoding), owner, group, perms)
return content
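# Hedged usage sketch (illustrative template name, target path and context;
# not part of charm-helpers): renders templates/config.tmpl from the charm's
# templates directory into /etc/myapp/config.ini and returns the text.
def _demo_render():
    return render(
        source='config.tmpl',
        target='/etc/myapp/config.ini',
        context={'port': 8080, 'debug': False},
        perms=0o640,
    )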
|
juju/juju-gui-charm
|
hooks/charmhelpers/core/templating.py
|
Python
|
agpl-3.0
| 3,186
|
# Copyright 2017 the Isard-vdi project authors:
# Josep Maria Viñolas Auquer
# Alberto Larraz Dalmases
# License: AGPLv3
#!flask/bin/python
# coding=utf-8
import json
import time
from flask import (
Response,
redirect,
render_template,
request,
send_from_directory,
url_for,
)
from flask_login import current_user, login_required
from webapp import app
from ...lib import admin_api
app.adminapi = admin_api.isardAdmin()
import rethinkdb as r
from ...lib.flask_rethink import RethinkDB
db = RethinkDB(app)
db.init_app(app)
from .decorators import isAdmin, isAdminManager
"""
LANDING ADMIN PAGE
"""
@app.route("/isard-admin/admin")
@login_required
@isAdmin
def admin():
return render_template(
"admin/pages/hypervisors.html",
title="Hypervisors",
header="Hypervisors",
nav="Hypervisors",
)
@app.route("/isard-admin/admin/table/<table>/get")
@login_required
@isAdminManager
def admin_table_get(table):
result = app.adminapi.get_admin_table(table)
if table == "scheduler_jobs":
for i, val in enumerate(result):
result[i].pop("job_state", None)
if current_user.role == "manager":
if table == "categories":
result = [
{**r, **{"editable": False}}
for r in result
if r["id"] == current_user.category
]
if table == "groups":
result = [
r
for r in result
if "parent_category" in r.keys()
and r["parent_category"] == current_user.category
]
if table == "roles":
result = [r for r in result if r["id"] != "admin"]
return json.dumps(result), 200, {"Content-Type": "application/json"}
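# Hedged usage sketch (hypothetical host; requires an authenticated admin
# session cookie). Fetches the 'domains' table through the route above:
#
#   curl -b "session=<cookie>" \
#        https://isard-host/isard-admin/admin/table/domains/get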
# Used in quota.js for admin users
@app.route("/isard-admin/admin/load/<table>/post", methods=["POST"])
@login_required
@isAdminManager
def admin_load_post(table):
if request.method == "POST":
data = request.get_json(force=True)
if "id" not in data.keys():
data["id"] = False
if "pluck" not in data.keys():
data["pluck"] = False
if "order" not in data.keys():
data["order"] = False
if "flatten" not in data.keys():
data["flatten"] = True
if table == "media" and current_user.role == "manager":
result = app.isardapi.get_all_alloweds_table(
"media", current_user.id, pluck=False
)
else:
result = app.adminapi.get_admin_table(
table,
id=data["id"],
pluck=data["pluck"],
order=data["order"],
flatten=data["flatten"],
)
return json.dumps(result), 200, {"Content-Type": "application/json"}
return json.dumps("Could not delete."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/table/<table>/post", methods=["POST"])
@login_required
@isAdminManager
def admin_table_post(table):
if request.method == "POST":
data = request.get_json(force=True)
if "pluck" not in data.keys():
data["pluck"] = False
if "kind" not in data.keys():
data["kind"] = False
# ~ else:
# ~ if data['kind']=='template':
# ~ result=app.adminapi.get_admin_table_term(table,'name',data['term'],pluck=data['pluck'],kind=data['kind'])
# ~ result=app.adminapi.get_admin_table_term(table,'name',data['term'],pluck=data['pluck'],kind=data['kind'])
# ~ result=app.adminapi.get_admin_table_term(table,'name',data['term'],pluck=data['pluck'],kind=data['kind'])
# ~ else:
# ~ if data['kind']='not_desktops':
# ~ result=app.adminapi.get_admin_table_term(table,'name',data['term'],pluck=data['pluck'],kind=)
# ~ if 'order' not in data.keys():
# ~ data['order']=False
result = app.adminapi.get_admin_table_term(
table, "name", data["term"], pluck=data["pluck"], kind=data["kind"]
)
return json.dumps(result), 200, {"Content-Type": "application/json"}
return json.dumps("Could not delete."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/getAllTemplates", methods=["POST"])
@login_required
@isAdminManager
def admin_get_all_templates():
if request.method == "POST":
data = request.get_json(force=True)
result = app.adminapi.get_admin_templates(data["term"])
if current_user.role == "manager":
result = [d for d in result if d["category"] == current_user.category]
return json.dumps(result), 200, {"Content-Type": "application/json"}
return json.dumps("Could not delete."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/delete", methods=["POST"])
@login_required
@isAdminManager
def admin_delete():
if request.method == "POST":
if app.adminapi.delete_table_key(
request.get_json(force=True)["table"], request.get_json(force=True)["pk"]
):
return json.dumps("Deleted"), 200, {"Content-Type": "application/json"}
return json.dumps("Could not delete."), 500, {"Content-Type": "application/json"}
"""
CONFIG
"""
@app.route("/isard-admin/admin/config", methods=["GET", "POST"])
@login_required
@isAdminManager
def admin_config():
if request.method == "POST":
return (
json.dumps(app.adminapi.get_admin_config(1)),
200,
{"Content-Type": "application/json"},
)
return render_template("admin/pages/config.html", nav="Config")
# ~ @app.route('/isard-admin/admin/disposables', methods=["POST"])
# ~ @login_required
# ~ @isAdmin
# ~ def admin_disposables():
# ~ result=app.adminapi.get_admin_table('disposables')
# ~ return json.dumps(result), 200, {'Content-Type':'application/json'}
@app.route("/isard-admin/admin/config/update", methods=["POST"])
@login_required
@isAdminManager
def admin_config_update():
if request.method == "POST":
config_data = app.isardapi.f.unflatten_dict(request.form)
if "auth" in config_data:
config_data["auth"]["local"] = {"active": "local" in config_data["auth"]}
if "engine" in config_data:
if "grafana" in config_data["engine"]:
config_data["engine"]["grafana"]["active"] = (
"active" in config_data["engine"]["grafana"]
)
if "ssh" in config_data["engine"]:
if "hidden" in config_data["engine"]["ssh"]:
config_data["engine"]["ssh"]["paramiko_host_key_policy_check"] = (
"paramiko_host_key_policy_check" in config_data["engine"]["ssh"]
)
config_data["engine"]["ssh"].pop("hidden", None)
if "disposable_desktops" in config_data:
config_data["disposable_desktops"].pop("id", None)
config_data["disposable_desktops"]["active"] = (
"active" in config_data["disposable_desktops"]
)
if app.adminapi.update_table_dict("config", 1, config_data):
# ~ return json.dumps('Updated'), 200, {'Content-Type':'application/json'}
return render_template("admin/pages/config.html", nav="Config")
return json.dumps("Could not update."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/disposable/add", methods=["POST"])
@login_required
@isAdminManager
def admin_disposable_add():
if request.method == "POST":
dsps = []
# ~ Next 2 lines should be removed when form returns a list
nets = [request.form["nets"]]
disposables = request.form.getlist("disposables")
for d in disposables:
dsps.append(
app.adminapi.get_admin_table(
"domains", pluck=["id", "name", "description"], id=d
)
)
disposable = [
{
"id": app.isardapi.parse_string(request.form["name"]),
"active": True,
"name": request.form["name"],
"description": request.form["description"],
"nets": nets,
"disposables": dsps,
}
]
if app.adminapi.insert_table_dict("disposables", disposable):
return json.dumps("Updated"), 200, {"Content-Type": "application/json"}
return json.dumps("Could not update."), 500, {"Content-Type": "application/json"}
"""
BACKUP & RESTORE
"""
@app.route("/isard-admin/admin/backup", methods=["POST"])
@login_required
@isAdmin
def admin_backup():
if request.method == "POST":
app.adminapi.backup_db()
return json.dumps("Updated"), 200, {"Content-Type": "application/json"}
return json.dumps("Method not allowed."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/restore", methods=["POST"])
@login_required
@isAdmin
def admin_restore():
if request.method == "POST":
app.adminapi.restore_db(request.get_json(force=True)["pk"])
return json.dumps("Updated"), 200, {"Content-Type": "application/json"}
return json.dumps("Method not allowed."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/restore/<table>", methods=["POST"])
@login_required
@isAdmin
def admin_restore_table(table):
global backup_data, backup_db
if request.method == "POST":
data = request.get_json(force=True)["data"]
insert = data["new_backup_data"]
data.pop("new_backup_data", None)
if insert:
if app.adminapi.insert_table_dict(table, data):
return json.dumps("Inserted"), 200, {"Content-Type": "application/json"}
else:
id = data["id"]
data.pop("id", None)
if app.adminapi.update_table_dict(table, id, data):
return json.dumps("Updated"), 200, {"Content-Type": "application/json"}
return json.dumps("Method not allowed."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/backup_remove", methods=["POST"])
@login_required
@isAdmin
def admin_backup_remove():
if request.method == "POST":
app.adminapi.remove_backup_db(request.get_json(force=True)["pk"])
return json.dumps("Updated"), 200, {"Content-Type": "application/json"}
return json.dumps("Method not allowed."), 500, {"Content-Type": "application/json"}
backup_data = {}
backup_db = []
@app.route("/isard-admin/admin/backup_info", methods=["POST"])
@login_required
@isAdmin
def admin_backup_info():
global backup_data, backup_db
if request.method == "POST":
backup_data, backup_db = app.adminapi.info_backup_db(
request.get_json(force=True)["pk"]
)
return json.dumps(backup_data), 200, {"Content-Type": "application/json"}
return json.dumps("Method not allowed."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/backup_detailinfo", methods=["POST"])
@login_required
@isAdmin
def admin_backup_detailinfo():
global backup_data, backup_db
if request.method == "POST":
table = request.get_json(force=True)["table"]
if table == "":
return json.dumps({}), 200, {"Content-Type": "application/json"}
new_db = app.adminapi.check_new_values(table, backup_db[table])
return json.dumps(new_db), 200, {"Content-Type": "application/json"}
return json.dumps("Method not allowed."), 500, {"Content-Type": "application/json"}
@app.route("/isard-admin/admin/backup/download/<id>", methods=["GET"])
@login_required
@isAdmin
def admin_backup_download(id):
filedir, filename, data = app.adminapi.download_backup(id)
return Response(
data,
mimetype="application/x-gzip",
headers={"Content-Disposition": "attachment;filename=" + filename},
)
@app.route("/isard-admin/admin/backup/upload", methods=["POST"])
@login_required
@isAdmin
def admin_backup_upload():
for f in request.files:
app.adminapi.upload_backup(request.files[f])
return json.dumps("Updated"), 200, {"Content-Type": "application/json"}
|
isard-vdi/isard
|
webapp/webapp/webapp/admin/views/AdminViews.py
|
Python
|
agpl-3.0
| 12,269
|
# -*- coding: utf-8 -*-
# Code for console encoding differences; safe to ignore.
import sys
import imp
import random
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
try:
import unittest2 as unittest
except ImportError:
import unittest
from popbill import *
class CashbillServiceTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
self.cashbillService = CashbillService('TESTER', 'SwWxqU+0TErBXy/9TVjIPEnI0VTUMMSQZtJf3Ed8q3I=')
self.cashbillService.IsTest = True
self.testCorpNum = "1234567890"
self.testUserID = "testkorea"
self.testMgtKey = ''.join(random.sample('abcdefghijklmnopqrstuvwxyz1234567890', 10))
def test_getChargeInfo(self):
chrgInfo = self.cashbillService.getChargeInfo(self.testCorpNum, self.testUserID)
print(chrgInfo.unitCost)
print(chrgInfo.rateSystem)
print(chrgInfo.chargeMethod)
def test_search(self):
DType = "R"
SDate = "20180901"
EDate = "20181008"
State = ["3**", "4**"]
TradeType = ["N", "C"]
TradeUsage = ["P", "C"]
TaxationType = ["T", "N"]
Page = 1
PerPage = 10
Order = "D"
QString = ""
TradeOpt = ["N", "B", "T"]
try:
result = self.cashbillService.search(self.testCorpNum, DType, SDate, EDate, State, TradeType, TradeUsage,
TaxationType, Page, PerPage, Order, self.testUserID, QString, TradeOpt)
print(result.total)
self.assertEqual(result.code, 1, "registration error: " + result.message)
except PopbillException as PE:
print(PE.message)
def test_registIssue(self):
cashbill = Cashbill(mgtKey="20191024-07",
tradeType="승인거래",
tradeUsage="소득공제용",
taxationType="과세",
identityNum="01012341234",
franchiseCorpNum="1234567890",
franchiseCorpName="발행자 상호",
franchiseCEOName="발행 대표자 성명",
franchiseAddr="발행자 주소",
franchiseTEL="07075103710",
smssendYN=False,
customerName="고객명",
itemName="상품명",
orderNumber="주문번호",
email="code@linkhub.co.kr",
hp="010000000",
fax="07075103710",
supplyCost="15000",
tax="5000",
serviceFee="0",
totalAmount="20000"
)
try:
result = self.cashbillService.registIssue(self.testCorpNum, cashbill, "발행메모", "testkorea", "")
self.assertEqual(result.code, 1, "registration error: " + result.message)
except PopbillException as PE:
print(PE.message)
def test_getInfos(self):
infos = self.cashbillService.getInfos(self.testCorpNum, ["20150707-01", "20150706-01"])
for info in infos:
print("info : %s" % info.mgtKey)
for key, value in info.__dict__.items():
if not key.startswith("__"):
print(" %s : %s" % (key, value))
self.assertGreater(len(infos), 0, "count check")
def test_getBalance(self):
balance = self.cashbillService.getBalance(self.testCorpNum)
print(balance)
self.assertGreaterEqual(balance, 0, 'balance is 0 or more.')
def test_getPartnerBalance(self):
balance = self.cashbillService.getPartnerBalance(self.testCorpNum)
print(balance)
self.assertGreaterEqual(balance, 0, 'balance is 0 or more.')
def test_checkIsMember(self):
result = self.cashbillService.checkIsMember(self.testCorpNum)
self.assertEqual(result.code, 1, result.message + ", code is 1 when registered")
result = self.cashbillService.checkIsMember("1234568790")
self.assertEqual(result.code, 0, result.message + ", code is 0 when not registered")
def test_getURL(self):
url = self.cashbillService.getURL(self.testCorpNum, self.testUserID, "PBOX")
self.assertEqual(url[:5], "https", "starts with https")
print("PBOX url : " + url)
def test_getUnitCost(self):
unitCost = self.cashbillService.getUnitCost(self.testCorpNum)
self.assertGreaterEqual(unitCost, 0, "unit cost is 0 or more.")
def test_02_checkMgtKeyInUse(self):
bIsInUse = self.cashbillService.checkMgtKeyInUse(self.testCorpNum, "20150325-01")
self.assertEqual(bIsInUse, True, "confirmed as registered")
bIsInUse = self.cashbillService.checkMgtKeyInUse(self.testCorpNum, "20150325-535")
self.assertEqual(bIsInUse, False, "confirmed as not registered")
def test_getPopbillURL(self):
url = self.cashbillService.getPopbillURL(self.testCorpNum, self.testUserID, "LOGIN")
self.assertEqual(url[:5], "https", "starts with https")
def test_01_register(self):
cashbill = Cashbill(mgtKey="20150325-01",
tradeType="승인거래",
tradeUsage="소득공제용",
taxationType="과세",
identityNum="010000000",
franchiseCorpNum="1234567890",
franchiseCorpName="발행자 상호",
franchiseCEOName="발행 대표자 성명",
franchiseAddr="발행자 주소",
franchiseTEL="07075103710",
smssendYN=False,
customerName="고객명",
itemName="상품명",
orderNumber="주문번호",
email="test@test.com",
hp="010000000",
fax="07075103710",
supplyCost="15000",
tax="5000",
serviceFee="0",
totalAmount="20000"
)
try:
result = self.cashbillService.register(self.testCorpNum, cashbill)
self.assertEqual(result.code, 1, "registration error: " + result.message)
except PopbillException as PE:
print(PE.message)
def test_02_update(self):
cashbill = Cashbill(mgtKey="20150325-01",
tradeType="승인거래",
tradeUsage="소득공제용",
taxationType="과세",
identityNum="01012341234",
franchiseCorpNum="1234567890",
franchiseCorpName="발행자 상호",
franchiseCEOName="발행 대표자 성명",
franchiseAddr="발행자 주소",
franchiseTEL="07075103710",
smssendYN=False,
customerName="고객명",
itemName="상품명",
orderNumber="주문번호",
email="test@test.com",
hp="010000000",
fax="07075103710",
supplyCost="15000",
tax="5000",
serviceFee="0",
totalAmount="20000"
)
try:
result = self.cashbillService.update(self.testCorpNum, '20150325-01', cashbill)
self.assertEqual(result.code, 1, "update error: " + result.message)
print(result.message)
except PopbillException as PE:
print(PE.message)
def test_03_issue(self):
try:
result = self.cashbillService.issue(self.testCorpNum, '20150325-01', "발행메모")
self.assertEqual(result.code, 1, "issue error: " + result.message)
print(result.message)
except PopbillException as PE:
print(PE.message)
def test_04_cancelIssue(self):
result = self.cashbillService.cancelIssue(self.testCorpNum, "20150325-01", "발행취소 메모1")
self.assertEqual(result.code, 1, "issue cancellation error: " + result.message)
def test_05_getInfo(self):
result = self.cashbillService.getInfo(self.testCorpNum, "20180926_06")
print(result.itemKey)
self.assertEqual(result.mgtKey, "20180926_06", "getInfo 오류 :" + str(result.message))
def test_06_getDetailInfo(self):
result = self.cashbillService.getDetailInfo(self.testCorpNum, "20171114-20")
print(result.cancelType)
self.assertEqual(result.mgtKey, "20171114-20", "getDetailInfo error: " + str(result.message))
def test_07_sendEmail(self):
result = self.cashbillService.sendEmail(self.testCorpNum, "20150325-01", "test@test.com")
self.assertEqual(result.code, 1, "email resend error: " + result.message)
def test_08_sendSMS(self):
result = self.cashbillService.sendSMS(self.testCorpNum, "20150325-01", "07075103710", "010111222", "문자메시지 테스트")
self.assertEqual(result.code, 1, "notification SMS send error: " + result.message)
def test_09_sendFax(self):
result = self.cashbillService.sendFAX(self.testCorpNum, "20150325-01", "07075103710", "010111222")
self.assertEqual(result.code, 1, "notification fax send error: " + result.message)
def test_10_getLogs(self):
result = self.cashbillService.getLogs(self.testCorpNum, "20150325-01")
print(result[0].log)
def test_12_getPopUpURL(self):
url = self.cashbillService.getPopUpURL(self.testCorpNum, "20150325-01", self.testUserID)
self.assertEqual(url[:5], "https", "starts with https")
print("PopupURL : " + url)
def test_13_getPrintURL(self):
url = self.cashbillService.getPrintURL(self.testCorpNum, "20150325-01", self.testUserID)
self.assertEqual(url[:5], "https", "starts with https")
print("Print URL : " + url)
def test_14_getEPrintURL(self):
url = self.cashbillService.getEPrintURL(self.testCorpNum, "20150325-01", self.testUserID)
self.assertEqual(url[:5], "https", "starts with https")
print("EPRINT url : " + url)
def test_15_getMailURL(self):
url = self.cashbillService.getMailURL(self.testCorpNum, "20150325-01", self.testUserID)
self.assertEqual(url[:5], "https", "starts with https")
print("mailURL : " + url)
def test_16_getMassPrintURL(self):
MgtKeyList = ["20150225-02", "20150320-01", "20150320-03"]
url = self.cashbillService.getMassPrintURL(self.testCorpNum, MgtKeyList, self.testUserID)
self.assertEqual(url[:5], "https", "starts with https")
print("PopupURL : " + url)
def test_99_delete(self):
result = self.cashbillService.delete(self.testCorpNum, "20150325-01")
self.assertEqual(result.code, 1, "delete error: " + result.message)
def test_revokeRegistIssue(self):
mgtKey = "20170817-32"
orgConfirmNum = "820116333"
orgTradeDate = "20170711"
try:
result = self.cashbillService.revokeRegistIssue(self.testCorpNum, mgtKey, orgConfirmNum, orgTradeDate)
self.assertEqual(result.code, 1, "registration error: " + result.message)
except PopbillException as PE:
print(PE.message)
def test_revokeRegister(self):
mgtKey = "20171113-18"
orgConfirmNum = "820116333"
orgTradeDate = "20170711"
smssendYN = False
userID = None
try:
result = self.cashbillService.revokeRegister(self.testCorpNum, mgtKey, orgConfirmNum,
orgTradeDate, smssendYN)
self.assertEqual(result.code, 1, "registration error: " + result.message)
except PopbillException as PE:
print(PE.message)
def test_listEmailConfig(self):
result = self.cashbillService.listEmailConfig(self.testCorpNum)
print(len(result))
def test_updateEmailConfig(self):
EmailType = "CSH_ISSUE"
SendYN = True
try:
result = self.cashbillService.updateEmailConfig(self.testCorpNum, EmailType, SendYN)
print(result)
except PopbillException as PE:
print(PE.message)
def test_assignMgtKey(self):
response = self.cashbillService.assignMgtKey(self.testCorpNum, "020072713543600001", "20200727-01",
self.testUserID)
print(response.code)
print(response.message)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(CashbillServiceTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
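# Hedged usage note (stock unittest runner, nothing Popbill-specific): a
# single case can also be run directly, e.g.
#
#   python -m unittest cashbilltests.CashbillServiceTestCase.test_getBalance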
|
linkhub-sdk/popbill.py
|
cashbilltests.py
|
Python
|
mit
| 13,587
|
"""init
Revision ID: a578b9de5d89
Revises:
Create Date: 2016-12-13 21:36:44.114754
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a578b9de5d89'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_table('users')
op.drop_table('roles')
# ### end Alembic commands ###
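# Hedged usage note (standard Alembic workflow, not specific to this repo):
#
#   alembic upgrade head      # runs upgrade(), creating roles/users
#   alembic downgrade base    # runs downgrade(), dropping them again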
|
Lifeistrange/flaskweb
|
migrations/versions/a578b9de5d89_init.py
|
Python
|
mit
| 1,286
|
from chaco.api import Plot
from chaco.tools.toolbars.plot_toolbar import PlotToolbar
from traits.api import Type, DelegatesTo, Instance, Enum, \
on_trait_change
class ToolbarPlot(Plot):
# Should we turn on the auto-hide feature on the toolbar?
auto_hide = DelegatesTo('toolbar')
toolbar = Instance(PlotToolbar)
toolbar_class = Type(PlotToolbar)
toolbar_added = False
# Location of the default toolbar that is created if a toolbar
# is not specified with the `toolbar` attribute. Changing this
# attribute after the ToolbarPlot instance is created has no effect;
# use obj.toolbar.location to dynamically change the location of the
# instance `obj`'s toolbar.
toolbar_location = Enum('top', 'right', 'bottom', 'left')
def __init__(self, *args, **kw):
# initialize the toolbar class before super() has a chance to create
# the default using the default class. This can happen because of
# ordering issues
if "toolbar_class" in kw:
self.toolbar_class = kw.pop("toolbar_class")
super(ToolbarPlot, self).__init__(*args, **kw)
self.toolbar.component = self
self.add_toolbar()
def _toolbar_default(self):
return self.toolbar_class(self, location=self.toolbar_location)
def add_toolbar(self):
if not self.toolbar_added:
self.overlays.append(self.toolbar)
self.toolbar_added = True
self.request_redraw()
def remove_toolbar(self):
if self.toolbar_added and self.auto_hide:
self.overlays.remove(self.toolbar)
self.toolbar_added = False
self.request_redraw()
def _bounds_changed(self, old, new):
self.toolbar.do_layout(force=True)
super(ToolbarPlot, self)._bounds_changed(old, new)
@on_trait_change('toolbar')
def _toolbar_changed(self, name, obj, old, new):
if self.toolbar_added:
# fixup the new toolbar's component to match the old one
new.component = old.component
self.overlays.remove(old)
self.toolbar_added = False
self.add_toolbar()
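# Hedged usage sketch (illustrative data names; needs a working Chaco GUI
# backend to display): ToolbarPlot is constructed like chaco's Plot, with
# the toolbar location fixed at construction time as noted above.
#
#   import numpy as np
#   from chaco.api import ArrayPlotData
#   x = np.linspace(0, 10, 100)
#   data = ArrayPlotData(x=x, y=np.sin(x))
#   plot = ToolbarPlot(data, toolbar_location='bottom')
#   plot.plot(('x', 'y'))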
|
tommy-u/chaco
|
chaco/toolbar_plot.py
|
Python
|
bsd-3-clause
| 2,172
|
from .base import AbstractStatistics
from ..compat import pickle
from ..price_parser import PriceParser
import datetime
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
class SimpleStatistics(AbstractStatistics):
"""
Simple Statistics provides a bare-bones example of statistics
that can be collected through trading.
Statistics included are Sharpe Ratio, Drawdown, Max Drawdown,
Max Drawdown Duration.
TODO think about Alpha/Beta, compare strategy of benchmark.
TODO think about speed -- will be bad doing for every tick
on anything that trades sub-minute.
TODO think about slippage, fill rate, etc
TODO brokerage costs?
TODO need some kind of trading-frequency parameter in setup.
Sharpe calculations need to know if daily, hourly, minutely, etc.
"""
def __init__(self, config, portfolio_handler):
"""
Takes in a portfolio handler.
"""
self.config = config
self.drawdowns = [0]
self.equity = []
self.equity_returns = [0.0]
# Initialize timeseries. Correct timestamp not available yet.
self.timeseries = ["0000-00-00 00:00:00"]
# Initialize in order for first-step calculations to be correct.
current_equity = PriceParser.display(portfolio_handler.portfolio.equity)
self.hwm = [current_equity]
self.equity.append(current_equity)
def update(self, timestamp, portfolio_handler):
"""
Update all statistics that must be tracked over time.
"""
if timestamp != self.timeseries[-1]:
# Retrieve equity value of Portfolio
current_equity = PriceParser.display(portfolio_handler.portfolio.equity)
self.equity.append(current_equity)
self.timeseries.append(timestamp)
# Calculate percentage return between current and previous equity value.
pct = ((self.equity[-1] - self.equity[-2]) / self.equity[-2]) * 100
self.equity_returns.append(round(pct, 4))
# Calculate Drawdown.
self.hwm.append(max(self.hwm[-1], self.equity[-1]))
self.drawdowns.append(self.hwm[-1] - self.equity[-1])
def get_results(self):
"""
Return a dict with all important results & stats.
"""
# Modify timeseries in local scope only. We initialize with 0-date,
# but would rather show a realistic starting date.
timeseries = self.timeseries
timeseries[0] = pd.to_datetime(timeseries[1]) - pd.Timedelta(days=1)
statistics = {}
statistics["sharpe"] = self.calculate_sharpe()
statistics["drawdowns"] = pd.Series(self.drawdowns, index=timeseries)
statistics["max_drawdown"] = max(self.drawdowns)
statistics["max_drawdown_pct"] = self.calculate_max_drawdown_pct()
statistics["equity"] = pd.Series(self.equity, index=timeseries)
statistics["equity_returns"] = pd.Series(self.equity_returns, index=timeseries)
return statistics
def calculate_sharpe(self, benchmark_return=0.00):
"""
Calculate the sharpe ratio of our equity_returns.
Expects benchmark_return to be, for example, 0.01 for 1%
"""
excess_returns = pd.Series(self.equity_returns) - benchmark_return / 252
# Return the annualised Sharpe ratio based on the excess daily returns
return round(self.annualised_sharpe(excess_returns), 4)
def annualised_sharpe(self, returns, N=252):
"""
Calculate the annualised Sharpe ratio of a returns stream
based on a number of trading periods, N. N defaults to 252,
which then assumes a stream of daily returns.
The function assumes that the returns are the excess of
those compared to a benchmark.
"""
return np.sqrt(N) * returns.mean() / returns.std()
def calculate_max_drawdown_pct(self):
"""
Calculate the percentage drop related to the "worst"
drawdown seen.
"""
drawdown_series = pd.Series(self.drawdowns)
equity_series = pd.Series(self.equity)
bottom_index = drawdown_series.idxmax()
try:
top_index = equity_series[:bottom_index].idxmax()
pct = (
(equity_series.iloc[top_index] - equity_series.iloc[bottom_index]) /
equity_series.iloc[top_index] * 100
)
return round(pct, 4)
except ValueError:
return np.nan
def plot_results(self):
"""
A simple script to plot the balance of the portfolio, or
"equity curve", as a function of time.
"""
sns.set_palette("deep", desat=.6)
sns.set_context(rc={"figure.figsize": (8, 4)})
# Plot two charts: Equity curve, period returns
fig = plt.figure()
fig.patch.set_facecolor('white')
df = pd.DataFrame()
df["equity"] = pd.Series(self.equity, index=self.timeseries)
df["equity_returns"] = pd.Series(self.equity_returns, index=self.timeseries)
df["drawdowns"] = pd.Series(self.drawdowns, index=self.timeseries)
# Plot the equity curve
ax1 = fig.add_subplot(311, ylabel='Equity Value')
df["equity"].plot(ax=ax1, color=sns.color_palette()[0])
# Plot the returns
ax2 = fig.add_subplot(312, ylabel='Equity Returns')
df['equity_returns'].plot(ax=ax2, color=sns.color_palette()[1])
# drawdown, max_dd, dd_duration = self.create_drawdowns(df["Equity"])
ax3 = fig.add_subplot(313, ylabel='Drawdowns')
df['drawdowns'].plot(ax=ax3, color=sns.color_palette()[2])
# Rotate dates
fig.autofmt_xdate()
# Plot the figure
plt.show()
def get_filename(self, filename=""):
if filename == "":
now = datetime.datetime.utcnow()
filename = "statistics_" + now.strftime("%Y-%m-%d_%H%M%S") + ".pkl"
filename = os.path.expanduser(os.path.join(self.config.OUTPUT_DIR, filename))
return filename
def save(self, filename=""):
filename = self.get_filename(filename)
print("Save results to '%s'" % filename)
with open(filename, 'wb') as fd:
pickle.dump(self, fd)
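# Hedged numeric sketch (standalone, synthetic returns; not part of
# nctrader): the annualised Sharpe used above is sqrt(N) * mean / std of
# the excess-return stream.
def _demo_annualised_sharpe():
    import numpy as np
    import pandas as pd
    rets = pd.Series(np.random.normal(0.0005, 0.01, 252))  # ~1 trading year
    return np.sqrt(252) * rets.mean() / rets.std()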
|
nwillemse/nctrader
|
nctrader/statistics/simple.py
|
Python
|
mit
| 6,332
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'EmailMessage.tag'
db.alter_column(u'unisender_emailmessage', 'tag_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, on_delete=models.SET_NULL, to=orm['unisender.Tag']))
# Changing field 'EmailMessage.list_id'
db.alter_column(u'unisender_emailmessage', 'list_id_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, on_delete=models.SET_NULL, to=orm['unisender.SubscribeList']))
def backwards(self, orm):
# Changing field 'EmailMessage.tag'
db.alter_column(u'unisender_emailmessage', 'tag_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['unisender.Tag']))
# User chose to not deal with backwards NULL issues for 'EmailMessage.list_id'
raise RuntimeError("Cannot reverse this migration. 'EmailMessage.list_id' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'EmailMessage.list_id'
db.alter_column(u'unisender_emailmessage', 'list_id_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['unisender.SubscribeList']))
models = {
u'unisender.campaign': {
'Meta': {'ordering': "('name',)", 'object_name': 'Campaign'},
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'campaign'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['unisender.Subscriber']"}),
'email_message': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['unisender.EmailMessage']"}),
'err_delivery_failed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_dest_invalid': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_domain_inactive': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_internal': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_lost': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_mailbox_full': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_not_allowed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_not_available': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_resend': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_skip_letter': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_spam_folder': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_spam_rejected': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_spam_retry': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_spam_skipped': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_src_invalid': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_unsubscribed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_user_inactive': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_user_unknown': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'err_will_retry': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_check': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'not_sent': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_delivered': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_link_visited': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_read': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_spam_folder': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'ok_unsubscribed': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'payment_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'total': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'track_ga': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '50'}),
'track_links': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '50'}),
'track_read': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '50'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'unisender.emailmessage': {
'Meta': {'ordering': "('subject',)", 'object_name': 'EmailMessage'},
'body': ('tinymce_4.fields.TinyMCEModelField', [], {}),
'categories': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'generate_text': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'default': "'ru'", 'max_length': '50'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'list_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emails'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['unisender.SubscribeList']"}),
'sender_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sender_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'series_day': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'series_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2014, 7, 21, 0, 0)'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'emails'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['unisender.Tag']"}),
'text_body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'wrap_type': ('django.db.models.fields.CharField', [], {'default': "'skip'", 'max_length': '50'})
},
u'unisender.field': {
'Meta': {'ordering': "('name',)", 'object_name': 'Field'},
'field_type': ('django.db.models.fields.CharField', [], {'default': "'string'", 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'unisender.smsmessage': {
'Meta': {'object_name': 'SmsMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'unisender.subscribelist': {
'Meta': {'ordering': "('title',)", 'object_name': 'SubscribeList'},
'after_subscribe_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'before_subscribe_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'unisender.subscriber': {
'Meta': {'ordering': "('contact',)", 'object_name': 'Subscriber'},
'contact': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'contact_type': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '50'}),
'double_optin': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'list_ids': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'subscribers'", 'symmetrical': 'False', 'to': u"orm['unisender.SubscribeList']"}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscribers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['unisender.Tag']"}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'unisender.subscriberfields': {
'Meta': {'object_name': 'SubscriberFields'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['unisender.Field']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['unisender.Subscriber']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'unisender.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unisender_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['unisender']
|
ITCase-django/django-unisender
|
unisender/south_migrations/0003_auto__chg_field_emailmessage_tag__chg_field_emailmessage_list_id.py
|
Python
|
mit
| 12,888
|
DEFAULT_STEMMER = 'snowball'
DEFAULT_TOKENIZER = 'word'
DEFAULT_TAGGER = 'pos'
TRAINERS = ['news', 'editorial', 'reviews', 'religion',
'learned', 'science_fiction', 'romance', 'humor']
DEFAULT_TRAIN = 'news'
|
viktorRock/myFirstPythonAPI
|
pythapp/nltk/nltk_constants.py
|
Python
|
unlicense
| 222
|
"""Tests for the bob_emploi.lib.fhs module."""
import datetime
import typing
from typing import Any, Dict, Iterator, Optional, Mapping
import unittest
from unittest import mock
from bob_emploi.data_analysis.lib import fhs
# Jobseeker criteria provided per unemployment period.
class _JobseekerCriteria(typing.NamedTuple):
jobseeker_unique_id: str
code_rome: str
departement: Optional[str]
gender: Optional[str]
class FhsTestCase(unittest.TestCase):
"""Unit tests for FHS functions."""
@mock.patch(fhs.__name__ + '.migration_helpers.flatten_iterator')
def test_job_seeker_iterator(self, mock_flatten_iterator: mock.MagicMock) -> None:
"""Basic usage of job_seeker_iterator."""
def _flatten_iterator(filename: str) -> Iterator[Dict[str, Any]]:
if '/de.csv' in filename:
return iter([
{
'IDX': '1',
'ROME': 'foo',
'DATINS': datetime.date(2015, 12, 1),
'__file__': filename.replace('*', 'Reg01'),
},
{
'IDX': '15',
'ROME': 'foo',
'DATINS': datetime.date(2015, 12, 1),
'__file__': filename.replace('*', 'Reg01'),
},
{
'IDX': '2',
'ROME': 'foo',
'DATINS': datetime.date(2015, 12, 1),
'__file__': filename.replace('*', 'Reg21'),
},
])
if '/e0.csv' in filename:
return iter([
{
'IDX': '1',
'HOURS': 42,
'MOIS': '201510',
'__file__': filename.replace('*', 'Reg01'),
},
{
'IDX': '1',
'HOURS': 43,
'MOIS': '201510',
'__file__': filename.replace('*', 'Reg01'),
},
{
'IDX': '2',
'HOURS': 27,
'MOIS': '201510',
'__file__': filename.replace('*', 'Reg21'),
},
])
raise ValueError(f'Called with "{filename}"')
mock_flatten_iterator.side_effect = _flatten_iterator
seekers = list(
fhs.job_seeker_iterator('/folder/path/', tables=('de', 'e0')))
data = [j._data for j in seekers] # pylint: disable=protected-access
self.assertEqual([
{
'de': [{
'IDX': '1',
'ROME': 'foo',
'DATINS': datetime.date(2015, 12, 1),
'__file__': '/folder/path/Reg01/de.csv',
}],
'e0': [
{
'IDX': '1',
'HOURS': 42,
'MOIS': '201510',
'__file__': '/folder/path/Reg01/e0.csv',
},
{
'IDX': '1',
'HOURS': 43,
'MOIS': '201510',
'__file__': '/folder/path/Reg01/e0.csv',
},
],
},
{
'de': [{
'IDX': '15',
'ROME': 'foo',
'DATINS': datetime.date(2015, 12, 1),
'__file__': '/folder/path/Reg01/de.csv',
}],
'e0': [],
},
{
'de': [{
'IDX': '2',
'ROME': 'foo',
'DATINS': datetime.date(2015, 12, 1),
'__file__': '/folder/path/Reg21/de.csv',
}],
'e0': [{
'IDX': '2',
'HOURS': 27,
'MOIS': '201510',
'__file__': '/folder/path/Reg21/e0.csv',
}],
},
], data)
def test_job_seeker_key_idx(self) -> None:
"""Test of the IDX property of key created by job_seeker_key."""
key = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg01/de_ech201512.csv',
'IDX': '47',
})
self.assertEqual('47', str(key.IDX))
def test_job_seeker_key_equality_across_tables(self) -> None:
"""Test that job_seeker_key creates equal keys across 2 FHS tables."""
key_de = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg01/de_ech201512.csv',
'IDX': '47',
})
key_e0 = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg01/e0_ech201512.csv',
'IDX': '47',
})
self.assertEqual(key_de, key_e0)
def test_job_seeker_key_increasing(self) -> None:
"""Test that job_seeker_key creates increasing keys."""
key_1 = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg01/de_ech201512.csv',
'IDX': '1',
})
key_2 = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg01/de_ech201512.csv',
'IDX': '2',
})
self.assertLess(key_1, key_2)
def test_job_seeker_key_increasing_integers(self) -> None:
"""Test that job_seeker_key creates increasing keys for integers."""
key_2 = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg01/de_ech201512.csv',
'IDX': '2',
})
key_15 = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg01/de_ech201512.csv',
'IDX': '15',
})
self.assertLess(key_2, key_15)
def test_job_seeker_key_increasing_regions(self) -> None:
"""Test that job_seeker_key creates increasing keys across regions."""
key_15_reg01 = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg01/de_ech201512.csv',
'IDX': '15',
})
key_2_reg02 = fhs.job_seeker_key({
'__file__': '/folder/path/FHS 201512/Reg02/de_ech201512.csv',
'IDX': '2',
})
self.assertLess(key_15_reg01, key_2_reg02)
def test_extract_departement_id(self) -> None:
"""Basic usage of extract_departement_id."""
departement_id = fhs.extract_departement_id('31555')
self.assertEqual('31', departement_id)
    def test_extract_departement_id_overseas(self) -> None:
        """Test extract_departement_id on an overseas locality."""
departement_id = fhs.extract_departement_id('97613')
self.assertEqual('976', departement_id)
# TODO: Add more unit tests.
class _StateAtDateTestCase(typing.NamedTuple):
name: str
date: datetime.date
expect: Optional[Mapping[str, Any]]
class JobSeekerTestCase(unittest.TestCase):
"""Unit tests for the JobSeeker class."""
def test_unemployment_a_periods(self) -> None:
"""Basic usage of unemployment_a_periods."""
job_seeker = fhs.JobSeeker(1, '01', {
'de': [{
'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 5, 22),
'CATREGR': '1',
}],
'e0': [],
})
periods = job_seeker.unemployment_a_periods()
self.assertEqual(
fhs.DateIntervals([(
datetime.date(2015, 5, 1), datetime.date(2015, 5, 22),
{'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 5, 22),
'CATREGR': '1'})]),
periods)
def test_unemployment_a_periods_switching_to_b(self) -> None:
"""unemployment_a_periods when job seeker starts partial work."""
job_seeker = fhs.JobSeeker(1, '01', {
'de': [{
'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 12, 22),
'CATREGR': '1',
}],
'e0': [{'MOIS': '201510'}, {'MOIS': '201511'}, {'MOIS': '201512'}],
})
periods = job_seeker.unemployment_a_periods()
self.assertEqual(
fhs.DateIntervals([(
datetime.date(2015, 5, 1), datetime.date(2015, 10, 1),
{'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 10, 1),
'CATREGR': '1',
'MOTANN': fhs.CancellationReason.STARTING_PART_TIME_WORK})]),
periods)
def test_unemployment_a_periods_useless_change(self) -> None:
"""unemployment_a_periods whith a change from CATREGR 1 to 2."""
job_seeker = fhs.JobSeeker(1, '01', {
'de': [
{
'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 12, 22),
'CATREGR': '1',
'MOTINS': 'A',
'MOTANN': '07',
},
{
'DATINS': datetime.date(2015, 12, 22),
'DATANN': datetime.date(2015, 12, 31),
'CATREGR': '2',
'MOTINS': 'B',
'MOTANN': '08',
},
],
'e0': [],
})
periods = job_seeker.unemployment_a_periods()
self.assertEqual(
fhs.DateIntervals([(
datetime.date(2015, 5, 1), datetime.date(2015, 12, 31),
{'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 12, 31),
'CATREGR': '2',
'MOTINS': 'A',
'MOTANN': '08'})]),
periods)
def test_unemployment_a_periods_switching_to_e(self) -> None:
"""unemployment_a_periods when job seeker starts a training."""
job_seeker = fhs.JobSeeker(1, '01', {
'de': [
{
'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 12, 22),
'CATREGR': '1',
},
{
'DATINS': datetime.date(2015, 12, 22),
'DATANN': datetime.date(2015, 12, 31),
'CATREGR': '5',
},
],
'e0': [],
})
periods = job_seeker.unemployment_a_periods()
self.assertEqual(
fhs.DateIntervals([(
datetime.date(2015, 5, 1), datetime.date(2015, 12, 22),
{'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 12, 22),
'CATREGR': '1'})]),
periods)
def test_unemployment_a_periods_mistakenly_kicked_out(self) -> None:
"""unemployment_a_periods with a mistaken kick-out.
        Frequently some job seekers forget the required monthly update of
        their data, or do not show up at a mandatory meeting with their
        counselor. When that happens Pôle Emploi kicks them out of the
        register (and stops the allowance). Usually the job seeker then
        re-registers very quickly to get their allowance back.
We identify periods where a job seeker left the unemployment system
for a short period, and treat such gaps as if they had never left.
"""
job_seeker = fhs.JobSeeker(1, '01', {
'de': [
{
'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 7, 31),
'CATREGR': '1',
'MOTINS': 'A',
'MOTANN': '07',
},
{
'DATINS': datetime.date(2015, 8, 12),
'DATANN': datetime.date(2015, 10, 31),
'CATREGR': '1',
'MOTINS': 'B',
'MOTANN': '08',
},
{
'DATINS': datetime.date(2015, 11, 13),
'DATANN': None,
'CATREGR': '1',
'MOTINS': 'C',
'MOTANN': '12',
},
],
'e0': [],
})
# The first two periods should be merged, but not the last one.
periods = job_seeker.unemployment_a_periods(cover_holes_up_to=12)
self.assertEqual(
fhs.DateIntervals([
(datetime.date(2015, 5, 1), datetime.date(2015, 10, 31),
{'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 10, 31),
'CATREGR': '1',
'MOTINS': 'A',
'MOTANN': '08'}),
(datetime.date(2015, 11, 13), None,
{'DATINS': datetime.date(2015, 11, 13),
'DATANN': None,
'CATREGR': '1',
'MOTINS': 'C',
'MOTANN': '12'})]),
periods)
self.assertEqual(
fhs.Period(
datetime.date(2015, 5, 1), datetime.date(2015, 10, 31),
{'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 10, 31),
'CATREGR': '1',
'MOTINS': 'A',
'MOTANN': '08'}),
periods.first_contiguous_period())
def test_state_at_date(self) -> None:
"""Basic usages of state_at_date."""
job_seeker = fhs.JobSeeker(1, '01', {
'de': [
{
'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 5, 22),
'CATREGR': '1',
'ROME': 'H1234',
},
{
'DATINS': datetime.date(2015, 6, 1),
'DATANN': datetime.date(2015, 6, 22),
'CATREGR': '1',
'ROME': 'A1001',
},
],
})
first_state = {
'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 5, 22),
'CATREGR': '1',
'ROME': 'H1234',
}
tests = [
_StateAtDateTestCase(
name='In the middle',
date=datetime.date(2015, 5, 10),
expect=first_state),
_StateAtDateTestCase(
name='Before unemployment',
date=datetime.date(2014, 5, 10),
expect=None),
_StateAtDateTestCase(
name='After unemployment',
date=datetime.date(2016, 5, 10),
expect=None),
_StateAtDateTestCase(
name='Between 2 unemployment periods',
date=datetime.date(2015, 5, 30),
expect=None),
_StateAtDateTestCase(
name='First day of unemployment',
date=datetime.date(2015, 5, 1),
expect=first_state),
_StateAtDateTestCase(
name='First day of employment',
date=datetime.date(2015, 5, 22),
expect=None),
]
for test in tests:
state = job_seeker.state_at_date(test.date)
self.assertEqual(test.expect, state, msg=test.name)
def test_get_rome_per_period(self) -> None:
"""Basic usages of get_rome_per_period."""
now = datetime.date(2015, 12, 1)
job_seeker = fhs.JobSeeker(1, '21', {
'de': [
{
'IDX': '1.0',
'DATINS': datetime.date(2013, 5, 1),
'DATANN': datetime.date(2013, 5, 22),
'CATREGR': '1',
'ROME': 'H1234',
'DEPCOM': 'Here',
'MOTINS': 'A',
'SEXE': '1',
},
{
'IDX': '1.0',
'DATINS': datetime.date(2015, 6, 1),
'DATANN': datetime.date(2015, 6, 22),
'CATREGR': '1',
'ROME': 'A1001',
'DEPCOM': 'There',
'MOTINS': 'A',
'SEXE': '1',
},
],
'rome': [
{
'IDX': '1.0',
'JOURDV': datetime.date(2013, 5, 1),
'JOURFV': datetime.date(2013, 5, 10),
'ROME': 'N1234',
}
]
})
periods = list(job_seeker.get_rome_per_period(12, 'abc', now))
self.assertEqual(
[
_JobseekerCriteria(
jobseeker_unique_id='1_21',
code_rome='N1234',
departement=None,
gender=None,
),
_JobseekerCriteria(
jobseeker_unique_id='1_21',
code_rome='H1234',
departement='Here',
gender='1',
),
_JobseekerCriteria(
jobseeker_unique_id='1_21',
code_rome='A1001',
departement='There',
gender='1',
)
],
periods)
def test_get_training_periods(self) -> None:
"""Basic usages of all_training_periods."""
job_seeker = fhs.JobSeeker(1, '21', {
'de': [
{
'IDX': '1.0',
'DATINS': datetime.date(2013, 5, 1),
'DATANN': datetime.date(2013, 5, 22),
'CATREGR': '1',
'ROME': 'H1234',
'DEPCOM': 'Here',
'MOTINS': 'A',
'SEXE': '1',
},
{
'IDX': '1.0',
'DATINS': datetime.date(2015, 5, 1),
'DATANN': datetime.date(2015, 5, 22),
'CATREGR': '1',
'ROME': 'B1234',
'DEPCOM': 'There',
'MOTINS': 'A',
'SEXE': '1',
}
],
'p2': [
{
'IDX': '1.0',
'P2DATDEB': datetime.date(2013, 5, 25),
'P2DATFIN': datetime.date(2013, 5, 30),
'FORMACOD': '42745',
'OBJFORM': '1',
'P2NIVFOR': 1,
},
{
'IDX': '1.0',
'P2DATDEB': datetime.date(2016, 5, 1),
'P2DATFIN': datetime.date(2016, 5, 10),
'FORMACOD': '31685',
'OBJFORM': 'A',
'P2NIVFOR': 2,
}
]
})
periods = job_seeker.all_training_periods()
expected_periods = fhs.DateIntervals([
(
datetime.date(2013, 5, 25),
datetime.date(2013, 5, 30),
{
'IDX': '1.0',
'P2DATDEB': datetime.date(2013, 5, 25),
'P2DATFIN': datetime.date(2013, 5, 30),
'FORMACOD': '42745',
'OBJFORM': '1',
'ROME': 'H1234',
'DEPCOM': 'Here',
'P2NIVFOR': 1,
}
),
(
datetime.date(2016, 5, 1),
datetime.date(2016, 5, 10),
{
'IDX': '1.0',
'P2DATDEB': datetime.date(2016, 5, 1),
'P2DATFIN': datetime.date(2016, 5, 10),
'FORMACOD': '31685',
'OBJFORM': 'A',
'ROME': 'B1234',
'DEPCOM': 'There',
'P2NIVFOR': 2,
}
),
])
self.assertEqual(expected_periods, periods)
if __name__ == '__main__':
unittest.main()
|
bayesimpact/bob-emploi
|
data_analysis/lib/test/fhs_test.py
|
Python
|
gpl-3.0
| 20,364
|
class OrderedDictionary:
    """Dictionary that remembers the order in which keys were inserted."""
    def __init__(self):
        self._keys = []
        self._values = []
    def __len__(self):
        return len(self._keys)
    def __getitem__(self, key):
        if self.has_key(key):
            return self._values[self._keys.index(key)]
        else:
            return None
    def __setitem__(self, key, value):
        if self.has_key(key):
            self._values[self._keys.index(key)] = value
        else:
            self._keys.append(key)
            self._values.append(value)
    def __delitem__(self, key):
        # Delete by index so that duplicate values cannot shadow each other.
        index = self._keys.index(key)
        del self._keys[index]
        del self._values[index]
    def has_key(self, aKey):
        return aKey in self._keys
    def keys(self):
        return self._keys
    def values(self):
        return self._values
    def items(self):
        # zip() pairs keys with values; map(None, ...) was Python 2 only.
        return list(zip(self._keys, self._values))
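# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original recipe): iteration order
# follows insertion order, and updating an existing key keeps its position.
if __name__ == '__main__':
    d = OrderedDictionary()
    d['b'] = 1
    d['a'] = 2
    d['b'] = 3                  # update in place, position unchanged
    print(d.keys())             # ['b', 'a']
    print(d.items())            # [('b', 3), ('a', 2)]
    del d['b']
    print(d.items())            # [('a', 2)]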
|
ActiveState/code
|
recipes/Python/52270_OrderedDictionary/recipe-52270.py
|
Python
|
mit
| 927
|
# -*- coding: utf-8 -*-
from gluon import *
from s3 import S3CustomController
THEME = "MAVC"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
response = current.response
output = {}
s3 = response.s3
# Latest 4 Requests
#s3db = current.s3db
#list_id = "latest_reqs"
#layout = s3db.req_req_list_layout
#limit = 4
#resource = s3db.resource("req_req")
#s3db.req_customise_req_fields()
#list_fields = s3db.get_config("req_req", "list_fields")
#from s3 import FS
#resource.add_filter(FS("cancel") != True)
# Order with most recent first
#orderby = "date desc"
#output["latest_reqs"] = latest_records(resource, layout, list_id, limit, list_fields, orderby)
# Latest 4 Offers
#list_id = "latest_offers"
#layout = s3db.req_commit_list_layout
#limit = 4
#resource = s3db.resource("req_commit")
#s3db.req_customise_commit_fields()
#list_fields = s3db.get_config("req_commit", "list_fields")
#resource.add_filter(FS("cancel") != True)
# Order with most recent first
#orderby = "date desc"
#output["latest_offers"] = latest_records(resource, layout, list_id, limit, list_fields, orderby)
# What We Do
#table = s3db.cms_post
#ltable = s3db.cms_post_module
#query = (ltable.module == "default") & \
# (ltable.resource == "index") & \
# (ltable.post_id == table.id) & \
# (table.deleted != True)
#item = current.db(query).select(table.id,
# table.body,
# limitby=(0, 1)).first()
#if item:
# what_we_do = DIV(XML(item.body))
# if current.auth.s3_has_role("ADMIN"):
# if s3.crud.formstyle == "bootstrap":
# _class = "btn"
# else:
# _class = "action-btn"
# what_we_do.append(A(current.T("Edit"),
# _href=URL(c="cms", f="post",
# args=[item.id, "update"],
# vars={"module": "default",
# "resource": "index",
# }),
# _class="%s cms-edit" % _class))
#else:
# what_we_do = DIV()
# if current.auth.s3_has_role("ADMIN"):
# if s3.crud.formstyle == "bootstrap":
# _class = "btn"
# else:
# _class = "action-btn"
# what_we_do.append(A(current.T("Edit"),
# _href=URL(c="cms", f="post",
# args=["create"],
# vars={"module": "default",
# "resource": "index",
# }),
# _class="%s cms-edit" % _class))
#output["what_we_do"] = what_we_do
# Inject custom styles for homepage
s3.stylesheets.append("../themes/MAVC/homepage.css")
self._view(THEME, "index.html")
return output
# =============================================================================
def latest_records(resource, layout, list_id, limit, list_fields, orderby):
"""
Display a dataList of the latest records for a resource
"""
#orderby = resource.table[orderby]
datalist, numrows, ids = resource.datalist(fields=list_fields,
start=None,
limit=limit,
list_id=list_id,
orderby=orderby,
layout=layout)
if numrows == 0:
# Empty table or just no match?
from s3.s3crud import S3CRUD
table = resource.table
if "deleted" in table:
available_records = current.db(table.deleted != True)
else:
available_records = current.db(table._id > 0)
if available_records.select(table._id,
limitby=(0, 1)).first():
msg = DIV(S3CRUD.crud_string(resource.tablename,
"msg_no_match"),
_class="empty")
else:
msg = DIV(S3CRUD.crud_string(resource.tablename,
"msg_list_empty"),
_class="empty")
data = msg
else:
# Render the list
dl = datalist.html()
data = dl
return data
# END =========================================================================
|
flavour/ifrc_qa
|
modules/templates/MAVC/controllers.py
|
Python
|
mit
| 5,127
|
#!/usr/bin/env python3
"""
Command line tool to bisect failing CPython tests.
Find the test_os test method which alters the environment:
./python -m test.bisect_cmd --fail-env-changed test_os
Find a reference leak in "test_os", write the list of failing tests into the
"bisect" file:
./python -m test.bisect_cmd -o bisect -R 3:3 test_os
Load an existing list of tests from a file using -i option:
./python -m test --list-cases -m FileTests test_os > tests
./python -m test.bisect_cmd -i tests test_os
"""
import argparse
import datetime
import os.path
import math
import random
import subprocess
import sys
import tempfile
import time
def write_tests(filename, tests):
with open(filename, "w") as fp:
for name in tests:
print(name, file=fp)
fp.flush()
def write_output(filename, tests):
if not filename:
return
print("Writing %s tests into %s" % (len(tests), filename))
write_tests(filename, tests)
return filename
def format_shell_args(args):
return ' '.join(args)
def list_cases(args):
cmd = [sys.executable, '-m', 'test', '--list-cases']
cmd.extend(args.test_args)
proc = subprocess.run(cmd,
stdout=subprocess.PIPE,
universal_newlines=True)
exitcode = proc.returncode
if exitcode:
cmd = format_shell_args(cmd)
print("Failed to list tests: %s failed with exit code %s"
% (cmd, exitcode))
sys.exit(exitcode)
tests = proc.stdout.splitlines()
return tests
def run_tests(args, tests, huntrleaks=None):
tmp = tempfile.mktemp()
try:
write_tests(tmp, tests)
cmd = [sys.executable, '-m', 'test', '--matchfile', tmp]
cmd.extend(args.test_args)
print("+ %s" % format_shell_args(cmd))
proc = subprocess.run(cmd)
return proc.returncode
finally:
if os.path.exists(tmp):
os.unlink(tmp)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input',
                        help='Test names produced by --list-cases written '
                             'into a file. If not set, run --list-cases')
parser.add_argument('-o', '--output',
help='Result of the bisection')
parser.add_argument('-n', '--max-tests', type=int, default=1,
help='Maximum number of tests to stop the bisection '
'(default: 1)')
parser.add_argument('-N', '--max-iter', type=int, default=100,
help='Maximum number of bisection iterations '
'(default: 100)')
# FIXME: document that following arguments are test arguments
args, test_args = parser.parse_known_args()
args.test_args = test_args
return args
def main():
args = parse_args()
if args.input:
with open(args.input) as fp:
tests = [line.strip() for line in fp]
else:
tests = list_cases(args)
print("Start bisection with %s tests" % len(tests))
print("Test arguments: %s" % format_shell_args(args.test_args))
print("Bisection will stop when getting %s or less tests "
"(-n/--max-tests option), or after %s iterations "
"(-N/--max-iter option)"
% (args.max_tests, args.max_iter))
output = write_output(args.output, tests)
print()
start_time = time.monotonic()
iteration = 1
try:
while len(tests) > args.max_tests and iteration <= args.max_iter:
ntest = len(tests)
ntest = max(ntest // 2, 1)
subtests = random.sample(tests, ntest)
print("[+] Iteration %s: run %s tests/%s"
% (iteration, len(subtests), len(tests)))
print()
exitcode = run_tests(args, subtests)
print("ran %s tests/%s" % (ntest, len(tests)))
print("exit", exitcode)
if exitcode:
print("Tests failed: continuing with this subtest")
tests = subtests
output = write_output(args.output, tests)
else:
print("Tests succeeded: skipping this subtest, trying a new subset")
print()
iteration += 1
except KeyboardInterrupt:
print()
print("Bisection interrupted!")
print()
print("Tests (%s):" % len(tests))
for test in tests:
print("* %s" % test)
print()
if output:
print("Output written into %s" % output)
dt = math.ceil(time.monotonic() - start_time)
if len(tests) <= args.max_tests:
print("Bisection completed in %s iterations and %s"
% (iteration, datetime.timedelta(seconds=dt)))
sys.exit(1)
else:
print("Bisection failed after %s iterations and %s"
% (iteration, datetime.timedelta(seconds=dt)))
if __name__ == "__main__":
main()
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/bisect_cmd.py
|
Python
|
apache-2.0
| 4,967
|
"""Recommender model."""
from __future__ import print_function
import sys
import numpy as np
from theano import config, function, shared
import theano.tensor as T
__author__ = "Gianluca Corrado"
__copyright__ = "Copyright 2016, Gianluca Corrado"
__license__ = "MIT"
__maintainer__ = "Gianluca Corrado"
__email__ = "gianluca.corrado@unitn.it"
__status__ = "Production"
class Model():
"""Factorization model."""
def __init__(self, sp, sr, kp, kr, irange=0.01, learning_rate=0.01,
lambda_reg=0.01, verbose=True, seed=1234):
"""
Constructor.
Parameters
----------
sp : int
Number of protein features.
sr : int
Number of RNA features.
kp : int
Size of the protein latent space.
kr : int
Size of the RNA latent space.
irange : float (default : 0.01)
Initialization range for the model weights.
learning_rate : float (default : 0.01)
Learning rate for the weights update.
        lambda_reg : float (default : 0.01)
Lambda parameter for the regularization.
verbose : bool (default : True)
Print information at STDOUT.
seed : int (default : 1234)
Seed for random number generator.
"""
if verbose:
print("Compiling model...", end=' ')
sys.stdout.flush()
self.learning_rate = learning_rate
self.lambda_reg = lambda_reg
np.random.seed(seed)
        # explicit features for proteins
        fp = T.matrix("Fp", dtype=config.floatX)
        # explicit features for RNAs
fr = T.matrix("Fr", dtype=config.floatX)
# Correct label
y = T.vector("y")
# projection matrix for proteins
self.Ap = shared(((.5 - np.random.rand(kp, sp)) *
irange).astype(config.floatX), name="Ap")
self.bp = shared(((.5 - np.random.rand(kp)) *
irange).astype(config.floatX), name="bp")
# projection matrix for RNAs
self.Ar = shared(((.5 - np.random.rand(kr, sr)) *
irange).astype(config.floatX), name="Ar")
self.br = shared(((.5 - np.random.rand(kr)) *
irange).astype(config.floatX), name="br")
# generalization matrix
self.B = shared(((.5 - np.random.rand(kp, kr)) *
irange).astype(config.floatX), name="B")
# Latent space for proteins
p = T.nnet.sigmoid(T.dot(fp, self.Ap.T) + self.bp)
# Latent space for RNAs
r = T.nnet.sigmoid(T.dot(fr, self.Ar.T) + self.br)
# Predicted output
y_hat = T.nnet.sigmoid(T.sum(T.dot(p, self.B) * r, axis=1))
def _regularization():
"""Normalized Frobenius norm."""
norm_proteins = self.Ap.norm(2) + self.bp.norm(2)
norm_rnas = self.Ar.norm(2) + self.br.norm(2)
norm_b = self.B.norm(2)
num_proteins = self.Ap.flatten().shape[0] + self.bp.shape[0]
num_rnas = self.Ar.flatten().shape[0] + self.br.shape[0]
num_b = self.B.flatten().shape[0]
return (norm_proteins / num_proteins + norm_rnas / num_rnas +
norm_b / num_b) / 3
# mean squared error
cost_ = (T.sqr(y - y_hat)).mean()
reg = lambda_reg * _regularization()
cost = cost_ + reg
# compute sgd updates
g_Ap, g_bp, g_Ar, g_br, g_B = T.grad(
cost, [self.Ap, self.bp, self.Ar, self.br, self.B])
updates = ((self.Ap, self.Ap - learning_rate * g_Ap),
(self.bp, self.bp - learning_rate * g_bp),
(self.Ar, self.Ar - learning_rate * g_Ar),
(self.br, self.br - learning_rate * g_br),
(self.B, self.B - learning_rate * g_B))
# training step
self.train = function(
inputs=[fp, fr, y],
outputs=[y_hat, cost],
updates=updates)
# test
self.test = function(
inputs=[fp, fr, y],
outputs=[y_hat, cost])
# predict
self.predict = function(
inputs=[fp, fr],
outputs=y_hat)
if verbose:
print("Done.")
sys.stdout.flush()
def get_params(self):
"""Return the parameters of the model."""
return {'Ap': self.Ap.get_value(), 'bp': self.bp.get_value(),
'Ar': self.Ar.get_value(), 'br': self.br.get_value(),
'B': self.B.get_value()}
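# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): trains the model on
# random data for a few steps. All dimensions and hyper-parameters below are
# illustrative assumptions, not values taken from RNAcommender itself.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    model = Model(sp=10, sr=20, kp=4, kr=5, verbose=False)
    fp_batch = rng.rand(8, 10).astype(config.floatX)   # 8 protein examples
    fr_batch = rng.rand(8, 20).astype(config.floatX)   # 8 RNA examples
    labels = rng.randint(0, 2, 8).astype(config.floatX)
    for step in range(5):
        _, cost = model.train(fp_batch, fr_batch, labels)
        print("step %d, cost %.4f" % (step, cost))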
|
gianlucacorrado/RNAcommender
|
rnacommender/model.py
|
Python
|
mit
| 4,612
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: charset.py
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec']
import codecs
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
QP = 1
BASE64 = 2
SHORTEST = 3
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
CHARSETS = {'iso-8859-1': (
QP, QP, None),
'iso-8859-2': (
QP, QP, None),
'iso-8859-3': (
QP, QP, None),
'iso-8859-4': (
QP, QP, None),
'iso-8859-9': (
QP, QP, None),
'iso-8859-10': (
QP, QP, None),
'iso-8859-13': (
QP, QP, None),
'iso-8859-14': (
QP, QP, None),
'iso-8859-15': (
QP, QP, None),
'iso-8859-16': (
QP, QP, None),
'windows-1252': (
QP, QP, None),
'viscii': (
QP, QP, None),
'us-ascii': (None, None, None),
'big5': (
BASE64, BASE64, None),
'gb2312': (
BASE64, BASE64, None),
'euc-jp': (
BASE64, None, 'iso-2022-jp'),
'shift_jis': (
BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (
BASE64, None, None),
'koi8-r': (
BASE64, BASE64, None),
'utf-8': (
SHORTEST, BASE64, 'utf-8'),
'8bit': (
None, BASE64, 'utf-8')
}
ALIASES = {'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'latin_10': 'iso-8859-16',
'latin-10': 'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii'
}
CODEC_MAP = {'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
'us-ascii': None
}
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc is either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (
header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
module expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
    output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower().encode('ascii')
if not (input_charset in ALIASES or input_charset in CHARSETS):
try:
input_charset = codecs.lookup(input_charset).name
except LookupError:
pass
self.input_charset = ALIASES.get(input_charset, input_charset)
henc, benc, conv = CHARSETS.get(self.input_charset, (
SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
self.input_codec = CODEC_MAP.get(self.input_charset, self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset, self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
if self.body_encoding == QP:
return 'quoted-printable'
else:
if self.body_encoding == BASE64:
return 'base64'
return encode_7or8bit
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec != self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
if isinstance(s, unicode) or self.input_codec is None:
return s
else:
try:
return unicode(s, self.input_codec, 'replace')
except LookupError:
return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
if to_output:
codec = self.output_codec
else:
codec = self.input_codec
if not isinstance(ustr, unicode) or codec is None:
return ustr
else:
try:
return ustr.encode(codec, 'replace')
except LookupError:
return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
if self.header_encoding == BASE64:
return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
else:
if self.header_encoding == QP:
return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
if self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
return min(lenb64, lenqp) + len(cset) + MISC_LEN
return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
if self.header_encoding == BASE64:
return email.base64mime.header_encode(s, cset)
else:
if self.header_encoding == QP:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
if self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
if lenb64 < lenqp:
return email.base64mime.header_encode(s, cset)
else:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
else:
return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
if self.body_encoding is BASE64:
return email.base64mime.body_encode(s)
else:
if self.body_encoding is QP:
return email.quoprimime.body_encode(s)
return s
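# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the decompiled module; Python 2 only,
# since the class relies on the unicode() built-in):
if __name__ == '__main__':
    cs = Charset('latin_1')                       # alias resolves to iso-8859-1
    print(cs.input_charset)                       # 'iso-8859-1'
    print(cs.get_body_encoding())                 # 'quoted-printable'
    print(Charset('utf-8').get_body_encoding())   # 'base64'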
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/email/charset.py
|
Python
|
unlicense
| 14,131
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance.common.exception as exc
import glance.common.jsonpatchvalidator as jpv
import glance.tests.utils as utils
class TestValidator(jpv.JsonPatchValidatorMixin):
def __init__(self, methods_allowed=None):
if methods_allowed is None:
methods_allowed = ["replace", "add"]
super(TestValidator, self).__init__(methods_allowed)
class TestJsonPatchMixin(utils.BaseTestCase):
def test_body_validation(self):
validator = TestValidator()
validator.validate_body(
[{"op": "replace", "path": "/param", "value": "ok"}])
# invalid if not a list of [{"op": "", "path": "", "value": ""}]
# is passed
self.assertRaises(exc.JsonPatchException, validator.validate_body,
{"op": "replace", "path": "/me",
"value": "should be a list"})
def test_value_validation(self):
# a string, a list and a dict are valid value types
validator = TestValidator()
validator.validate_body(
[{"op": "replace", "path": "/param", "value": "ok string"}])
validator.validate_body(
[{"op": "replace", "path": "/param",
"value": ["ok list", "really ok"]}])
validator.validate_body(
[{"op": "replace", "path": "/param", "value": {"ok": "dict"}}])
def test_op_validation(self):
validator = TestValidator(methods_allowed=["replace", "add", "copy"])
validator.validate_body(
[{"op": "copy", "path": "/param", "value": "ok"},
{"op": "replace", "path": "/param/1", "value": "ok"}])
self.assertRaises(
exc.JsonPatchException, validator.validate_body,
[{"op": "test", "path": "/param", "value": "not allowed"}])
self.assertRaises(exc.JsonPatchException, validator.validate_body,
[{"op": "nosuchmethodatall", "path": "/param",
"value": "no way"}])
def test_path_validation(self):
validator = TestValidator()
bad_body_part = {"op": "add", "value": "bad path"}
for bad_path in ["/param/", "param", "//param", "/param~2", "/param~"]:
bad_body_part["path"] = bad_path
bad_body = [bad_body_part]
self.assertRaises(exc.JsonPatchException,
validator.validate_body, bad_body)
ok_body = [{"op": "add", "value": "some value",
"path": "/param~1/param~0"}]
body = validator.validate_body(ok_body)[0]
self.assertEqual("param//param~", body["path"])
|
dims/glance
|
glance/tests/unit/test_jsonpatchmixin.py
|
Python
|
apache-2.0
| 3,189
|
# coding=utf-8
from unittest2 import TestCase
from syntaxnet_wrapper.src.utils.pos_aggregation import pos_aggregate
class TestPostAggregation(TestCase):
def test_pos_aggregate(self):
test_values = [
([{u'1': {u'index': 1, u'token': u'il', u'pos': u'_',
u'feats': u'Gender=Masc|Number=Sing|Person=3|PronType=Prs|fPOS=PRON++',
u'label': u'PRON'},
u'3': {u'index': 3, u'token': u'et', u'pos': u'_', u'feats': u'fPOS=CONJ++',
u'label': u'CONJ'},
u'2': {u'index': 2, u'token': u's\u2019arr\xeata', u'pos': u'_',
u'feats': u'Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin|fPOS=VERB++',
u'label': u'VERB'}, u'5': {u'index': 5, u'token': u'sa', u'pos': u'_',
u'feats': u'Gender=Fem|Number=Sing|fPOS=DET++',
u'label': u'PRON'},
u'4': {u'index': 4, u'token': u'sortit', u'pos': u'_',
u'feats': u'Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin|fPOS=VERB++',
u'label': u'VERB'},
u'7': {u'index': 7, u'token': u'de', u'pos': u'_', u'feats': u'fPOS=ADP++',
u'label': u'ADP'}, u'6': {u'index': 6, u'token': u'montre', u'pos': u'_',
u'feats': u'Gender=Fem|Number=Sing|fPOS=NOUN++',
u'label': u'VERB'},
u'9': {u'index': 9, u'token': u'poche.', u'pos': u'_',
u'feats': u'Gender=Fem|Number=Sing|fPOS=NOUN++', u'label': u'NOUN'},
u'8': {u'index': 8, u'token': u'sa', u'pos': u'_',
u'feats': u'Gender=Fem|Number=Sing|fPOS=DET++', u'label': u'DET'}}],
{'animacy': {'inan': 0, 'hum': 0, 'nhum': 0, 'anim': 0},
'verbform': {'ger': 0, 'conv': 0, 'gdv': 0, 'vnoun': 0, 'part': 0, 'sup': 0, 'inf': 0, 'fin': 2},
'number': {'count': 0, 'tri': 0, 'grpl': 0, 'inv': 0, 'ptan': 0, 'coll': 0, 'pauc': 0, 'dual': 0,
'sing': 7, 'grpa': 0, 'plur': 0},
'aspect': {'perf': 0, 'hab': 0, 'prosp': 0, 'iter': 0, 'imp': 0, 'prog': 0},
'polarity': {'neg': 0, 'pos': 0},
'ratio': {'ratio_adj_propn': 0, 'ratio_adv_propn': 0, 'ratio_adj_verb': 0.0, 'ratio_adj_pron': 0.0,
'ratio_adj_adv': 0, 'ratio_adv_pron': 0.0, 'ratio_adv_det': 0.0, 'ratio_propn_verb': 0.0,
'ratio_propn_det': 0.0},
'mood': {'opt': 0, 'qot': 0, 'sub': 0, 'jus': 0, 'pot': 0, 'des': 0, 'nec': 0, 'imp': 0, 'cnd': 0,
'prp': 0, 'ind': 2, 'adm': 0}, 'definite': {'ind': 0, 'cons': 0, 'com': 0, 'spec': 0, 'def': 0},
'numtype': {'frac': 0, 'range': 0, 'card': 0, 'sets': 0, 'dist': 0, 'ord': 0, 'mult': 0}, 'reflex': 0,
'degree': {'abs': 0, 'equ': 0, 'pos': 0, 'sup': 0, 'cmp': 0},
'polite': {'humb': 0, 'infm': 0, 'form': 0, 'elev': 0},
'upos': {'adv': 0, 'verb': 3, 'noun': 1, 'adp': 1, 'punct': 0, 'sconj': 0, 'propn': 0, 'det': 1, 'sym': 0,
'intj': 0, 'pron': 2, 'num': 0, 'x': 0, 'cconj': 0, 'aux': 0, 'part': 0, 'adj': 0},
'tense': {'past': 2, 'imp': 0, 'fut': 0, 'pqp': 0, 'pres': 0}, 'abbr': 0, 'poss': 0,
'prontype': {'art': 0, 'exc': 0, 'int': 0, 'neg': 0, 'rcp': 0, 'tot': 0, 'rel': 0, 'dem': 0, 'prs': 1,
'ind': 0, 'emp': 0}, 'evident': {'fh': 0, 'nfh': 0},
'gender': {'neut': 0, 'masc': 1, 'fem': 4, 'com': 0}, 'foreign': 0,
'person': {'1': 0, '0': 0, '3': 3, '2': 0, '4': 0},
'voice': {'antip': 0, 'inv': 0, 'pass': 0, 'rcp': 0, 'mid': 0, 'cau': 0, 'act': 0, 'dir': 0}}
),
(
[{u'1': {u'index': 1, u'token': u'il', u'pos': u'_', u'feats': u'reflex', u'label': u'PRON'}}],
{'animacy': {'inan': 0, 'hum': 0, 'nhum': 0, 'anim': 0},
'verbform': {'ger': 0, 'conv': 0, 'gdv': 0, 'vnoun': 0, 'part': 0, 'sup': 0, 'inf': 0, 'fin': 0},
'number': {'count': 0, 'tri': 0, 'grpl': 0, 'inv': 0, 'ptan': 0, 'coll': 0, 'pauc': 0, 'dual': 0,
'sing': 0, 'grpa': 0, 'plur': 0},
'aspect': {'perf': 0, 'hab': 0, 'prosp': 0, 'iter': 0, 'imp': 0, 'prog': 0},
'polarity': {'neg': 0, 'pos': 0},
'ratio': {'ratio_adj_propn': 0, 'ratio_adv_propn': 0, 'ratio_adj_verb': 0, 'ratio_adj_pron': 0.0,
'ratio_adj_adv': 0, 'ratio_adv_pron': 0.0, 'ratio_adv_det': 0, 'ratio_propn_verb': 0,
'ratio_propn_det': 0},
'mood': {'opt': 0, 'qot': 0, 'sub': 0, 'jus': 0, 'pot': 0, 'des': 0, 'nec': 0, 'imp': 0, 'cnd': 0,
'prp': 0, 'ind': 0, 'adm': 0},
'definite': {'ind': 0, 'cons': 0, 'com': 0, 'spec': 0, 'def': 0},
'numtype': {'frac': 0, 'range': 0, 'card': 0, 'sets': 0, 'dist': 0, 'ord': 0, 'mult': 0}, 'reflex': 1,
'degree': {'abs': 0, 'equ': 0, 'pos': 0, 'sup': 0, 'cmp': 0},
'polite': {'humb': 0, 'infm': 0, 'form': 0, 'elev': 0},
'upos': {'adv': 0, 'verb': 0, 'noun': 0, 'adp': 0, 'punct': 0, 'sconj': 0, 'propn': 0, 'det': 0,
'sym': 0, 'intj': 0, 'pron': 1, 'num': 0, 'x': 0, 'cconj': 0, 'aux': 0, 'part': 0, 'adj': 0},
'tense': {'past': 0, 'imp': 0, 'fut': 0, 'pqp': 0, 'pres': 0}, 'abbr': 0, 'poss': 0,
'prontype': {'art': 0, 'exc': 0, 'int': 0, 'neg': 0, 'rcp': 0, 'tot': 0, 'rel': 0, 'dem': 0, 'prs': 0,
'ind': 0, 'emp': 0}, 'evident': {'fh': 0, 'nfh': 0},
'gender': {'neut': 0, 'masc': 0, 'fem': 0, 'com': 0}, 'foreign': 0,
'person': {'1': 0, '0': 0, '3': 0, '2': 0, '4': 0},
'voice': {'antip': 0, 'inv': 0, 'pass': 0, 'rcp': 0, 'mid': 0, 'cau': 0, 'act': 0, 'dir': 0}}
),
([{u'1': {u'index': 1, u'token': u'il', u'pos': u'_', u'feats': u'_', u'label': u'PRON'}}],
{'animacy': {'inan': 0, 'hum': 0, 'nhum': 0, 'anim': 0},
'verbform': {'ger': 0, 'conv': 0, 'gdv': 0, 'vnoun': 0, 'part': 0, 'sup': 0, 'inf': 0, 'fin': 0},
'number': {'count': 0, 'tri': 0, 'grpl': 0, 'inv': 0, 'ptan': 0, 'coll': 0, 'pauc': 0, 'dual': 0,
'sing': 0, 'grpa': 0, 'plur': 0},
'aspect': {'perf': 0, 'hab': 0, 'prosp': 0, 'iter': 0, 'imp': 0, 'prog': 0},
'polarity': {'neg': 0, 'pos': 0},
'ratio': {'ratio_adj_propn': 0, 'ratio_adv_propn': 0, 'ratio_adj_verb': 0, 'ratio_adj_pron': 0.0,
'ratio_adj_adv': 0, 'ratio_adv_pron': 0.0, 'ratio_adv_det': 0, 'ratio_propn_verb': 0,
'ratio_propn_det': 0},
'mood': {'opt': 0, 'qot': 0, 'sub': 0, 'jus': 0, 'pot': 0, 'des': 0, 'nec': 0, 'imp': 0, 'cnd': 0,
'prp': 0, 'ind': 0, 'adm': 0}, 'definite': {'ind': 0, 'cons': 0, 'com': 0, 'spec': 0, 'def': 0},
'numtype': {'frac': 0, 'range': 0, 'card': 0, 'sets': 0, 'dist': 0, 'ord': 0, 'mult': 0}, 'reflex': 0,
'degree': {'abs': 0, 'equ': 0, 'pos': 0, 'sup': 0, 'cmp': 0},
'polite': {'humb': 0, 'infm': 0, 'form': 0, 'elev': 0},
'upos': {'adv': 0, 'verb': 0, 'noun': 0, 'adp': 0, 'punct': 0, 'sconj': 0, 'propn': 0, 'det': 0, 'sym': 0,
'intj': 0, 'pron': 1, 'num': 0, 'x': 0, 'cconj': 0, 'aux': 0, 'part': 0, 'adj': 0},
'tense': {'past': 0, 'imp': 0, 'fut': 0, 'pqp': 0, 'pres': 0}, 'abbr': 0, 'poss': 0,
'prontype': {'art': 0, 'exc': 0, 'int': 0, 'neg': 0, 'rcp': 0, 'tot': 0, 'rel': 0, 'dem': 0, 'prs': 0,
'ind': 0, 'emp': 0}, 'evident': {'fh': 0, 'nfh': 0},
'gender': {'neut': 0, 'masc': 0, 'fem': 0, 'com': 0}, 'foreign': 0,
'person': {'1': 0, '0': 0, '3': 0, '2': 0, '4': 0},
'voice': {'antip': 0, 'inv': 0, 'pass': 0, 'rcp': 0, 'mid': 0, 'cau': 0, 'act': 0, 'dir': 0}})
]
for input_test, result in test_values:
self.assertDictEqual(result, pos_aggregate(input_test))
|
short-edition/syntaxnet-wrapper
|
syntaxnet_wrapper/src/utils/test/test_pos_aggregation.py
|
Python
|
apache-2.0
| 8,608
|
# Copyright 2013 Pau Haro Negre
# based on C++ code by Carl Staelin Copyright 2009-2011
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cognon_extended import Neuron
from cognon_extended import WordSet
from math import log
from multiprocessing import Pool
import numpy as np
import random
class Alice(object):
def train(self, neuron, wordset):
neuron.start_training()
for word in wordset.words:
fired = neuron.train(word)
neuron.finish_training()
class Bob(object):
def __init__(self):
self.true_true = 0 # trained and fired (ok)
self.true_false = 0 # trained but not fired (false negative)
self.false_true = 0 # not trained but fired (false positive)
self.false_false = 0 # not trained and not fired (ok)
def test(self, neuron, train_wordset, test_wordset):
# Check the training set
for word in train_wordset.words:
fired, delay, container = neuron.expose(word)
if fired:
self.true_true += 1
else:
self.true_false += 1
# Check the test set
#num_active = len(train_wordset.words[0].synapses[0])
#test_wordset = Wordset(num_test_words, neuron.S0, neuron.D1, num_active)
for word in test_wordset.words:
fired, delay, container = neuron.expose(word)
if fired:
self.false_true += 1
else:
self.false_false += 1
class Configuration(object):
def __init__(self):
self.neuron_params()
self.test_params()
def neuron_params(self, C = 1, D1 = 4, D2 = 7, Q = 40, G = 2, H = 5):
self.H = H # Num. of synapses needed to fire a neuron
self.G = G # Ratio of strong synapse strength to weak synapse s.
self.C = C # Num. of dendrite compartments
self.D1 = D1 # Num. of posible time slots where spikes can happen
self.D2 = D2 # Num. of time delays available between two layers
self.Q = Q # Q = S0/(H*R*C)
def test_params(self, num_active = 4, R = None, w = 100, num_test_words = 0):
self.num_active = num_active # Num. of active synapses per word
self.R = R # Avg. num. of patterns per afferent synapse spike
self.w = w # Num. of words to train the neuron with
self.num_test_words = num_test_words # Num. of words to test
@property
def S0(self):
if self.R:
return int(self.Q * self.H * self.C * self.R)
else:
return int(self.Q * self.H * self.C)
class Cognon(object):
def __call__(self, config):
return self.run_experiment(config)
def run_configuration(self, config, repetitions):
# Ensure that at least 10,000 words are learnt
        MIN_LEARN_WORDS = 10000
        MIN_LEARN_WORDS = 1  # debug override: effectively disables the 10,000-word minimum
if repetitions * config.w < MIN_LEARN_WORDS:
N = MIN_LEARN_WORDS/config.w
else:
N = repetitions
# Ensure that at least 1,000,000 words are tested
MIN_TEST_WORDS = 1000000
if not config.num_test_words:
config.num_test_words = MIN_TEST_WORDS/N
# Run all the experiments
#values = [self.run_experiment(config) for i in xrange(N)]
pool = Pool(processes=20)
values = pool.map(Cognon(), [config,]*N)
# Store the results in a NumPy structured array
names = ('pL', 'pF', 'L')
        types = [np.float64,] * len(names)
        r = np.array(values, dtype=zip(names, types))
return r
def run_experiment(self, cfg):
# create a neuron instance with the provided parameters
neuron = Neuron(cfg.S0, cfg.H, cfg.G, cfg.C, cfg.D1, cfg.D2)
# create the training and test wordsets
train_wordset = WordSet(cfg.w, cfg.S0, cfg.D1, cfg.num_active, cfg.R)
test_wordset = WordSet(cfg.num_test_words, cfg.S0, cfg.D1,
cfg.num_active, cfg.R)
# create Alice instance to train the neuron
alice = Alice()
alice.train(neuron, train_wordset)
# create a Bob instance to test the neuron
bob = Bob()
bob.test(neuron, train_wordset, test_wordset)
# results
pL = bob.true_true/float(cfg.w)
pF = bob.false_true/float(cfg.num_test_words)
# L = w*((1-pL)*log2((1-pL)/(1-pF)) + pL*log2(pL/pF)) bits
L = 0
if pL == 1.0:
if pF != 0:
L = -cfg.w*log(pF)/log(2.0)
else:
L = cfg.w
elif pL > pF:
L = cfg.w/log(2.0) * \
(log(1.0 - pL) - log(1.0 - pF) +
pL * (log(1.0 - pF) - log(1.0 - pL) + log(pL) - log(pF)))
return pL, pF, L
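# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original script): a single experiment
# with the default neuron parameters. The word counts below are small
# illustrative assumptions; real runs use the minimums enforced by
# run_configuration().
if __name__ == '__main__':
    config = Configuration()
    config.test_params(num_active=4, w=50, num_test_words=1000)
    pL, pF, L = Cognon().run_experiment(config)
    print('pL=%.3f pF=%.3f L=%.2f' % (pL, pF, L))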
|
pauh/neuron
|
run_experiment.py
|
Python
|
apache-2.0
| 5,390
|
# -*- coding: utf-8; mode: Python -*-
# ocitysmap, city map and street index generator from OpenStreetMap data
# Copyright (C) 2010 David Decotigny
# Copyright (C) 2010 Frédéric Lehobey
# Copyright (C) 2010 Pierre Mauduit
# Copyright (C) 2010 David Mentré
# Copyright (C) 2010 Maxime Petazzoni
# Copyright (C) 2010 Thomas Petazzoni
# Copyright (C) 2010 Gaël Utard
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import gettext
def _install_language(language, locale_path):
t = gettext.translation(domain='ocitysmap',
localedir=locale_path,
languages=[language],
fallback=True)
t.install(unicode=True)
class i18n:
"""Functions needed to be implemented for a new language.
See i18n_fr_FR_UTF8 below for an example. """
def language_code(self):
pass
def user_readable_street(self, name):
pass
def first_letter_equal(self, a, b):
pass
def isrtl(self):
return False
class i18n_template_code_CODE(i18n):
def __init__(self, language, locale_path):
"""Install the _() function for the chosen locale other
object initialisation"""
# It's important to convert to str() here because the map_language
# value coming from the database is Unicode, but setlocale() needs a
# non-unicode string as the locale name, otherwise it thinks it's a
# locale tuple.
self.language = str(language)
_install_language(language, locale_path)
def language_code(self):
"""returns the language code of the specific language
supported, e.g. fr_FR.UTF-8"""
return self.language
def user_readable_street(self, name):
""" transforms a street name into a suitable form for
the map index, e.g. Paris (Rue de) for French"""
return name
def first_letter_equal(self, a, b):
"""returns True if the letters a and b are equal in the map index,
        e.g. É and E are equal in the French map index"""
return a == b
def isrtl(self):
return False
class i18n_fr_generic(i18n):
APPELLATIONS = [ u"Accès", u"Allée", u"Allées", u"Autoroute", u"Avenue", u"Barrage",
u"Boulevard", u"Carrefour", u"Chaussée", u"Chemin",
u"Cheminement", u"Cale", u"Cales", u"Cavée", u"Cité",
u"Clos", u"Coin", u"Côte", u"Cour", u"Cours", u"Descente",
u"Degré", u"Escalier",
u"Escaliers", u"Esplanade", u"Funiculaire",
u"Giratoire", u"Hameau", u"Impasse", u"Jardin",
u"Jardins", u"Liaison", u"Mail", u"Montée", u"Môle",
u"Parc", u"Passage", u"Passerelle", u"Passerelles",
u"Place", u"Placette", u"Pont", u"Promenade",
u"Petite Avenue", u"Petite Rue", u"Quai",
u"Rampe", u"Rang", u"Résidence", u"Rond-Point",
u"Route forestière", u"Route", u"Rue", u"Ruelle",
u"Square", u"Sente", u"Sentier", u"Sentiers", u"Terre-Plein",
u"Télécabine", u"Traboule", u"Traverse", u"Tunnel",
u"Venelle", u"Villa", u"Virage"
]
DETERMINANTS = [ u" des", u" du", u" de la", u" de l'",
u" de", u" d'", u" aux", u""
]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)), re.IGNORECASE
| re.UNICODE)
# for IndexPageGenerator._upper_unaccent_string
E_ACCENT = re.compile(ur"[éèêëẽ]", re.IGNORECASE | re.UNICODE)
I_ACCENT = re.compile(ur"[íìîïĩ]", re.IGNORECASE | re.UNICODE)
A_ACCENT = re.compile(ur"[áàâäã]", re.IGNORECASE | re.UNICODE)
O_ACCENT = re.compile(ur"[óòôöõ]", re.IGNORECASE | re.UNICODE)
U_ACCENT = re.compile(ur"[úùûüũ]", re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
s = self.E_ACCENT.sub("e", s)
s = self.I_ACCENT.sub("i", s)
s = self.A_ACCENT.sub("a", s)
s = self.O_ACCENT.sub("o", s)
s = self.U_ACCENT.sub("u", s)
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
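# Editor's illustration (not in the original file): with the French rules
# above, and a hypothetical locale path,
#   fr = i18n_fr_generic('fr_FR.UTF-8', '/usr/share/locale')
#   fr.user_readable_street(u"Rue de la Paix")   # -> u"Paix (Rue de la)"
# PREFIX_REGEXP captures the appellation plus determinant and moves it behind
# the name, so streets sort by their significant word in the index.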
class i18n_it_generic(i18n):
APPELLATIONS = [ u"Via", u"Viale", u"Piazza", u"Scali", u"Strada", u"Largo",
u"Corso", u"Viale", u"Calle", u"Sottoportico",
u"Sottoportego", u"Vicolo", u"Piazzetta" ]
DETERMINANTS = [ u" delle", u" dell'", u" dei", u" degli",
u" della", u" del", u" di", u"" ]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)), re.IGNORECASE
| re.UNICODE)
# for IndexPageGenerator._upper_unaccent_string
E_ACCENT = re.compile(ur"[éèêëẽ]", re.IGNORECASE | re.UNICODE)
I_ACCENT = re.compile(ur"[íìîïĩ]", re.IGNORECASE | re.UNICODE)
A_ACCENT = re.compile(ur"[áàâäã]", re.IGNORECASE | re.UNICODE)
O_ACCENT = re.compile(ur"[óòôöõ]", re.IGNORECASE | re.UNICODE)
U_ACCENT = re.compile(ur"[úùûüũ]", re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
s = self.E_ACCENT.sub("e", s)
s = self.I_ACCENT.sub("i", s)
s = self.A_ACCENT.sub("a", s)
s = self.O_ACCENT.sub("o", s)
s = self.U_ACCENT.sub("u", s)
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
class i18n_es_generic(i18n):
APPELLATIONS = [ u"Avenida", u"Avinguda", u"Calle", u"Callejón",
u"Calzada", u"Camino", u"Camí", u"Carrer", u"Carretera",
u"Glorieta", u"Parque", u"Pasaje", u"Pasarela", u"Paseo", u"Plaza",
u"Plaça", u"Privada", u"Puente", u"Ronda", u"Salida", u"Travesia" ]
DETERMINANTS = [ u" de", u" de la", u" del", u" de las",
u" dels", u" de los", u" d'", u" de l'", u"" ]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)), re.IGNORECASE
| re.UNICODE)
# for IndexPageGenerator._upper_unaccent_string
E_ACCENT = re.compile(ur"[éèêëẽ]", re.IGNORECASE | re.UNICODE)
I_ACCENT = re.compile(ur"[íìîïĩ]", re.IGNORECASE | re.UNICODE)
A_ACCENT = re.compile(ur"[áàâäã]", re.IGNORECASE | re.UNICODE)
O_ACCENT = re.compile(ur"[óòôöõ]", re.IGNORECASE | re.UNICODE)
U_ACCENT = re.compile(ur"[úùûüũ]", re.IGNORECASE | re.UNICODE)
N_ACCENT = re.compile(ur"[ñ]", re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
s = self.E_ACCENT.sub("e", s)
s = self.I_ACCENT.sub("i", s)
s = self.A_ACCENT.sub("a", s)
s = self.O_ACCENT.sub("o", s)
s = self.U_ACCENT.sub("u", s)
s = self.N_ACCENT.sub("n", s)
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
class i18n_ca_generic(i18n):
APPELLATIONS = [ # Catalan
u"Autopista", u"Autovia", u"Avinguda",
u"Baixada", u"Barranc", u"Barri", u"Barriada",
u"Biblioteca", u"Carrer", u"Carreró", u"Carretera",
u"Cantonada", u"Església", u"Estació", u"Hospital",
u"Monestir", u"Monument", u"Museu", u"Passatge",
u"Passeig", u"Plaça", u"Planta", u"Polígon",
u"Pujada", u"Rambla", u"Ronda", u"Travessera",
u"Travessia", u"Urbanització", u"Via",
u"Avenida", u"Calle", u"Camino", u"Plaza",
# Spanish (being distinct from Catalan)
u"Acceso", u"Acequia", u"Alameda", u"Alquería",
u"Andador", u"Angosta", u"Apartamentos", u"Apeadero",
u"Arboleda", u"Arrabal", u"Arroyo", u"Autovía",
u"Avenida", u"Bajada", u"Balneario", u"Banda",
u"Barranco", u"Barranquil", u"Barrio", u"Bloque",
u"Brazal", u"Bulevar", u"Calle", u"Calleja",
u"Callejón", u"Callejuela", u"Callizo", u"Calzada",
u"Camino", u"Camping", u"Cantera", u"Cantina",
u"Cantón", u"Carrera", u"Carrero", u"Carreterín",
u"Carretil", u"Carril", u"Caserío", u"Chalet",
u"Cinturón", u"Circunvalación", u"Cobertizo",
u"Colonia", u"Complejo", u"Conjunto", u"Convento",
u"Cooperativa", u"Corral", u"Corralillo", u"Corredor",
u"Cortijo", u"Costanilla", u"Costera", u"Cuadra",
u"Cuesta", u"Dehesa", u"Demarcación", u"Diagonal",
u"Diseminado", u"Edificio", u"Empresa", u"Entrada",
u"Escalera", u"Escalinata", u"Espalda", u"Estación",
u"Estrada", u"Explanada", u"Extramuros", u"Extrarradio",
u"Fábrica", u"Galería", u"Glorieta", u"Gran Vía",
u"Granja", u"Hipódromo", u"Jardín", u"Ladera",
u"Llanura", u"Malecón", u"Mercado", u"Mirador",
u"Monasterio", u"Muelle", u"Núcleo", u"Palacio",
u"Pantano", u"Paraje", u"Parque", u"Particular",
u"Partida", u"Pasadizo", u"Pasaje", u"Paseo",
u"Paseo marítimo", u"Pasillo", u"Plaza", u"Plazoleta",
u"Plazuela", u"Poblado", u"Polígono", u"Polígono industrial",
u"Portal", u"Pórtico", u"Portillo", u"Prazuela",
u"Prolongación", u"Pueblo", u"Puente", u"Puerta",
u"Puerto", u"Punto kilométrico", u"Rampla",
u"Residencial", u"Ribera", u"Rincón", u"Rinconada",
u"Sanatorio", u"Santuario", u"Sector", u"Sendera",
u"Sendero", u"Subida", u"Torrente", u"Tránsito",
u"Transversal", u"Trasera", u"Travesía", u"Urbanización",
u"Vecindario", u"Vereda", u"Viaducto", u"Viviendas",
# French (being distinct from Catalan and Spanish)
u"Accès", u"Allée", u"Allées", u"Autoroute", u"Avenue", u"Barrage",
u"Boulevard", u"Carrefour", u"Chaussée", u"Chemin",
u"Cheminement", u"Cale", u"Cales", u"Cavée", u"Cité",
u"Clos", u"Coin", u"Côte", u"Cour", u"Cours", u"Descente",
u"Degré", u"Escalier",
u"Escaliers", u"Esplanade", u"Funiculaire",
u"Giratoire", u"Hameau", u"Impasse", u"Jardin",
u"Jardins", u"Liaison", u"Mail", u"Montée", u"Môle",
u"Parc", u"Passage", u"Passerelle", u"Passerelles",
u"Place", u"Placette", u"Pont", u"Promenade",
u"Petite Avenue", u"Petite Rue", u"Quai",
u"Rampe", u"Rang", u"Résidence", u"Rond-Point",
u"Route forestière", u"Route", u"Rue", u"Ruelle",
u"Square", u"Sente", u"Sentier", u"Sentiers", u"Terre-Plein",
u"Télécabine", u"Traboule", u"Traverse", u"Tunnel",
u"Venelle", u"Villa", u"Virage"
]
    DETERMINANTS = [ # Catalan
                     u" de", u" de la", u" del", u" dels", u" d'",
                     u" de l'", u" de sa", u" de son", u" de s'",
                     u" de ses", u" d'en", u" de na", u" de n'",
                     # Spanish (being distinct from Catalan)
                     u" de las", u" de los",
                     # French (being distinct from Catalan and Spanish)
                     u" du",
                     u"" ]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)), re.IGNORECASE
| re.UNICODE)
# for IndexPageGenerator._upper_unaccent_string
E_ACCENT = re.compile(ur"[éèêëẽ]", re.IGNORECASE | re.UNICODE)
I_ACCENT = re.compile(ur"[íìîïĩ]", re.IGNORECASE | re.UNICODE)
A_ACCENT = re.compile(ur"[áàâäã]", re.IGNORECASE | re.UNICODE)
O_ACCENT = re.compile(ur"[óòôöõ]", re.IGNORECASE | re.UNICODE)
U_ACCENT = re.compile(ur"[úùûüũ]", re.IGNORECASE | re.UNICODE)
N_ACCENT = re.compile(ur"[ñ]", re.IGNORECASE | re.UNICODE)
C_ACCENT = re.compile(ur"[ç]", re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
s = self.E_ACCENT.sub("e", s)
s = self.I_ACCENT.sub("i", s)
s = self.A_ACCENT.sub("a", s)
s = self.O_ACCENT.sub("o", s)
s = self.U_ACCENT.sub("u", s)
s = self.N_ACCENT.sub("n", s)
s = self.C_ACCENT.sub("c", s)
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
class i18n_pt_br_generic(i18n):
APPELLATIONS = [ u"Aeroporto", u"Alameda", u"Área", u"Avenida",
u"Campo", u"Chácara", u"Colônia",
u"Condomínio", u"Conjunto", u"Distrito", u"Esplanada", u"Estação",
u"Estrada", u"Favela", u"Fazenda",
u"Feira", u"Jardim", u"Ladeira", u"Lago",
u"Lagoa", u"Largo", u"Loteamento", u"Morro", u"Núcleo",
u"Parque", u"Passarela", u"Pátio", u"Praça", u"Quadra",
u"Recanto", u"Residencial", u"Rua",
u"Setor", u"Sítio", u"Travessa", u"Trecho", u"Trevo",
u"Vale", u"Vereda", u"Via", u"Viaduto", u"Viela",
u"Vila" ]
DETERMINANTS = [ u" do", u" da", u" dos", u" das", u"" ]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)), re.IGNORECASE
| re.UNICODE)
# for IndexPageGenerator._upper_unaccent_string
E_ACCENT = re.compile(ur"[éèêëẽ]", re.IGNORECASE | re.UNICODE)
I_ACCENT = re.compile(ur"[íìîïĩ]", re.IGNORECASE | re.UNICODE)
A_ACCENT = re.compile(ur"[áàâäã]", re.IGNORECASE | re.UNICODE)
O_ACCENT = re.compile(ur"[óòôöõ]", re.IGNORECASE | re.UNICODE)
U_ACCENT = re.compile(ur"[úùûüũ]", re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
s = self.E_ACCENT.sub("e", s)
s = self.I_ACCENT.sub("i", s)
s = self.A_ACCENT.sub("a", s)
s = self.O_ACCENT.sub("o", s)
s = self.U_ACCENT.sub("u", s)
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
class i18n_ar_generic(i18n):
APPELLATIONS = [ u"شارع", u"طريق", u"زقاق", u"نهج", u"جادة",
u"ممر", u"حارة",
u"كوبري", u"كوبرى", u"جسر", u"مطلع", u"منزل",
u"مفرق", u"ملف", u"تقاطع",
u"ساحل",
u"ميدان", u"ساحة", u"دوار" ]
DETERMINANTS = [ u" ال", u"" ]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)), re.IGNORECASE
| re.UNICODE)
# for IndexPageGenerator._upper_unaccent_string
A_ACCENT = re.compile(ur"[اإآ]", re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
        s = self.A_ACCENT.sub(u"أ", s)
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
def isrtl(self):
return True
class i18n_ru_generic(i18n):
APPELLATIONS = [ u"ул", u"бул", u"пер", u"пр", u"улица", u"бульвар", u"проезд",
u"проспект", u"площадь", u"сквер", u"парк" ]
# only "ул." and "пер." are recommended shortenings, however other words can
# occur shortened.
#
# http://bit.ly/6ASISp (OSM wiki)
#
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)\.?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS)), re.IGNORECASE
| re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
        # Usually there are no accents in Russian names; only "ё" appears
        # sometimes, but not as a first letter.
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
class i18n_nl_generic(i18n):
#
# Dutch streets are often named after people and include a title.
# The title will be captured as part of the <prefix>
#
APPELLATIONS = [ u"St.", u"Sint", u"Ptr.", u"Pater",
u"Prof.", u"Professor", u"Past.", u"Pastoor",
u"Pr.", u"Prins", u"Prinses", u"Gen.", u"Generaal",
u"Mgr.", u"Monseigneur", u"Mr.", u"Meester",
u"Burg.", u"Burgermeester", u"Dr.", u"Dokter",
u"Ir.", u"Ingenieur", u"Ds.", u"Dominee", u"Deken",
u"Drs.",
# counting words before street name,
# e.g. "1e Walstraat" => "Walstraat (1e)"
u"\d+e",
u"" ]
#
# Surnames in Dutch streets named after people tend to have the middle name
# listed after the rest of the surname,
# e.g. "Prins van Oranjestraat" => "Oranjestraat (Prins van)"
# Likewise, articles are captured as part of the prefix,
# e.g. "Den Urling" => "Urling (Den)"
#
DETERMINANTS = [ u"\s?van der", u"\s?van den", u"\s?van de", u"\s?van",
u"\s?Den", u"\s?D'n", u"\s?D'", u"\s?De", u"\s?'T", u"\s?Het",
u"" ]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)),
re.IGNORECASE | re.UNICODE)
# for IndexPageGenerator._upper_unaccent_string
E_ACCENT = re.compile(ur"[éèêëẽ]", re.IGNORECASE | re.UNICODE)
I_ACCENT = re.compile(ur"[íìîïĩ]", re.IGNORECASE | re.UNICODE)
A_ACCENT = re.compile(ur"[áàâäã]", re.IGNORECASE | re.UNICODE)
O_ACCENT = re.compile(ur"[óòôöõ]", re.IGNORECASE | re.UNICODE)
U_ACCENT = re.compile(ur"[úùûüũ]", re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
s = self.E_ACCENT.sub("e", s)
s = self.I_ACCENT.sub("i", s)
s = self.A_ACCENT.sub("a", s)
s = self.O_ACCENT.sub("o", s)
s = self.U_ACCENT.sub("u", s)
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
#
# Make sure name actually contains something,
# the PREFIX_REGEXP.match fails on zero-length strings
#
if len(name) == 0:
return name
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
matches = self.PREFIX_REGEXP.match(name)
#
# If no prefix was captured, that's okay. Don't substitute
# the name however, "<name> ()" looks silly
#
        if matches is None:
return name
if matches.group('prefix'):
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
class i18n_hr_HR(i18n):
# for _upper_unaccent_string
C_ACCENT = re.compile(ur"[ćč]", re.IGNORECASE | re.UNICODE)
D_ACCENT = re.compile(ur"đ|dž", re.IGNORECASE | re.UNICODE)
N_ACCENT = re.compile(ur"nj", re.IGNORECASE | re.UNICODE)
L_ACCENT = re.compile(ur"lj", re.IGNORECASE | re.UNICODE)
S_ACCENT = re.compile(ur"š", re.IGNORECASE | re.UNICODE)
Z_ACCENT = re.compile(ur"ž", re.IGNORECASE | re.UNICODE)
def _upper_unaccent_string(self, s):
s = self.C_ACCENT.sub("c", s)
s = self.D_ACCENT.sub("d", s)
s = self.N_ACCENT.sub("n", s)
s = self.L_ACCENT.sub("l", s)
s = self.S_ACCENT.sub("s", s)
s = self.Z_ACCENT.sub("z", s)
return s.upper()
def __init__(self, language, locale_path):
"""Install the _() function for the chosen locale other
object initialisation"""
self.language = str(language) # FIXME: why do we have unicode here?
_install_language(language, locale_path)
def language_code(self):
"""returns the language code of the specific language
supported, e.g. fr_FR.UTF-8"""
return self.language
def user_readable_street(self, name):
""" transforms a street name into a suitable form for
the map index, e.g. Paris (Rue de) for French"""
return name
## FIXME: only first letter does not work for Croatian digraphs (dž, lj, nj)
def first_letter_equal(self, a, b):
"""returns True if the letters a and b are equal in the map index,
        e.g. É and E are equal in the French map index"""
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
class i18n_pl_generic(i18n):
APPELLATIONS = [ u"Dr.", u"Doktora", u"Ks.", u"Księdza",
u"Generała", u"Gen.",
u"Aleja", u"Plac", u"Pl.",
u"Rondo", u"rondo", u"Profesora",
u"Prof.",
u"" ]
DETERMINANTS = [ u"\s?im.", u"\s?imienia", u"\s?pw.",
u"" ]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)),
re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def language_code(self):
return self.language
def user_readable_street(self, name):
#
# Make sure name actually contains something,
# the PREFIX_REGEXP.match fails on zero-length strings
#
if len(name) == 0:
return name
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
matches = self.PREFIX_REGEXP.match(name)
#
# If no prefix was captured, that's okay. Don't substitute
# the name however, "<name> ()" looks silly
#
        if matches is None:
return name
if matches.group('prefix'):
name = self.PREFIX_REGEXP.sub(r"\g<name>, \g<prefix>", name)
return name
def first_letter_equal(self, a, b):
return a == b
class i18n_de_generic(i18n):
#
# German streets are often named after people and include a title.
# The title will be captured as part of the <prefix>
# Covering airport names and "New"/"Old" as prefixes as well
#
APPELLATIONS = [ u"Alte", u"Alter", u"Doktor", u"Dr.",
u"Flughafen", u"Flugplatz", u"Gen.,", u"General",
u"Neue", u"Neuer", u"Platz",
u"Prinz", u"Prinzessin", u"Prof.",
u"Professor" ]
#
    # Surnames in German streets named after people tend to have the middle name
# listed after the rest of the surname,
# e.g. "Platz der deutschen Einheit" => "deutschen Einheit (Platz der)"
# Likewise, articles are captured as part of the prefix,
# e.g. "An der Märchenwiese" => "Märchenwiese (An der)"
#
DETERMINANTS = [ u"\s?An den", u"\s?An der", u"\s?Am",
u"\s?Auf den" , u"\s?Auf der"
u" an", u" des", u" der", u" von", u" vor"]
SPACE_REDUCE = re.compile(r"\s+")
PREFIX_REGEXP = re.compile(r"^(?P<prefix>(%s)(%s)?)\s?\b(?P<name>.+)" %
("|".join(APPELLATIONS),
"|".join(DETERMINANTS)), re.IGNORECASE
| re.UNICODE)
# for IndexPageGenerator._upper_unaccent_string
E_ACCENT = re.compile(ur"[éèêëẽ]", re.IGNORECASE | re.UNICODE)
I_ACCENT = re.compile(ur"[íìîïĩ]", re.IGNORECASE | re.UNICODE)
A_ACCENT = re.compile(ur"[áàâäã]", re.IGNORECASE | re.UNICODE)
O_ACCENT = re.compile(ur"[óòôöõ]", re.IGNORECASE | re.UNICODE)
U_ACCENT = re.compile(ur"[úùûüũ]", re.IGNORECASE | re.UNICODE)
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def _upper_unaccent_string(self, s):
s = self.E_ACCENT.sub("e", s)
s = self.I_ACCENT.sub("i", s)
s = self.A_ACCENT.sub("a", s)
s = self.O_ACCENT.sub("o", s)
s = self.U_ACCENT.sub("u", s)
return s.upper()
def language_code(self):
return self.language
def user_readable_street(self, name):
#
# Make sure name actually contains something,
# the PREFIX_REGEXP.match fails on zero-length strings
#
if len(name) == 0:
return name
name = name.strip()
name = self.SPACE_REDUCE.sub(" ", name)
name = self.PREFIX_REGEXP.sub(r"\g<name> (\g<prefix>)", name)
return name
def first_letter_equal(self, a, b):
return self._upper_unaccent_string(a) == self._upper_unaccent_string(b)
class i18n_generic(i18n):
def __init__(self, language, locale_path):
self.language = str(language)
_install_language(language, locale_path)
def language_code(self):
return self.language
def user_readable_street(self, name):
return name
def first_letter_equal(self, a, b):
return a == b
# When not listed in the following map, default language class will be
# i18n_generic
language_class_map = {
'fr_BE.UTF-8': i18n_fr_generic,
'fr_FR.UTF-8': i18n_fr_generic,
'fr_CA.UTF-8': i18n_fr_generic,
'fr_CH.UTF-8': i18n_fr_generic,
'fr_LU.UTF-8': i18n_fr_generic,
'en_AG': i18n_generic,
'en_AU.UTF-8': i18n_generic,
'en_BW.UTF-8': i18n_generic,
'en_CA.UTF-8': i18n_generic,
'en_DK.UTF-8': i18n_generic,
'en_GB.UTF-8': i18n_generic,
'en_HK.UTF-8': i18n_generic,
'en_IE.UTF-8': i18n_generic,
'en_IN': i18n_generic,
'en_NG': i18n_generic,
'en_NZ.UTF-8': i18n_generic,
'en_PH.UTF-8': i18n_generic,
'en_SG.UTF-8': i18n_generic,
'en_US.UTF-8': i18n_generic,
'en_ZA.UTF-8': i18n_generic,
'en_ZW.UTF-8': i18n_generic,
'nl_BE.UTF-8': i18n_nl_generic,
'nl_NL.UTF-8': i18n_nl_generic,
'it_IT.UTF-8': i18n_it_generic,
'it_CH.UTF-8': i18n_it_generic,
'de_AT.UTF-8': i18n_de_generic,
'de_BE.UTF-8': i18n_de_generic,
'de_DE.UTF-8': i18n_de_generic,
'de_LU.UTF-8': i18n_de_generic,
'de_CH.UTF-8': i18n_de_generic,
'es_ES.UTF-8': i18n_es_generic,
'es_AR.UTF-8': i18n_es_generic,
'es_BO.UTF-8': i18n_es_generic,
'es_CL.UTF-8': i18n_es_generic,
'es_CR.UTF-8': i18n_es_generic,
'es_DO.UTF-8': i18n_es_generic,
'es_EC.UTF-8': i18n_es_generic,
'es_SV.UTF-8': i18n_es_generic,
'es_GT.UTF-8': i18n_es_generic,
'es_HN.UTF-8': i18n_es_generic,
'es_MX.UTF-8': i18n_es_generic,
'es_NI.UTF-8': i18n_es_generic,
'es_PA.UTF-8': i18n_es_generic,
'es_PY.UTF-8': i18n_es_generic,
'es_PE.UTF-8': i18n_es_generic,
'es_PR.UTF-8': i18n_es_generic,
'es_US.UTF-8': i18n_es_generic,
'es_UY.UTF-8': i18n_es_generic,
'es_VE.UTF-8': i18n_es_generic,
'ca_ES.UTF-8': i18n_ca_generic,
'ca_AD.UTF-8': i18n_ca_generic,
'ca_FR.UTF-8': i18n_ca_generic,
'pt_BR.UTF-8': i18n_pt_br_generic,
'da_DK.UTF-8': i18n_generic,
'ar_AE.UTF-8': i18n_ar_generic,
'ar_BH.UTF-8': i18n_ar_generic,
'ar_DZ.UTF-8': i18n_ar_generic,
'ar_EG.UTF-8': i18n_ar_generic,
'ar_IN': i18n_ar_generic,
'ar_IQ.UTF-8': i18n_ar_generic,
'ar_JO.UTF-8': i18n_ar_generic,
'ar_KW.UTF-8': i18n_ar_generic,
'ar_LB.UTF-8': i18n_ar_generic,
'ar_LY.UTF-8': i18n_ar_generic,
'ar_MA.UTF-8': i18n_ar_generic,
'ar_OM.UTF-8': i18n_ar_generic,
'ar_QA.UTF-8': i18n_ar_generic,
'ar_SA.UTF-8': i18n_ar_generic,
'ar_SD.UTF-8': i18n_ar_generic,
'ar_SY.UTF-8': i18n_ar_generic,
'ar_TN.UTF-8': i18n_ar_generic,
'ar_YE.UTF-8': i18n_ar_generic,
'hr_HR.UTF-8': i18n_hr_HR,
'ru_RU.UTF-8': i18n_ru_generic,
'pl_PL.UTF-8': i18n_pl_generic,
}
def install_translation(locale_name, locale_path):
"""Return a new i18n class instance, depending on the specified
locale name (eg. "fr_FR.UTF-8"). See output of "locale -a" for a
list of system-supported locale names. When none matching, default
class is i18n_generic"""
language_class = language_class_map.get(locale_name, i18n_generic)
return language_class(locale_name, locale_path)
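# Editor's usage sketch (not part of the original module); the locale
# directory below is hypothetical. Because _install_language() passes
# fallback=True, a missing .mo file degrades to untranslated strings.
#
#   i18n_obj = install_translation('fr_FR.UTF-8', '/path/to/locale')
#   i18n_obj.language_code()                      # -> 'fr_FR.UTF-8'
#   i18n_obj.user_readable_street(u"Avenue des Ternes")
#                                                 # -> u"Ternes (Avenue des)"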
|
ccnz/eqnz-ocitysmap
|
ocitysmap2/i18n.py
|
Python
|
agpl-3.0
| 34,441
|
import math
import time
# Retry decorator with exponential backoff
def retry(tries, delay=1, backoff=2):
"""Retries a function or method until it returns True.
delay sets the initial delay, and backoff sets how much the delay should
lengthen after each failure. backoff must be greater than 1, or else it
isn't really a backoff. tries must be at least 0, and delay greater than
0."""
if backoff <= 1:
raise ValueError("backoff must be greater than 1")
tries = math.floor(tries)
if tries < 0:
raise ValueError("tries must be 0 or greater")
if delay <= 0:
raise ValueError("delay must be greater than 0")
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay # make mutable
rv = f(*args, **kwargs) # first attempt
while mtries > 0:
                if rv == True or isinstance(rv, str):  # done on success
return rv
mtries -= 1 # consume an attempt
time.sleep(mdelay) # wait...
mdelay *= backoff # make future wait longer
rv = f(*args, **kwargs) # Try again
return False # Ran out of tries :-(
return f_retry # true decorator -> decorated function
return deco_retry # @retry(arg[, ...]) -> true decorator
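# Editor's usage sketch (not part of the original module). The decorated
# callable below is hypothetical; it should return True (or a string) on
# success and anything falsy to trigger another attempt.
#
# @retry(4, delay=1, backoff=2)
# def poll_job():
#     ...  # e.g. query a remote service
#     return False  # not ready yet -> retried with 1s, 2s, 4s, 8s waits
#
# result = poll_job()  # False once all attempts are exhausted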
|
Heipiao/weibo
|
retry.py
|
Python
|
mit
| 1,243
|
from mongothon import create_model, create_model_offline
from pickle import dumps, loads
from unittest import TestCase
from mock import Mock, ANY, call, NonCallableMock
from mongothon import Document, Schema, NotFoundException, Array
from mongothon.validators import one_of
from mongothon.scopes import STANDARD_SCOPES
from bson import ObjectId
from copy import deepcopy
from .fake import FakeCursor
car_schema = Schema({
"make": {"type": basestring, "required": True},
"model": {"type": basestring, "required": True},
"trim": {"type": Schema({
"ac": {"type": bool, "default": True},
"doors": {"type": int, "required": True, "default": 4}
}), "required": True},
"wheels": {"type": Array(Schema({
"position": {"type": basestring, "required": True, "validates": one_of('FR', 'FL', 'RR', 'RL')},
"tire": {"type": basestring},
"diameter": {"type": int}
}))},
"options": {"type": Array(basestring)}
})
doc = {
"make": "Peugeot",
"model": "406",
"trim": {
"ac": False,
"doors": 5
},
"wheels": [
{
"position": "FR",
"tire": "Pirelli",
"diameter": 22
},
{
"position": "FL",
"tire": "Pirelli",
"diameter": 22
},
{
"position": "RR",
"tire": "Michelin",
"diameter": 24
},
{
"position": "RL",
"tire": "Michelin",
"diameter": 24
}
],
"options": ['heated seats', 'leather steering wheel']
}
# This has to live here so pickle can find it.
mock_collection = Mock()
mock_collection.name = "pickleable"
Pickleable = create_model(Mock(), mock_collection)
class TestModel(TestCase):
def setUp(self):
self.mock_collection = Mock()
self.mock_collection.name = "car"
self.Car = create_model(car_schema, self.mock_collection)
self.CarOffline = create_model_offline(car_schema, lambda: self.mock_collection, 'Car')
self.car = self.Car(doc)
self.car_offline = self.CarOffline(doc)
def tearDown(self):
self.Car.remove_all_handlers()
def assert_predicates(self, model, is_new=False, is_persisted=False, is_deleted=False):
self.assertEquals(is_new, model.is_new())
self.assertEquals(is_persisted, model.is_persisted())
self.assertEquals(is_deleted, model.is_deleted())
def test_class_module_is_that_of_caller(self):
self.assertEqual(self.Car.__module__, 'tests.mongothon.model_test')
def test_class_is_pickleable(self):
pickled = dumps(Pickleable)
unpickled = loads(pickled)
self.assertEqual(unpickled, Pickleable)
def test_class_name_defaults_to_camelcased_collection_name(self):
mock_collection = Mock()
mock_collection.name = "some_model"
SomeModel = create_model(Schema({}), mock_collection)
self.assertEquals("SomeModel", SomeModel.__name__)
def test_class_name_can_be_overridden(self):
mock_collection = Mock()
mock_collection.name = "some_model"
SomeModel = create_model(Schema({}), mock_collection, "SomethingElse")
self.assertEquals("SomethingElse", SomeModel.__name__)
def test_can_be_treated_as_a_dict(self):
self.assertIsInstance(self.car, dict)
self.car['make'] = 'volvo'
self.assertEquals('volvo', self.car['make'])
def test_can_be_treated_as_a_document(self):
self.assertIsInstance(self.car, Document)
self.car['make'] = 'volvo'
self.assertEquals('volvo', self.car['make'])
def test_constructor_with_kwargs(self):
car = self.Car(doc, _id='new_car_id')
self.assertEquals('new_car_id', car['_id'])
def test_constructor_with_kwargs_and_initial_state(self):
handler = Mock()
self.Car.on('did_find', handler)
car = self.Car(doc, initial_state=self.Car.PERSISTED, _id='new_car_id')
self.assertEquals('new_car_id', car['_id'])
self.assertEquals(self.Car.PERSISTED, car._state)
handler.assert_called_once_with(car)
def test_instantiate(self):
self.assert_predicates(self.car, is_new=True)
def test_validation_of_valid_doc(self):
self.car.validate()
def test_validation_respects_defaults(self):
# this would cause validation to fail without a default being applied
del self.car['trim']['doors']
self.car.validate()
def test_validation_does_not_apply_defaults_to_instance(self):
del self.car['trim']['doors']
self.car.validate()
self.assertFalse('doors' in self.car['trim'])
def test_apply_defaults(self):
del self.car['trim']['doors']
self.car.apply_defaults()
self.assertEquals(4, self.car['trim']['doors'])
def test_save_applies_defaults(self):
del self.car['trim']['doors']
self.car.save()
self.assertEqual(4, self.car['trim']['doors'])
def test_save_rolls_back_defaults_if_save_fails(self):
del self.car['trim']['doors']
self.mock_collection.save = Mock(side_effect=Exception('IO error'))
        with self.assertRaises(Exception):
            self.car.save()
        # the defaults applied during save() must have been rolled back
        self.assertFalse('doors' in self.car['trim'])
def test_save_passes_arguments_to_collection(self):
self.car.save(manipulate=False, safe=True, check_keys=False)
self.mock_collection.save.assert_called_with(ANY, manipulate=False, safe=True, check_keys=False)
def test_save_changes_state_to_persisted(self):
self.car.save()
self.assert_predicates(self.car, is_persisted=True)
def test_save_resets_change_tracking(self):
self.car['trim']['ac'] = True
self.car['make'] = 'Rover'
self.car.save()
self.assertFalse(self.car['trim'].changed)
self.assertFalse(self.car.changed)
def test_remove(self):
oid = ObjectId()
self.car['_id'] = oid
self.car.remove()
self.mock_collection.remove.assert_called_with(oid)
self.assert_predicates(self.car, is_deleted=True)
def test_insert(self):
self.Car.insert(doc)
self.mock_collection.insert.assert_called_with(doc)
def test_update_instance(self):
oid = ObjectId()
self.car['_id'] = oid
self.car.save()
self.car.update_instance({'model': '106'})
self.assert_predicates(self.car, is_persisted=True)
self.mock_collection.update.assert_called_with(
{'_id': oid}, {'model': '106'})
def test_update_on_instance(self):
oid = ObjectId()
self.car['_id'] = oid
self.car.update({'model': '106'})
self.assertFalse(self.mock_collection.called)
self.assertEqual(self.car['model'], '106')
def test_update_on_class(self):
oid = ObjectId()
self.car['_id'] = oid
self.car.save()
self.Car.update({'_id': oid}, {'model': '106'})
self.assert_predicates(self.car, is_persisted=True)
self.mock_collection.update.assert_called_with(
{'_id': oid}, {'model': '106'})
def test_count(self):
self.mock_collection.count.return_value = 45
self.assertEquals(45, self.Car.count())
def test_find_one(self):
self.mock_collection.find_one.return_value = doc
loaded_car = self.Car.find_one({'make': 'Peugeot'})
self.assertEquals(doc, loaded_car)
self.assert_predicates(loaded_car, is_persisted=True)
self.mock_collection.find_one.assert_called_with({'make': 'Peugeot'})
def test_find_one_missing_record(self):
self.mock_collection.find_one.return_value = None
loaded_car = self.Car.find_one({'make': 'Peugeot'})
self.assertIsNone(loaded_car)
def test_find(self):
cursor = FakeCursor([{'make': 'Peugeot', 'model': '405'}, {'make': 'Peugeot', 'model': '205'}])
self.mock_collection.find.return_value = cursor
cars = self.Car.find({'make': 'Peugeot'}, limit=2)
self.assertIsInstance(cars[0], self.Car)
self.assertEqual(2, cars.count())
for car in cars:
self.assert_predicates(car, is_persisted=True)
self.assertIsInstance(car, self.Car)
def test_find_with_iterator_protocol(self):
cursor = FakeCursor([{'make': 'Peugeot', 'model': '405'}, {'make': 'Peugeot', 'model': '205'}])
self.mock_collection.find.return_value = cursor
cars = self.Car.find({'make': 'Peugeot'}, limit=2)
iter1 = cars.__iter__()
iter2 = cars.__iter__()
self.assertIsInstance(iter1.next(), self.Car)
self.assertIsInstance(iter2.next(), self.Car)
self.assertIsInstance(iter1.next(), self.Car)
self.assertIsInstance(iter2.next(), self.Car)
def test_find_by_id(self):
self.mock_collection.find_one.return_value = doc
oid = ObjectId()
loaded_car = self.Car.find_by_id(oid)
self.assertEquals(doc, loaded_car)
self.assert_predicates(loaded_car, is_persisted=True)
self.mock_collection.find_one.assert_called_with({'_id': oid})
def test_find_by_id_handles_integer_id(self):
self.mock_collection.find_one.return_value = doc
loaded_car = self.Car.find_by_id(33)
self.assertEquals(doc, loaded_car)
self.assert_predicates(loaded_car, is_persisted=True)
self.mock_collection.find_one.assert_called_with({'_id': 33})
def test_find_by_id_handles_oid_as_string(self):
self.mock_collection.find_one.return_value = doc
oid = ObjectId()
loaded_car = self.Car.find_by_id(str(oid))
self.assertEquals(doc, loaded_car)
self.assert_predicates(loaded_car, is_persisted=True)
self.mock_collection.find_one.assert_called_with({'_id': oid})
def test_find_by_id_missing_record(self):
"""Test that find_by_id throws a NotFoundException if the requested record does not exist"""
self.mock_collection.find_one.return_value = None
with self.assertRaises(NotFoundException):
self.Car.find_by_id(ObjectId())
def test_find_by_id_handles_non_oid_string_id(self):
self.mock_collection.find_one.return_value = doc
loaded_car = self.Car.find_by_id("bob")
self.assertEquals(doc, loaded_car)
self.assert_predicates(loaded_car, is_persisted=True)
self.mock_collection.find_one.assert_called_with({'_id': "bob"})
def test_reload(self):
updated_doc = deepcopy(doc)
updated_doc['make'] = 'Volvo'
self.mock_collection.find_one.side_effect = [doc, updated_doc]
oid = ObjectId()
car = self.Car.find_by_id(str(oid))
car['_id'] = oid
car.reload()
self.assertEquals(updated_doc, car)
self.mock_collection.find_one.assert_has_calls([
call({'_id': oid}), call({'_id': oid})])
def assert_returns_wrapped_cursor(self, attr_name):
cursor = FakeCursor([{'make': 'Peugeot', 'model': '405'}, {'make': 'Peugeot', 'model': '205'}])
self.mock_collection.find.return_value = cursor
cars = getattr(self.Car.find({'make': 'Peugeot'}), attr_name)()
self.assertIsInstance(cars[0], self.Car)
def test_limit_cursor(self):
self.assert_returns_wrapped_cursor('limit')
def test_rewind_cursor(self):
self.assert_returns_wrapped_cursor('rewind')
def test_clone_cursor(self):
self.assert_returns_wrapped_cursor('clone')
def test_add_option_cursor(self):
self.assert_returns_wrapped_cursor('add_option')
def test_remove_option_cursor(self):
self.assert_returns_wrapped_cursor('remove_option')
def test_batch_size_cursor(self):
self.assert_returns_wrapped_cursor('batch_size')
def test_skip_cursor(self):
self.assert_returns_wrapped_cursor('skip')
def test_max_scan_cursor(self):
self.assert_returns_wrapped_cursor('max_scan')
def test_sort_cursor(self):
self.assert_returns_wrapped_cursor('sort')
def test_hint_cursor(self):
self.assert_returns_wrapped_cursor('hint')
def test_where_cursor(self):
self.assert_returns_wrapped_cursor('where')
def call_tracker(self, **kwargs):
"""Groups together mocks for the purpose of tracking the
order of calls across all of those mocks."""
tracker = Mock()
for key, value in kwargs.iteritems():
setattr(tracker, key, value)
return tracker
def test_will_save_event(self):
handler = Mock()
tracker = self.call_tracker(handler=handler, collection=self.mock_collection)
self.Car.on('will_save', handler)
self.car.save()
self.assertEquals([call.handler(self.car), call.collection.save(self.car)], tracker.mock_calls)
def test_did_save_event(self):
handler = Mock()
tracker = self.call_tracker(handler=handler, collection=self.mock_collection)
self.Car.on('did_save', handler)
self.car.save()
self.assertEquals([call.collection.save(self.car), call.handler(self.car)], tracker.mock_calls)
def test_changes_available_to_did_save_event_handler(self):
inner = Mock()
def handler(car):
self.assertTrue(car.changed)
self.assertEqual({'ac': True}, car['trim'].changed)
self.assertEqual({}, car['trim'].added)
self.assertEquals({'diameter': (22, 23)}, car['wheels'][0].changes)
inner()
self.Car.on('did_save', handler)
self.car['make'] = 'Rover'
self.car['trim']['ac'] = True
self.car['wheels'][0]['diameter'] = 23
self.car.save()
inner.assert_called_once_with()
def test_changes_available_to_will_validate_event_handler(self):
inner = Mock()
def handler(car):
self.assertTrue(car.changed)
self.assertIn('ac', car['trim'].changed)
self.assertEqual({}, car['trim'].added)
self.assertEquals({'diameter': (22, 23)}, car['wheels'][0].changes)
self.assertIsInstance(car, self.Car)
inner()
self.Car.on('will_validate', handler)
self.car['make'] = 'Rover'
self.car['trim']['ac'] = True
self.car['wheels'][0]['diameter'] = 23
self.car.save()
inner.assert_called_once_with()
def test_will_validate_event(self):
handler = Mock()
car_schema.validate = Mock()
tracker = self.call_tracker(handler=handler, validate=car_schema.validate)
self.Car.on('will_validate', handler)
self.car.validate()
self.assertEquals([call.handler(self.car), call.validate(self.car)], tracker.mock_calls)
def test_did_validate_event(self):
handler = Mock()
car_schema.validate = Mock()
tracker = self.call_tracker(handler=handler, validate=car_schema.validate)
self.Car.on('did_validate', handler)
self.car.validate()
self.assertEquals([call.validate(self.car), call.handler(self.car)], tracker.mock_calls)
def test_did_init_event(self):
handler = Mock()
self.Car.on('did_init', handler)
car = self.Car()
handler.assert_called_once_with(car)
def test_will_update_event(self):
handler = Mock()
self.Car.on('will_update', handler)
self.car['_id'] = 'abc'
self.car.update_instance({"$set": {"somefield": "somevalue"}}, safe=True)
handler.assert_called_once_with(self.car, {"$set": {"somefield": "somevalue"}}, safe=True)
def test_did_update_event(self):
handler = Mock()
self.Car.on('did_update', handler)
self.car['_id'] = 'abc'
self.car.update_instance({"$set": {"somefield": "somevalue"}}, safe=True)
handler.assert_called_once_with(self.car, {"$set": {"somefield": "somevalue"}}, safe=True)
def test_will_remove_event(self):
handler = Mock()
self.Car.on('will_remove', handler)
self.car['_id'] = 'abc'
self.car.remove(True, j=True)
handler.assert_called_once_with(self.car, True, j=True)
def test_did_remove_event(self):
handler = Mock()
self.Car.on('did_remove', handler)
self.car['_id'] = 'abc'
self.car.remove(True, j=True)
handler.assert_called_once_with(self.car, True, j=True)
def test_will_apply_defaults_event(self):
handler = Mock()
self.Car.on('will_apply_defaults', handler)
self.car.apply_defaults()
handler.assert_called_once_with(self.car)
def test_did_apply_defaults_event(self):
handler = Mock()
self.Car.on('did_apply_defaults', handler)
self.car.apply_defaults()
handler.assert_called_once_with(self.car)
def test_did_find_event(self):
handler = Mock()
self.Car.on('did_find', handler)
cursor = FakeCursor([{'make': 'Peugeot', 'model': '405'}, {'make': 'Peugeot', 'model': '205'}])
self.mock_collection.find.return_value = cursor
cars = self.Car.find({'make': 'Peugeot'}, limit=2)
self.assertEqual([call(cars[0]), call(cars[1])], handler.mock_calls)
def test_will_reload_event(self):
handler = Mock()
self.car['_id'] = ObjectId()
self.Car.on('will_reload', handler)
doc = {'make': 'Peugeot', 'model': '405'}
self.mock_collection.find_one.return_value = doc
self.car.reload()
handler.assert_called_once_with(self.car)
def test_did_reload_event(self):
handler = Mock()
self.car['_id'] = ObjectId()
self.Car.on('did_reload', handler)
doc = {'make': 'Peugeot', 'model': '405'}
self.mock_collection.find_one.return_value = doc
self.car.reload()
handler.assert_called_once_with(self.car)
def test_did_find_event_not_fire_for_simple_init(self):
handler = Mock()
self.Car.on('did_find', handler)
self.Car()
self.assertFalse(handler.called)
def test_register_event_handler_with_decorator(self):
stub = Mock()
@self.Car.on('did_init')
def func(*args, **kwargs):
stub(*args, **kwargs)
car = self.Car()
stub.assert_called_once_with(car)
def test_on_decorator_with_other_decorators(self):
outer_decorator = Mock()
@outer_decorator
@self.Car.on('did_init')
def func(*args, **kwargs):
pass
outer_decorator.assert_called_once_with(ANY)
self.assertTrue(callable(outer_decorator.call_args[0][0]))
def test_emit_custom_event(self):
handler = Mock()
self.Car.on('fruit_explosion', handler)
self.car.emit('fruit_explosion', 'apples', other_fruit='oranges')
handler.assert_called_once_with(self.car, 'apples', other_fruit='oranges')
def test_remove_handler(self):
handler = Mock()
self.Car.on('did_init', handler)
self.Car.remove_handler('did_init', handler)
self.Car()
self.assertEquals(0, handler.call_count)
def test_remove_all_handlers(self):
handler = Mock()
self.Car.on('did_init', handler)
self.Car.remove_all_handlers()
self.Car()
self.assertEquals(0, handler.call_count)
def test_handlers(self):
handler = Mock()
self.Car.on('did_init', handler)
self.assertEquals([handler], self.Car.handlers('did_init'))
def test_different_classes_managed_their_own_handlers(self):
CarA = create_model(car_schema, Mock())
CarB = create_model(car_schema, Mock())
handler = Mock()
CarA.on('did_save', handler)
self.assertEquals([], CarB.handlers('did_save'))
def test_static_method_registration(self):
@self.Car.static_method
def format_make(make):
self.assertEquals("mercedez-benz", make)
return make.title()
self.assertEquals("Mercedez-Benz", self.Car.format_make("mercedez-benz"))
def test_static_method_registration_with_other_decorators(self):
outer_decorator = Mock()
@outer_decorator
@self.Car.static_method
def format_make(make):
pass
outer_decorator.assert_called_once_with(ANY)
self.assertTrue(callable(outer_decorator.call_args[0][0]))
def test_class_method_registration(self):
response = Mock()
@self.Car.class_method
def find_by_make(Car, make):
self.assertEquals(Car, self.Car)
self.assertEquals("Peugeot", make)
return response
self.assertEquals(response, self.Car.find_by_make("Peugeot"))
def test_class_method_registration_with_other_decorators(self):
outer_decorator = Mock()
@outer_decorator
@self.Car.class_method
def find_by_make(Car, make):
pass
outer_decorator.assert_called_once_with(ANY)
self.assertTrue(callable(outer_decorator.call_args[0][0]))
def test_instance_method_registration(self):
response = Mock()
@self.Car.instance_method
def add_option(car, option):
self.assertIsInstance(car, self.Car)
self.assertEquals(option, "sunroof")
return response
car = self.Car(doc)
self.assertEquals(response, car.add_option("sunroof"))
def test_instance_method_registration_with_other_decorators(self):
outer_decorator = Mock()
@outer_decorator
@self.Car.instance_method
def add_option(car, option):
pass
outer_decorator.assert_called_once_with(ANY)
self.assertTrue(callable(outer_decorator.call_args[0][0]))
def test_scope_query(self):
@self.Car.scope
def with_ac(available=True):
return {"trim.ac": available}
@self.Car.scope
def hatchback():
return {"trim.doors": {"$in": [3, 5]}}, {}, {"sort": [("make", -1)]}
cursor = FakeCursor([{'make': 'Peugeot', 'model': '405'}, {'make': 'Peugeot', 'model': '205'}])
self.mock_collection.find.return_value = cursor
cars = self.Car.hatchback().with_ac().where({'year': 2005})
self.assertIsInstance(cars[0], self.Car)
self.mock_collection.find.assert_called_once_with(
{"trim.ac": True, "trim.doors": {"$in": [3, 5]}, "year": 2005},
None,
sort=[("make", -1)])
self.assertEqual(2, cars.count())
for car in cars:
self.assertIsInstance(car, self.Car)
def test_scope_with_other_decorator(self):
outer_decorator = Mock()
@outer_decorator
@self.Car.scope
def with_ac(available=True):
return {"trim.ac": available}
outer_decorator.assert_called_once_with(ANY)
self.assertTrue(callable(outer_decorator.call_args[0][0]))
def test_models_maintain_their_own_scope_lists(self):
CarA = create_model(car_schema, Mock())
CarB = create_model(car_schema, Mock())
def scope_a():
return {}
def scope_b():
return {}
CarA.scope(scope_a)
CarB.scope(scope_b)
self.assertEquals(STANDARD_SCOPES + [scope_a], CarA.scopes)
self.assertEquals(STANDARD_SCOPES + [scope_b], CarB.scopes)
def test_find_one_from_offline_model(self):
self.mock_collection.find_one.return_value = doc
loaded_car = self.CarOffline.find_one({'make': 'Peugeot'})
self.assertEquals(doc, loaded_car)
self.assert_predicates(loaded_car, is_persisted=True)
self.mock_collection.find_one.assert_called_with({'make': 'Peugeot'})
def test_update_instance_from_offline_model(self):
oid = ObjectId()
self.car_offline['_id'] = oid
self.car_offline.save()
self.car_offline.update_instance({'model': '106'})
self.assert_predicates(self.car_offline, is_persisted=True)
self.mock_collection.update.assert_called_with(
{'_id': oid}, {'model': '106'})
|
gamechanger/mongothon
|
tests/mongothon/model_test.py
|
Python
|
mit
| 24,340
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import math
from copy import deepcopy
from compas.geometry import scale_vector
from compas.geometry import normalize_vector
from compas.geometry import subtract_vectors
from compas.geometry import cross_vectors
from compas.geometry import dot_vectors
from compas.geometry import multiply_matrix_vector
from compas.geometry import length_vector
from compas.geometry import allclose
from compas.geometry import multiply_matrices
from compas.geometry import transpose_matrix
from compas.geometry import norm_vector
_EPS = 1e-16
"""eps for testing whether a number is close to zero"""
_SPEC2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
"""used for Euler angles: to map rotation type and axes to tuples of inner axis, parity, repetition, frame"""
_NEXT_SPEC = [1, 2, 0, 1]
__all__ = [
'matrix_determinant',
'matrix_inverse',
'decompose_matrix',
'compose_matrix',
'identity_matrix',
'matrix_from_frame',
'matrix_from_frame_to_frame',
'matrix_from_change_of_basis',
'matrix_from_euler_angles',
'matrix_from_axis_and_angle',
'matrix_from_axis_angle_vector',
'matrix_from_basis_vectors',
'matrix_from_translation',
'matrix_from_orthogonal_projection',
'matrix_from_parallel_projection',
'matrix_from_perspective_projection',
'matrix_from_perspective_entries',
'matrix_from_shear_entries',
'matrix_from_shear',
'matrix_from_scale_factors',
'matrix_from_quaternion',
'euler_angles_from_matrix',
'euler_angles_from_quaternion',
'axis_and_angle_from_matrix',
'axis_angle_vector_from_matrix',
'axis_angle_from_quaternion',
'quaternion_from_matrix',
'quaternion_from_euler_angles',
'quaternion_from_axis_angle',
'basis_vectors_from_matrix',
'translation_from_matrix',
]
def is_matrix_square(M):
"""Verify that a matrix is square.
Parameters
----------
M : list[list[float]]
The matrix.
Returns
-------
bool
True if the length of every row is equal to the number of rows.
False otherwise.
Examples
--------
>>> M = identity_matrix(4)
>>> is_matrix_square(M)
True
"""
number_of_rows = len(M)
for row in M:
if len(row) != number_of_rows:
return False
return True
def matrix_minor(M, i, j):
"""Construct the minor corresponding to an element of a matrix.
Parameters
----------
M : list[list[float]]
The matrix.
i : int
Row index of the minor.
j : int
Column index of the minor.
Returns
-------
list[list[float]]
The minor.
"""
return [row[:j] + row[j + 1:] for row in (M[:i] + M[i + 1:])]
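# Editor's example (not in the original module):
# matrix_minor([[1, 2], [3, 4]], 0, 0) -> [[4]] (row 0 and column 0 removed).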
def matrix_determinant(M, check=True):
"""Calculates the determinant of a square matrix M.
Parameters
----------
M : list[list[float]]
A square matrix of any dimension.
check : bool
If True, checks if the matrix is square.
Raises
------
ValueError
If the matrix is not square.
Returns
-------
float
The determinant.
Examples
--------
>>> M = identity_matrix(4)
>>> matrix_determinant(M)
1.0
"""
dim = len(M)
if check:
if not is_matrix_square(M):
raise ValueError("Not a square matrix")
if dim == 2:
return M[0][0] * M[1][1] - M[0][1] * M[1][0]
D = 0
for c in range(dim):
D += (-1) ** c * M[0][c] * matrix_determinant(matrix_minor(M, 0, c), check=False)
return D
def matrix_inverse(M):
"""Calculates the inverse of a square matrix M.
Parameters
----------
M : list[list[float]]
A square matrix of any dimension.
Returns
-------
list[list[float]]
The inverted matrix.
Raises
------
ValueError
        If the matrix is not square.
ValueError
If the matrix is singular.
ValueError
If the matrix is not invertible.
Examples
--------
>>> from compas.geometry import Frame
>>> f = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = matrix_from_frame(f)
>>> I = multiply_matrices(T, matrix_inverse(T))
>>> I2 = identity_matrix(4)
>>> allclose(I[0], I2[0])
True
>>> allclose(I[1], I2[1])
True
>>> allclose(I[2], I2[2])
True
>>> allclose(I[3], I2[3])
True
"""
D = matrix_determinant(M)
    if D == 0:
        raise ValueError("The matrix is singular.")
if len(M) == 2:
return [[M[1][1] / D, -1 * M[0][1] / D],
[-1 * M[1][0] / D, M[0][0] / D]]
cofactors = []
for r in range(len(M)):
cofactor_row = []
for c in range(len(M)):
cofactor_row.append((-1) ** (r + c) * matrix_determinant(matrix_minor(M, r, c)))
cofactors.append(cofactor_row)
cofactors = transpose_matrix(cofactors)
for r in range(len(cofactors)):
for c in range(len(cofactors)):
cofactors[r][c] = cofactors[r][c] / D
return cofactors
def decompose_matrix(M):
"""Calculates the components of rotation, translation, scale, shear, and
perspective of a given transformation matrix M. [1]_
Parameters
----------
M : list[list[float]]
        The 4x4 transformation matrix.
Raises
------
ValueError
If matrix is singular or degenerative.
Returns
-------
scale : [float, float, float]
The 3 scale factors in x-, y-, and z-direction.
shear : [float, float, float]
The 3 shear factors for x-y, x-z, and y-z axes.
angles : [float, float, float]
The rotation specified through the 3 Euler angles about static x, y, z axes.
translation : [float, float, float]
The 3 values of translation.
perspective : [float, float, float, float]
The 4 perspective entries of the matrix.
Examples
--------
>>> trans1 = [1, 2, 3]
>>> angle1 = [-2.142, 1.141, -0.142]
>>> scale1 = [0.123, 2, 0.5]
>>> T = matrix_from_translation(trans1)
>>> R = matrix_from_euler_angles(angle1)
>>> S = matrix_from_scale_factors(scale1)
>>> M = multiply_matrices(multiply_matrices(T, R), S)
>>> # M = compose_matrix(scale1, None, angle1, trans1, None)
>>> scale2, shear2, angle2, trans2, persp2 = decompose_matrix(M)
>>> allclose(scale1, scale2)
True
>>> allclose(angle1, angle2)
True
>>> allclose(trans1, trans2)
True
References
----------
.. [1] Slabaugh, 1999. *Computing Euler angles from a rotation matrix*.
Available at: http://www.gregslabaugh.net/publications/euler.pdf
"""
fabs = math.fabs
cos = math.cos
atan2 = math.atan2
asin = math.asin
pi = math.pi
    detM = matrix_determinant(M)  # raises ValueError if matrix is not square
    if detM == 0:
        raise ValueError("The matrix is singular.")
Mt = transpose_matrix(M)
if abs(Mt[3][3]) < _EPS:
raise ValueError('The element [3,3] of the matrix is zero.')
for i in range(4):
for j in range(4):
Mt[i][j] /= Mt[3][3]
translation = [M[0][3], M[1][3], M[2][3]]
# scale, shear, angles
scale = [0.0, 0.0, 0.0]
shear = [0.0, 0.0, 0.0]
angles = [0.0, 0.0, 0.0]
# copy Mt[:3, :3] into row
row = [[0, 0, 0] for i in range(3)]
for i in range(3):
for j in range(3):
row[i][j] = Mt[i][j]
scale[0] = norm_vector(row[0])
for i in range(3):
row[0][i] /= scale[0]
shear[0] = dot_vectors(row[0], row[1])
for i in range(3):
row[1][i] -= row[0][i] * shear[0]
scale[1] = norm_vector(row[1])
for i in range(3):
row[1][i] /= scale[1]
shear[0] /= scale[1]
shear[1] = dot_vectors(row[0], row[2])
for i in range(3):
row[2][i] -= row[0][i] * shear[1]
    shear[2] = dot_vectors(row[1], row[2])
    for i in range(3):
        row[2][i] -= row[1][i] * shear[2]  # orthogonalise row 2 against row 1
scale[2] = norm_vector(row[2])
for i in range(3):
row[2][i] /= scale[2]
shear[1] /= scale[2]
shear[2] /= scale[2]
if dot_vectors(row[0], cross_vectors(row[1], row[2])) < 0:
scale = [-x for x in scale]
row = [[-x for x in y] for y in row]
# angles
if row[0][2] != -1. and row[0][2] != 1.:
beta1 = asin(-row[0][2])
# beta2 = pi - beta1
alpha1 = atan2(row[1][2] / cos(beta1), row[2][2] / cos(beta1))
# alpha2 = atan2(row[1][2] / cos(beta2), row[2][2] / cos(beta2))
gamma1 = atan2(row[0][1] / cos(beta1), row[0][0] / cos(beta1))
# gamma2 = atan2(row[0][1] / cos(beta2), row[0][0] / cos(beta2))
angles = [alpha1, beta1, gamma1]
else:
gamma = 0.
if row[0][2] == -1.:
beta = pi / 2.
alpha = gamma + atan2(row[1][0], row[2][0])
else: # row[0][2] == 1
beta = -pi / 2.
alpha = -gamma + atan2(-row[1][0], -row[2][0])
angles = [alpha, beta, gamma]
# perspective
    if fabs(Mt[0][3]) > _EPS or fabs(Mt[1][3]) > _EPS or fabs(Mt[2][3]) > _EPS:
P = deepcopy(Mt)
P[0][3], P[1][3], P[2][3], P[3][3] = 0.0, 0.0, 0.0, 1.0
Ptinv = matrix_inverse(transpose_matrix(P))
perspective = multiply_matrix_vector(Ptinv, [Mt[0][3], Mt[1][3], Mt[2][3], Mt[3][3]])
else:
perspective = [0.0, 0.0, 0.0, 1.0]
return scale, shear, angles, translation, perspective
def compose_matrix(scale=None, shear=None, angles=None, translation=None, perspective=None):
"""Calculates a matrix from the components of scale, shear, euler_angles, translation and perspective.
Parameters
----------
scale : [float, float, float]
The 3 scale factors in x-, y-, and z-direction.
shear : [float, float, float]
The 3 shear factors for x-y, x-z, and y-z axes.
angles : [float, float, float]
The rotation specified through the 3 Euler angles about static x, y, z axes.
translation : [float, float, float]
The 3 values of translation.
perspective : [float, float, float, float]
The 4 perspective entries of the matrix.
Returns
-------
list[list[float]]
The 4x4 matrix that combines the provided transformation components.
Examples
--------
>>> trans1 = [1, 2, 3]
>>> angle1 = [-2.142, 1.141, -0.142]
>>> scale1 = [0.123, 2, 0.5]
>>> M = compose_matrix(scale1, None, angle1, trans1, None)
>>> scale2, shear2, angle2, trans2, persp2 = decompose_matrix(M)
>>> allclose(scale1, scale2)
True
>>> allclose(angle1, angle2)
True
>>> allclose(trans1, trans2)
True
"""
M = [[1. if i == j else 0. for i in range(4)] for j in range(4)]
if perspective is not None:
P = matrix_from_perspective_entries(perspective)
M = multiply_matrices(M, P)
if translation is not None:
T = matrix_from_translation(translation)
M = multiply_matrices(M, T)
if angles is not None:
R = matrix_from_euler_angles(angles, static=True, axes="xyz")
M = multiply_matrices(M, R)
if shear is not None:
H = matrix_from_shear_entries(shear)
M = multiply_matrices(M, H)
if scale is not None:
S = matrix_from_scale_factors(scale)
M = multiply_matrices(M, S)
for i in range(4):
for j in range(4):
M[i][j] /= M[3][3]
return M
def identity_matrix(dim):
"""Construct an identity matrix.
Parameters
----------
dim : int
The number of rows and/or columns of the matrix.
Returns
-------
    list[list[float]]
A list of `dim` lists, with each list containing `dim` elements.
The items on the "diagonal" are one.
All other items are zero.
Examples
--------
>>> identity_matrix(4)
[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
"""
return [[1. if i == j else 0. for i in range(dim)] for j in range(dim)]
def matrix_from_frame(frame):
"""Computes a change of basis transformation from world XY to the frame.
Parameters
----------
frame : :class:`~compas.geometry.Frame`
A frame describing the targeted Cartesian coordinate system
Returns
-------
list[list[float]]
A 4x4 transformation matrix representing the transformation from
world coordinates to frame coordinates.
Examples
--------
>>> from compas.geometry import Frame
>>> f = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = matrix_from_frame(f)
"""
M = identity_matrix(4)
M[0][0], M[1][0], M[2][0] = frame.xaxis
M[0][1], M[1][1], M[2][1] = frame.yaxis
M[0][2], M[1][2], M[2][2] = frame.zaxis
M[0][3], M[1][3], M[2][3] = frame.point
return M
def matrix_from_frame_to_frame(frame_from, frame_to):
"""Computes a transformation between two frames.
    This transformation maps geometry from the Cartesian coordinate
    system defined by `frame_from` to the Cartesian coordinate system
    defined by `frame_to`.
Parameters
----------
frame_from : :class:`~compas.geometry.Frame`
A frame defining the original Cartesian coordinate system
frame_to : :class:`~compas.geometry.Frame`
A frame defining the targeted Cartesian coordinate system
Returns
-------
list[list[float]]
A 4x4 transformation matrix representing the transformation
from one frame to another.
Examples
--------
>>> from compas.geometry import Frame
>>> f1 = Frame([2, 2, 2], [0.12, 0.58, 0.81], [-0.80, 0.53, -0.26])
>>> f2 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = matrix_from_frame_to_frame(f1, f2)
"""
T1 = matrix_from_frame(frame_from)
T2 = matrix_from_frame(frame_to)
return multiply_matrices(T2, matrix_inverse(T1))
def matrix_from_change_of_basis(frame_from, frame_to):
"""Computes a change of basis transformation between two frames.
A basis change is essentially a remapping of geometry from one
coordinate system to another.
Parameters
----------
frame_from : :class:`~compas.geometry.Frame`
A frame defining the original Cartesian coordinate system
frame_to : :class:`~compas.geometry.Frame`
A frame defining the targeted Cartesian coordinate system
Returns
-------
list[list[float]]
A 4x4 transformation matrix representing a change of basis.
Examples
--------
>>> from compas.geometry import Point, Frame
>>> f1 = Frame([2, 2, 2], [0.12, 0.58, 0.81], [-0.80, 0.53, -0.26])
>>> f2 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = matrix_from_change_of_basis(f1, f2)
"""
T1 = matrix_from_frame(frame_from)
T2 = matrix_from_frame(frame_to)
return multiply_matrices(matrix_inverse(T2), T1)
def matrix_from_euler_angles(euler_angles, static=True, axes='xyz'):
"""Calculates a rotation matrix from Euler angles.
In 3D space any orientation can be achieved by composing three elemental
rotations, rotations about the axes (x, y, z) of a coordinate system. A
triple of Euler angles can be interpreted in 24 ways, which depends on if
the rotations are applied to a static (extrinsic) or rotating (intrinsic)
frame and the order of axes.
Parameters
----------
euler_angles : [float, float, float]
Three numbers that represent the angles of rotations about the defined axes.
static : bool, optional
        If True, the rotations are applied to a static frame.
        If False, they are applied to a rotating frame.
axes : Literal['xyz', 'yzx', 'zxy'], optional
A 3 character string specifying order of the axes.
Returns
-------
list[list[float]]
A 4x4 transformation matrix representing a rotation.
Examples
--------
>>> ea1 = 1.4, 0.5, 2.3
>>> R = matrix_from_euler_angles(ea1)
>>> ea2 = euler_angles_from_matrix(R)
>>> allclose(ea1, ea2)
True
"""
global _SPEC2TUPLE
global _NEXT_SPEC
sin = math.sin
cos = math.cos
ai, aj, ak = euler_angles
if static:
firstaxis, parity, repetition, frame = _SPEC2TUPLE["s" + axes]
else:
firstaxis, parity, repetition, frame = _SPEC2TUPLE["r" + axes]
i = firstaxis
j = _NEXT_SPEC[i + parity]
k = _NEXT_SPEC[i - parity + 1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = sin(ai), sin(aj), sin(ak)
ci, cj, ck = cos(ai), cos(aj), cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
M = [[1. if x == y else 0. for x in range(4)] for y in range(4)]
if repetition:
M[i][i] = cj
M[i][j] = sj * si
M[i][k] = sj * ci
M[j][i] = sj * sk
M[j][j] = -cj * ss + cc
M[j][k] = -cj * cs - sc
M[k][i] = -sj * ck
M[k][j] = cj * sc + cs
M[k][k] = cj * cc - ss
else:
M[i][i] = cj * ck
M[i][j] = sj * sc - cs
M[i][k] = sj * cc + ss
M[j][i] = cj * sk
M[j][j] = sj * ss + cc
M[j][k] = sj * cs - sc
M[k][i] = -sj
M[k][j] = cj * si
M[k][k] = cj * ci
return M
def euler_angles_from_matrix(M, static=True, axes='xyz'):
"""Returns Euler angles from the rotation matrix M according to specified
axis sequence and type of rotation.
Parameters
----------
M : list[list[float]]
The 3x3 or 4x4 matrix in row-major order.
static : bool, optional
        If True, the rotations are applied to a static frame.
        If False, they are applied to a rotating frame.
axes : str, optional
A 3 character string specifying order of the axes.
Returns
-------
list[float]
The 3 Euler angles.
Examples
--------
>>> ea1 = 1.4, 0.5, 2.3
>>> R = matrix_from_euler_angles(ea1)
>>> ea2 = euler_angles_from_matrix(R)
>>> allclose(ea1, ea2)
True
"""
global _SPEC2TUPLE
global _NEXT_SPEC
global _EPS
atan2 = math.atan2
sqrt = math.sqrt
if static:
firstaxis, parity, repetition, frame = _SPEC2TUPLE["s" + axes]
else:
firstaxis, parity, repetition, frame = _SPEC2TUPLE["r" + axes]
i = firstaxis
j = _NEXT_SPEC[i + parity]
k = _NEXT_SPEC[i - parity + 1]
if repetition:
sy = sqrt(M[i][j] * M[i][j] + M[i][k] * M[i][k])
if sy > _EPS:
ax = atan2(M[i][j], M[i][k])
ay = atan2(sy, M[i][i])
az = atan2(M[j][i], -M[k][i])
else:
ax = atan2(-M[j][k], M[j][j])
ay = atan2(sy, M[i][i])
az = 0.0
else:
cy = sqrt(M[i][i] * M[i][i] + M[j][i] * M[j][i])
if cy > _EPS:
ax = atan2(M[k][j], M[k][k])
ay = atan2(-M[k][i], cy)
az = atan2(M[j][i], M[i][i])
else:
ax = atan2(-M[j][k], M[j][j])
ay = atan2(-M[k][i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return [ax, ay, az]
def matrix_from_axis_and_angle(axis, angle, point=None):
"""Calculates a rotation matrix from an rotation axis, an angle and an optional
point of rotation.
Parameters
----------
axis : [float, float, float]
Three numbers that represent the axis of rotation.
angle : float
The rotation angle in radians.
point : [float, float, float] | :class:`~compas.geometry.Point`, optional
A point to perform a rotation around an origin other than [0, 0, 0].
Returns
-------
list[list[float]]
A 4x4 transformation matrix representing a rotation.
Notes
-----
The rotation is based on the right hand rule, i.e. anti-clockwise if the
axis of rotation points towards the observer.
Examples
--------
>>> axis1 = normalize_vector([-0.043, -0.254, 0.617])
>>> angle1 = 0.1
>>> R = matrix_from_axis_and_angle(axis1, angle1)
>>> axis2, angle2 = axis_and_angle_from_matrix(R)
>>> allclose(axis1, axis2)
True
>>> allclose([angle1], [angle2])
True
"""
if not point:
point = [0.0, 0.0, 0.0]
axis = list(axis)
if length_vector(axis):
axis = normalize_vector(axis)
sina = math.sin(angle)
cosa = math.cos(angle)
R = [[cosa, 0.0, 0.0], [0.0, cosa, 0.0], [0.0, 0.0, cosa]]
outer_product = [[axis[i] * axis[j] * (1.0 - cosa) for i in range(3)] for j in range(3)]
R = [[R[i][j] + outer_product[i][j] for i in range(3)] for j in range(3)]
axis = scale_vector(axis, sina)
m = [[0.0, -axis[2], axis[1]],
[axis[2], 0.0, -axis[0]],
[-axis[1], axis[0], 0.0]]
M = identity_matrix(4)
for i in range(3):
for j in range(3):
R[i][j] += m[i][j]
M[i][j] = R[i][j]
# rotation about axis, angle AND point includes also translation
t = subtract_vectors(point, multiply_matrix_vector(R, point))
M[0][3] = t[0]
M[1][3] = t[1]
M[2][3] = t[2]
return M
def matrix_from_axis_angle_vector(axis_angle_vector, point=[0, 0, 0]):
"""Calculates a rotation matrix from an axis-angle vector.
Parameters
----------
axis_angle_vector : [float, float, float]
Three numbers that represent the axis of rotation and angle of rotation
through the vector's magnitude.
point : [float, float, float] | :class:`~compas.geometry.Point`, optional
A point to perform a rotation around an origin other than [0, 0, 0].
Returns
-------
list[list[float]]
The 4x4 transformation matrix representing a rotation.
Examples
--------
>>> aav1 = [-0.043, -0.254, 0.617]
>>> R = matrix_from_axis_angle_vector(aav1)
>>> aav2 = axis_angle_vector_from_matrix(R)
>>> allclose(aav1, aav2)
True
"""
axis = list(axis_angle_vector)
angle = length_vector(axis_angle_vector)
return matrix_from_axis_and_angle(axis, angle, point)
def axis_and_angle_from_matrix(M):
"""Returns the axis and the angle of the rotation matrix M.
Parameters
----------
M : list[list[float]]
The 4-by-4 transformation matrix.
Returns
-------
[float, float, float]
The rotation axis.
float
The rotation angle in radians.
"""
fabs = math.fabs
sqrt = math.sqrt
eps = 0.01 # margin to allow for rounding errors
eps2 = 0.1 # margin to distinguish between 0 and 180 degrees
if all(fabs(M[i][j] - M[j][i]) < eps for i, j in [(0, 1), (0, 2), (1, 2)]):
if (all(fabs(M[i][j] - M[j][i]) < eps2 for i, j in [(0, 1), (0, 2), (1, 2)]) and
fabs(M[0][0] + M[1][1] + M[2][2] - 3) < eps2):
return [0, 0, 0], 0
angle = math.pi
xx = (M[0][0] + 1) / 2
yy = (M[1][1] + 1) / 2
zz = (M[2][2] + 1) / 2
xy = (M[0][1] + M[1][0]) / 4
xz = (M[0][2] + M[2][0]) / 4
yz = (M[1][2] + M[2][1]) / 4
root_half = sqrt(0.5)
if (xx > yy) and (xx > zz):
if xx < eps:
axis = [0, root_half, root_half]
else:
x = sqrt(xx)
axis = [x, xy / x, xz / x]
elif yy > zz:
if yy < eps:
axis = [root_half, 0, root_half]
else:
y = sqrt(yy)
axis = [xy / y, y, yz / y]
else:
if zz < eps:
axis = [root_half, root_half, 0]
else:
z = sqrt(zz)
axis = [xz / z, yz / z, z]
return axis, angle
s = sqrt(
(M[2][1] - M[1][2]) * (M[2][1] - M[1][2]) +
(M[0][2] - M[2][0]) * (M[0][2] - M[2][0]) +
(M[1][0] - M[0][1]) * (M[1][0] - M[0][1]))
# should this also be an eps?
if fabs(s) < 0.001:
s = 1
angle = math.acos((M[0][0] + M[1][1] + M[2][2] - 1) / 2)
x = (M[2][1] - M[1][2]) / s
y = (M[0][2] - M[2][0]) / s
z = (M[1][0] - M[0][1]) / s
return [x, y, z], angle
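# Illustrative round-trip sketch (added, not part of the original module):
# the recovered axis is unit length, so the angle alone is directly
# comparable with the input for rotations strictly between 0 and pi.
# >>> R = matrix_from_axis_and_angle(normalize_vector([1.0, 2.0, 3.0]), 0.4)
# >>> axis, angle = axis_and_angle_from_matrix(R)
# >>> allclose([angle], [0.4])
# True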
def axis_angle_vector_from_matrix(M):
"""Returns the axis-angle vector of the rotation matrix M.
Parameters
----------
M : list[list[float]]
The 4-by-4 transformation matrix.
Returns
-------
[float, float, float]
The axis-angle vector.
"""
axis, angle = axis_and_angle_from_matrix(M)
return scale_vector(axis, angle)
def matrix_from_quaternion(quaternion):
"""Calculates a rotation matrix from quaternion coefficients.
Parameters
----------
quaternion : [float, float, float, float]
Four numbers that represents the four coefficient values of a quaternion.
Returns
-------
list[list[float]]
The 4x4 transformation matrix representing a rotation.
Raises
------
ValueError
If quaternion is invalid.
Examples
--------
>>> q1 = [0.945, -0.021, -0.125, 0.303]
>>> R = matrix_from_quaternion(q1)
>>> q2 = quaternion_from_matrix(R)
>>> allclose(q1, q2, tol=1e-03)
True
"""
sqrt = math.sqrt
q = quaternion
n = q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2 # dot product
# perhaps this should not be hard-coded?
eps = 1.0e-15
if n < eps:
raise ValueError("Invalid quaternion, dot product must be != 0.")
q = [v * sqrt(2.0 / n) for v in q]
q = [[q[i] * q[j] for i in range(4)] for j in range(4)] # outer_product
rotation = [
[1.0 - q[2][2] - q[3][3], q[1][2] - q[3][0], q[1][3] + q[2][0], 0.0],
[q[1][2] + q[3][0], 1.0 - q[1][1] - q[3][3], q[2][3] - q[1][0], 0.0],
[q[1][3] - q[2][0], q[2][3] + q[1][0], 1.0 - q[1][1] - q[2][2], 0.0],
[0.0, 0.0, 0.0, 1.0]]
return rotation
def quaternion_from_matrix(M):
"""Returns the 4 quaternion coefficients from a rotation matrix.
Parameters
----------
M : list[list[float]]
The coefficients of the rotation matrix, row per row.
Returns
-------
[float, float, float, float]
The quaternion coefficients.
Examples
--------
>>> q1 = [0.945, -0.021, -0.125, 0.303]
>>> R = matrix_from_quaternion(q1)
>>> q2 = quaternion_from_matrix(R)
>>> allclose(q1, q2, tol=1e-03)
True
"""
sqrt = math.sqrt
qw, qx, qy, qz = 0, 0, 0, 0
trace = M[0][0] + M[1][1] + M[2][2]
if trace > 0.0:
s = 0.5 / sqrt(trace + 1.0)
qw = 0.25 / s
qx = (M[2][1] - M[1][2]) * s
qy = (M[0][2] - M[2][0]) * s
qz = (M[1][0] - M[0][1]) * s
elif (M[0][0] > M[1][1]) and (M[0][0] > M[2][2]):
s = 2.0 * sqrt(1.0 + M[0][0] - M[1][1] - M[2][2])
qw = (M[2][1] - M[1][2]) / s
qx = 0.25 * s
qy = (M[0][1] + M[1][0]) / s
qz = (M[0][2] + M[2][0]) / s
elif M[1][1] > M[2][2]:
s = 2.0 * sqrt(1.0 + M[1][1] - M[0][0] - M[2][2])
qw = (M[0][2] - M[2][0]) / s
qx = (M[0][1] + M[1][0]) / s
qy = 0.25 * s
qz = (M[1][2] + M[2][1]) / s
else:
s = 2.0 * sqrt(1.0 + M[2][2] - M[0][0] - M[1][1])
qw = (M[1][0] - M[0][1]) / s
qx = (M[0][2] + M[2][0]) / s
qy = (M[1][2] + M[2][1]) / s
qz = 0.25 * s
return [qw, qx, qy, qz]
def matrix_from_basis_vectors(xaxis, yaxis):
"""Creates a rotation matrix from basis vectors (= orthonormal vectors).
Parameters
----------
xaxis : [float, float, float] | :class:`~compas.geometry.Vector`
The x-axis of the frame.
yaxis : [float, float, float] | :class:`~compas.geometry.Vector`
The y-axis of the frame.
Returns
-------
list[list[float]]
A 4x4 transformation matrix representing a rotation.
Notes
-----
.. code-block:: none
[ x0 y0 z0 0 ]
[ x1 y1 z1 0 ]
[ x2 y2 z2 0 ]
[ 0 0 0 1 ]
Examples
--------
>>> xaxis = [0.68, 0.68, 0.27]
>>> yaxis = [-0.67, 0.73, -0.15]
>>> R = matrix_from_basis_vectors(xaxis, yaxis)
"""
xaxis = normalize_vector(list(xaxis))
yaxis = normalize_vector(list(yaxis))
zaxis = cross_vectors(xaxis, yaxis)
yaxis = cross_vectors(zaxis, xaxis)
R = identity_matrix(4)
R[0][0], R[1][0], R[2][0] = xaxis
R[0][1], R[1][1], R[2][1] = yaxis
R[0][2], R[1][2], R[2][2] = zaxis
return R
def basis_vectors_from_matrix(R):
"""Returns the basis vectors from the rotation matrix R.
Parameters
----------
R : list[list[float]]
A 4-by-4 transformation matrix, or a 3-by-3 rotation matrix.
Returns
-------
[float, float, float]
The first basis vector of the rotation.
[float, float, float]
The second basis vector of the rotation.
Raises
------
ValueError
If rotation matrix is invalid.
Examples
--------
>>> from compas.geometry import Frame
>>> f = Frame([0, 0, 0], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> R = matrix_from_frame(f)
>>> xaxis, yaxis = basis_vectors_from_matrix(R)
"""
xaxis = [R[0][0], R[1][0], R[2][0]]
yaxis = [R[0][1], R[1][1], R[2][1]]
zaxis = [R[0][2], R[1][2], R[2][2]]
if not allclose(zaxis, cross_vectors(xaxis, yaxis)):
raise ValueError("Matrix is invalid rotation matrix.")
return xaxis, yaxis
def matrix_from_translation(translation):
"""Returns a 4x4 translation matrix in row-major order.
Parameters
----------
translation : [float, float, float]
The x, y and z components of the translation.
Returns
-------
list[list[float]]
The 4x4 transformation matrix representing a translation.
Notes
-----
.. code-block:: none
[ . . . 0 ]
[ . . . 1 ]
[ . . . 2 ]
[ . . . . ]
Examples
--------
>>> T = matrix_from_translation([1, 2, 3])
"""
M = identity_matrix(4)
M[0][3] = float(translation[0])
M[1][3] = float(translation[1])
M[2][3] = float(translation[2])
return M
def translation_from_matrix(M):
"""Returns the 3 values of translation from the matrix M.
Parameters
----------
M : list[list[float]]
A 4-by-4 transformation matrix.
Returns
-------
[float, float, float]
The translation vector.
"""
return [M[0][3], M[1][3], M[2][3]]
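# Quick illustrative check (added, not part of the original module):
# >>> translation_from_matrix(matrix_from_translation([1, 2, 3]))
# [1.0, 2.0, 3.0]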
def matrix_from_orthogonal_projection(plane):
"""Returns an orthogonal projection matrix to project onto a plane.
Parameters
----------
plane : [point, normal] | :class:`~compas.geometry.Plane`
The plane to project onto.
Returns
-------
list[list[float]]
The 4x4 transformation matrix representing an orthogonal projection.
Examples
--------
>>> point = [0, 0, 0]
>>> normal = [0, 0, 1]
>>> plane = (point, normal)
>>> P = matrix_from_orthogonal_projection(plane)
"""
point, normal = plane
T = identity_matrix(4)
normal = normalize_vector(normal)
for j in range(3):
for i in range(3):
T[i][j] -= normal[i] * normal[j] # outer_product
T[0][3], T[1][3], T[2][3] = scale_vector(normal, dot_vectors(point, normal))
return T
def matrix_from_parallel_projection(plane, direction):
"""Returns an parallel projection matrix to project onto a plane.
Parameters
----------
plane : [point, normal] | :class:`~compas.geometry.Plane`
The plane to project onto.
direction : [float, float, float] | :class:`~compas.geometry.Vector`
Direction of the projection.
Returns
-------
list[list[float]]
A 4-by-4 transformation matrix.
Examples
--------
>>> point = [0, 0, 0]
>>> normal = [0, 0, 1]
>>> plane = (point, normal)
>>> direction = [1, 1, 1]
>>> P = matrix_from_parallel_projection(plane, direction)
"""
point, normal = plane
T = identity_matrix(4)
normal = normalize_vector(normal)
scale = dot_vectors(direction, normal)
for j in range(3):
for i in range(3):
T[i][j] -= direction[i] * normal[j] / scale
T[0][3], T[1][3], T[2][3] = scale_vector(direction, dot_vectors(point, normal) / scale)
return T
def matrix_from_perspective_projection(plane, center_of_projection):
"""Returns a perspective projection matrix to project onto a plane along lines that emanate from a single point, called the center of projection.
Parameters
----------
plane : [point, normal] | :class:`~compas.geometry.Plane`
The plane to project onto.
center_of_projection : [float, float, float] | :class:`~compas.geometry.Point`
The camera view point.
Returns
-------
list[list[float]]
A 4-by-4 transformation matrix.
Examples
--------
>>> point = [0, 0, 0]
>>> normal = [0, 0, 1]
>>> plane = (point, normal)
>>> center_of_projection = [1, 1, 0]
>>> P = matrix_from_perspective_projection(plane, center_of_projection)
"""
point, normal = plane
T = identity_matrix(4)
normal = normalize_vector(normal)
T[0][0] = T[1][1] = T[2][2] = dot_vectors(subtract_vectors(center_of_projection, point), normal)
for j in range(3):
for i in range(3):
T[i][j] -= center_of_projection[i] * normal[j]
T[0][3], T[1][3], T[2][3] = scale_vector(center_of_projection, dot_vectors(point, normal))
for i in range(3):
T[3][i] -= normal[i]
T[3][3] = dot_vectors(center_of_projection, normal)
return T
def matrix_from_perspective_entries(perspective):
"""Returns a matrix from perspective entries.
Parameters
----------
    perspective : [float, float, float, float]
The 4 perspective entries of a matrix.
Returns
-------
list[list[float]]
A 4-by-4 transformation matrix.
Notes
-----
.. code-block:: none
[ . . . . ]
[ . . . . ]
[ . . . . ]
[ 0 1 2 3 ]
"""
M = identity_matrix(4)
M[3][0] = float(perspective[0])
M[3][1] = float(perspective[1])
M[3][2] = float(perspective[2])
M[3][3] = float(perspective[3])
return M
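# Illustrative check (added, not part of the original module): the four
# entries populate the bottom row of an otherwise identity matrix.
# >>> matrix_from_perspective_entries([0.1, 0.2, 0.3, 1.0])[3]
# [0.1, 0.2, 0.3, 1.0]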
def matrix_from_shear_entries(shear_entries):
"""Returns a shear matrix from the 3 factors for x-y, x-z, and y-z axes.
Parameters
----------
shear_entries : [float, float, float]
The 3 shear factors for x-y, x-z, and y-z axes.
Returns
-------
list[list[float]]
A 4-by-4 transformation matrix.
Notes
-----
.. code-block:: none
[ . 0 1 . ]
[ . . 2 . ]
[ . . . . ]
[ . . . . ]
Examples
--------
>>> Sh = matrix_from_shear_entries([1, 2, 3])
"""
M = identity_matrix(4)
M[0][1] = float(shear_entries[0])
M[0][2] = float(shear_entries[1])
M[1][2] = float(shear_entries[2])
return M
def matrix_from_shear(angle, direction, point, normal):
"""Constructs a shear matrix by an angle along the direction vector on the
shear plane (defined by point and normal).
Parameters
----------
angle : float
The angle in radians.
direction : [float, float, float] | :class:`~compas.geometry.Vector`
The direction vector as list of 3 numbers.
It must be orthogonal to the normal vector.
point : [float, float, float] | :class:`~compas.geometry.Point`
The point of the shear plane as list of 3 numbers.
normal : [float, float, float] | :class:`~compas.geometry.Vector`
The normal of the shear plane as list of 3 numbers.
Returns
-------
list[list[float]]
A 4-by-4 transformation matrix.
Raises
------
ValueError
If direction and normal are not orthogonal.
Notes
-----
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane (defined by point and normal).
Examples
--------
>>> angle = 0.1
>>> direction = [0.1, 0.2, 0.3]
>>> point = [4, 3, 1]
>>> normal = cross_vectors(direction, [1, 0.3, -0.1])
>>> S = matrix_from_shear(angle, direction, point, normal)
"""
fabs = math.fabs
normal = normalize_vector(normal)
direction = normalize_vector(direction)
if fabs(dot_vectors(normal, direction)) > _EPS:
raise ValueError('Direction and normal vectors are not orthogonal')
angle = math.tan(angle)
M = identity_matrix(4)
for j in range(3):
for i in range(3):
M[i][j] += angle * direction[i] * normal[j]
M[0][3], M[1][3], M[2][3] = scale_vector(direction, -angle * dot_vectors(point, normal))
return M
def matrix_from_scale_factors(scale_factors):
"""Returns a 4x4 scaling transformation.
Parameters
----------
scale_factors : [float, float, float]
Three numbers defining the scaling factors in x, y, and z respectively.
Returns
-------
list[list[float]]
A 4-by-4 transformation matrix.
Notes
-----
    .. code-block:: none
[ 0 . . . ]
[ . 1 . . ]
[ . . 2 . ]
[ . . . . ]
Examples
--------
>>> Sc = matrix_from_scale_factors([1, 2, 3])
"""
M = identity_matrix(4)
M[0][0] = float(scale_factors[0])
M[1][1] = float(scale_factors[1])
M[2][2] = float(scale_factors[2])
return M
def quaternion_from_euler_angles(e, static=True, axes='xyz'):
"""Returns a quaternion from Euler angles.
Parameters
----------
    e : [float, float, float]
Three numbers that represent the angles of rotations about the specified axes.
static : bool, optional
If True, the rotations are applied to a static frame.
If False, the rotations are applied to a rotational frame.
axes : str, optional
A three-character string specifying the order of the axes.
Returns
-------
[float, float, float, float]
Quaternion as a list of four real values ``[w, x, y, z]``.
"""
m = matrix_from_euler_angles(e, static, axes)
q = quaternion_from_matrix(m)
return q
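# Illustrative round-trip sketch (added, not part of the original module):
# >>> q = quaternion_from_euler_angles([0.1, 0.2, 0.3])
# >>> allclose(euler_angles_from_quaternion(q), [0.1, 0.2, 0.3])
# True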
def euler_angles_from_quaternion(q, static=True, axes='xyz'):
"""Returns Euler angles from a quaternion.
Parameters
----------
    q : [float, float, float, float]
Quaternion as a list of four real values ``[w, x, y, z]``.
static : bool, optional
If True, the rotations are applied to a static frame.
If False, the rotations are applied to a rotational frame.
axes : str, optional
A three-character string specifying the order of the axes.
Returns
-------
[float, float, float]
Euler angles as a list of three real values ``[a, b, c]``.
"""
m = matrix_from_quaternion(q)
e = euler_angles_from_matrix(m, static, axes)
return e
def quaternion_from_axis_angle(axis, angle):
"""Returns a quaternion describing a rotation around the given axis by the given angle.
Parameters
----------
axis : [float, float, float] | :class:`~compas.geometry.Vector`
XYZ coordinates of the rotation axis vector.
angle : float
Angle of rotation in radians.
Returns
-------
[float, float, float, float]
Quaternion as a list of four real values ``[qw, qx, qy, qz]``.
Examples
--------
>>> axis = [1.0, 0.0, 0.0]
>>> angle = math.pi/2
>>> q = quaternion_from_axis_angle(axis, angle)
>>> allclose(q, [math.sqrt(2)/2, math.sqrt(2)/2, 0, 0])
True
"""
m = matrix_from_axis_and_angle(axis, angle, None)
q = quaternion_from_matrix(m)
return q
def axis_angle_from_quaternion(q):
"""Returns an axis and an angle of rotation from the given quaternion.
Parameters
----------
q : [float, float, float, float]
Quaternion as a list of four real values ``[qw, qx, qy, qz]``.
Returns
-------
axis : [float, float, float]
XYZ coordinates of the rotation axis vector.
angle : float
Angle of rotation in radians.
Examples
--------
>>> q = [1., 1., 0., 0.]
>>> axis, angle = axis_angle_from_quaternion(q)
>>> allclose(axis, [1., 0., 0.])
True
>>> allclose([angle], [math.pi/2], 1e-6)
True
"""
m = matrix_from_quaternion(q)
axis, angle = axis_and_angle_from_matrix(m)
return axis, angle
|
compas-dev/compas
|
src/compas/geometry/transformations/matrices.py
|
Python
|
mit
| 41,266
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
sys.path.insert(0, os.getcwd())
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from studio.core.engines import db
from microsite import app
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
with app.app_context():
config.set_main_option('sqlalchemy.url', app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = db.Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
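# Typical invocations (added note; commands assumed from standard Alembic
# usage, not taken from this repository):
#   alembic upgrade head        # online mode, applies migrations
#   alembic upgrade head --sql  # offline mode, emits SQL to stdout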
|
qisanstudio/qsapp-microsite
|
src/microsite/migration/env.py
|
Python
|
mit
| 2,298
|
from django.forms import ModelForm
from ..models import MaternalEligibilityLoss
class MaternalEligibilityLossForm(ModelForm):
class Meta:
model = MaternalEligibilityLoss
fields = '__all__'
|
botswana-harvard/tshilo-dikotla
|
td_maternal/forms/maternal_eligibility_loss_form.py
|
Python
|
gpl-2.0
| 213
|
"""parses configuration and returns useful things"""
#pylint: disable=relative-import
#pylint: disable=too-many-ancestors
from etl_framework.ExtractorConfig import ExtractorConfig
from etl_framework.config_mixins.SleepMixin import SleepMixin
from etl_framework.config_mixins.BatchMixin import BatchMixin
from etl_framework.config_mixins.FiltersMixin import FiltersMixin
#from etl_framework.config_mixins.DestinationMixin import DestinationMixin
from gcloud.configs.mixins.gcloud import GcloudMixin
class PubsubExtractorConfig(
ExtractorConfig,
FiltersMixin,
GcloudMixin,
SleepMixin,
BatchMixin
):
"""parses configuration files"""
    @property
    def message_flusher(self):
        """Return the configured message flusher, if any."""
        return self.config.get('message_flusher')
    @message_flusher.setter
    def message_flusher(self, message_flusher):
        """Store the message flusher in the configuration."""
        self.config['message_flusher'] = message_flusher
    @property
    def pubsub_topic_name(self):
        """Return the name of the Pub/Sub topic to extract from."""
        return self.config.get('pubsub_topic_name')
    @property
    def extractor_loaders(self):
        """Return the configured extractor loaders, if any."""
        return self.config.get('extractor_loaders')
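# Hypothetical usage sketch (added; assumes the ExtractorConfig base accepts
# a plain dict, which may differ in the real etl_framework API):
# config = PubsubExtractorConfig({'pubsub_topic_name': 'my-topic'})
# assert config.pubsub_topic_name == 'my-topic'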
|
pantheon-systems/etl-framework
|
gcloud/configs/pubsub_extractor.py
|
Python
|
mit
| 1,171
|
# Generated by Django 2.2.13 on 2021-02-05 10:34
from django.db import migrations
from django.db.models import Count
from base.models.group_element_year import GroupElementYear
YEAR_FROM = 2019
def fix_order(apps, schema_editor):
problematic_element_parents = find_problematic_parents()
print("Problematic parents: {}".format(len(problematic_element_parents)))
for parent_element_id in problematic_element_parents:
reorder_children(parent_element_id)
print(str(parent_element_id))
def find_problematic_parents():
return GroupElementYear.objects.filter(
parent_element__group_year__academic_year__year__gte=YEAR_FROM
).values(
"parent_element",
"order",
).annotate(
num_children_order=Count("order")
).filter(
num_children_order__gt=1
).values_list("parent_element", flat=True)
def reorder_children(parent_element_id: int):
links = GroupElementYear.objects.filter(parent_element__id=parent_element_id).order_by("order")
for order, link in enumerate(links, start=1):
link.order = order
link.save()
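# Illustrative sketch (added, not part of the migration): children carrying
# duplicate orders such as [1, 1, 2] are rewritten to the gap-free sequence
# [1, 2, 3], preserving their current relative order.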
class Migration(migrations.Migration):
dependencies = [
('base', '0565_auto_20210203_1310'),
]
operations = [
migrations.RunPython(fix_order, elidable=True, reverse_code=migrations.RunPython.noop),
]
|
uclouvain/osis
|
base/migrations/0566_fix_link_orders.py
|
Python
|
agpl-3.0
| 1,354
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = "SKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
api_key_secret = "your_api_key_secret"
client = Client(api_key_sid, api_key_secret)
recordings = client.video.recordings \
.list(grouping_sid='RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
for recording in recordings:
print(recording.sid)
|
teoreteetik/api-snippets
|
video/rest/recordings/list-recordings-for-room/list-recordings-for-room.6.x.py
|
Python
|
mit
| 451
|
from abc import abstractmethod, abstractproperty
from ebu_tt_live.utils import AutoRegisteringABCMeta, AbstractStaticMember, validate_types_only
# Interfaces
# ==========
class INode(object):
"""
This is the foundation of all nodes that take part in the processing of subtitle documents.
The Node should deal with subtitles in a high level interface,
which is an instance of :class:`<ebu_tt_live.documents.SubtitleDocument>`. That is the interface which should
be used to communicate with the carriage mechanism. See :class:`<ebu_tt_live.carriage.ICarriageMechanism>`
"""
__metaclass__ = AutoRegisteringABCMeta
@abstractmethod
def process_document(self, document, **kwargs):
"""
The central hook that is meant to implement the main functionality of the node.
A node must implement this method.
:param kwargs: Extra parameters
:param document: Can be XML, Document object...etc. depending on the carriage implementation
"""
raise NotImplementedError()
class IProducerNode(INode):
_provides = AbstractStaticMember(validate_types_only)
@abstractmethod
def resume_producing(self):
"""
This allows the node to be triggered by events, such as a timer.
"""
@classmethod
def provides(cls):
"""
Data type provided
:return:
"""
if isinstance(cls._provides, AbstractStaticMember):
raise TypeError('Classmethod relies on abstract property: \'_provides\'')
return cls._provides
@abstractproperty
def producer_carriage(self):
"""
Carriage mechanism accessor
:return:
"""
@abstractmethod
def register_producer_carriage(self, producer_carriage):
"""
Output carriage mechanism registration
:param producer_carriage:
"""
class IConsumerNode(INode):
_expects = AbstractStaticMember(validate_types_only)
@classmethod
def expects(cls):
"""
Data type expected
:return:
"""
if isinstance(cls._expects, AbstractStaticMember):
raise TypeError('Classmethod relies on abstract property: \'_expects\'')
return cls._expects
@abstractproperty
def consumer_carriage(self):
"""
Carriage mechanism accessor
:return:
"""
@abstractmethod
def register_consumer_carriage(self, consumer_carriage):
"""
Input carriage mechanism registration
:param consumer_carriage:
"""
|
ebu/ebu-tt-live-toolkit
|
ebu_tt_live/node/interface.py
|
Python
|
bsd-3-clause
| 2,579
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8221")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8221")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Choic address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Choic address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
		except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
choicoin/chcoin
|
contrib/bitrpc/bitrpc.py
|
Python
|
mit
| 7,832
|
# Script to make "simple" geothermal models to show effects of shallow structures.
import numpy as np, sys, os, time, gzip, cPickle as pickle, scipy, gc
from glob import glob
sys.path.append('/tera_raid/gudni/gitCodes/simpeg')
import SimPEG as simpeg
import SimPEG
from SimPEG import NSEM
# Load the solver
sys.path.append('/tera_raid/gudni')
from pymatsolver import MumpsSolver
# Open files
freqList = np.load('MTfrequencies.npy')
locs = np.load('MTlocations.npy')
# Load the model
mesh, modDict = simpeg.Mesh.TensorMesh.readVTK('nsmesh_GKRCoarseHKPK1.vtr')
sigma = modDict['S/m']
bgsigma = np.ones_like(sigma)*1e-8
bgsigma[sigma > 9.999e-7] = 0.01
# A comment to make a commit
# for loc in locs:
# # NOTE: loc has to be a (1,3) np.ndarray otherwise errors accure
# for rxType in ['zxxr','zxxi','zxyr','zxyi','zyxr','zyxi','zyyr','zyyi']:
# rxList.append(simpegNSEM.SurveyNSEM.RxMT(simpeg.mkvc(loc,2).T,rxType))
# Make a receiver list
rxList = []
for rxType in ['zxxr','zxxi','zxyr','zxyi','zyxr','zyxi','zyyr','zyyi','tzxr','tzxi','tzyr','tzyi']:
rxList.append(NSEM.Rx(locs,rxType))
# Source list
srcList =[]
for freq in freqList:
srcList.append(NSEM.SrcNSEM.polxy_1Dprimary(rxList,freq))
# Survey MT
survey = NSEM.Survey(srcList)
# Background 1D model
sigma1d = mesh.r(bgsigma,'CC','CC','M')[0,0,:]
## Setup the problem object
problem = NSEM.Problem3D_ePrimSec(mesh,sigmaPrimary = sigma1d)
problem.verbose = True
problem.Solver = MumpsSolver
problem.pair(survey)
## Calculate the fields
stTime = time.time()
print 'Starting calculating field solution at ' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
sys.stdout.flush()
FmtSer = problem.fields(sigma)
print 'Ended calculation field at ' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print 'Ran for {:f}'.format(time.time()-stTime)
## Project data
stTime = time.time()
print 'Starting projecting fields to data at ' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
sys.stdout.flush()
mtData = NSEM.Data(survey,survey.eval(FmtSer))
print 'Ended projection of fields at ' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print 'Ran for {:f}'.format(time.time()-stTime)
mtStArr = mtData.toRecArray('Complex')
SimPEG.np.save('MTdataStArr_nsmesh_GKRHKPK1',mtStArr)
fieldsDict = {}
for freq in survey.freqs:
src = survey.getSrcByFreq(freq)
fieldsDict[freq] = {'e_pxSolution':FmtSer[src,'e_pxSolution'],'e_pySolution':FmtSer[src,'e_pySolution']}
with open('MTfields_HKPK1Coarse.pkl','wb') as out:
pickle.dump(fieldsDict,out,2)
del FmtSer, mtStArr, mtData
gc.collect()
# Read in the fields dicts
if False:
FmtSer = problem.fieldsPair()
for freq, fD in fieldsDict.iteritems():
src = survey.getSrcByFreq(freq)
FmtSer[src,'e_pxSolution'] = fD['e_pxSolution']
FmtSer[src,'e_pySolution'] = fD['e_pySolution']
|
simpeg/simpegExamples
|
SciPy2016/MTwork/ForwardModeling_noExtension_GKR/findDiam_MTforward_HKPK1.py
|
Python
|
mit
| 2,857
|
#!/usr/bin/env python
#This is a runtime-configurable implementation of the
#linear congruential pseudo-random number generator,
#using the following formula:
#y = (a*z+b) mod m
#where:
#z = the last generated result (or the seed at startup)
#a,b,m = parameters (defaults generated randomly)
#
#Numbers created by the LC algorithm are NOT intended
#to be used for cryptographical purposes.
#Note that the LC is a lot slower than almost all other
#algorithms, especially the MersenneTwister.
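#Added note (standard result, not from this file): the generator reaches its
#full period m only when b and m are coprime, a-1 is divisible by every prime
#factor of m, and a-1 is divisible by 4 if m is (Hull-Dobell theorem). The
#randomly chosen defaults below give no such guarantee.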
from __future__ import with_statement
from random import *
from decimal import *
from math import *
import sys
from optparse import OptionParser
#Generate random defaults for the option parser
randm = randint(2, sys.maxint)
randa = randint(1, randm - 1)
randb = randint(0, randm - 1)
randseed = randint(0, randm - 1)
#Initialize the option parser
parser = OptionParser()
parser.enable_interspersed_args()
parser.add_option("-c",
"--count",
type="int",
action="store",
dest="count",
help="How many random numbers to generate")
parser.add_option("-o",
"--out",
dest="outfileName",
help="Output file")
parser.add_option("-a",
"--parama",
type="long",
dest="a",
help="Parameter a (multiplier)")
parser.add_option("-b",
"--paramb",
type="long",
dest="b",
help="Parameter b (increment)")
parser.add_option("-m",
"--paramm",
type="long",
dest="m",
help="Parameter m (modulus)")
parser.add_option("-s",
"--seed",
type="long",
dest="seed",
help="Seed (= last generator result)")
#Set defaults
parser.set_defaults(outfileName="rand.txt",
count=10000,
a=randa,
b=randb,
m=randm,
seed=randseed
)
#Parse
(options,args) = parser.parse_args()
#Global paramter aliases
a = options.a
b = options.b
m = options.m
lastres = options.seed
with open(options.outfileName,"w") as outfile:
for i in xrange(options.count):
lastres = (a * lastres + b) % m
print >> outfile,lastres
|
ulikoehler/entropy-analysis-tools
|
RandGen/scripts/lc.py
|
Python
|
gpl-3.0
| 1,933
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2011 Umeå University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains a class that can be used handle all the ECP handling for other python
programs.
"""
import cookielib
import sys
from saml2 import soap
from saml2 import samlp
from saml2 import BINDING_PAOS
from saml2 import BINDING_SOAP
from saml2 import class_name
from saml2.profile import paos
from saml2.profile import ecp
from saml2.metadata import MetaData
SERVICE = "urn:oasis:names:tc:SAML:2.0:profiles:SSO:ecp"
PAOS_HEADER_INFO = 'ver="%s";"%s"' % (paos.NAMESPACE, SERVICE)
class Client(object):
def __init__(self, user, passwd, sp="", idp=None, metadata_file=None,
xmlsec_binary=None, verbose=0, ca_certs="",
disable_ssl_certificate_validation=True, logger=None,
debug=False):
"""
:param user: user name
:param passwd: user password
:param sp: The SP URL
:param idp: The IdP PAOS endpoint
:param metadata_file: Where the metadata file is if used
:param xmlsec_binary: Where the xmlsec1 binary can be found
:param verbose: Chatty or not
:param ca_certs: is the path of a file containing root CA certificates
for SSL server certificate validation.
:param disable_ssl_certificate_validation: If
disable_ssl_certificate_validation is true, SSL cert validation
will not be performed.
:param logger: Somewhere to write logs to
:param debug: Whether debug output is needed
"""
self._idp = idp
self._sp = sp
self.user = user
self.passwd = passwd
self.log = logger
self.debug = debug
self._verbose = verbose
if metadata_file:
self._metadata = MetaData()
self._metadata.import_metadata(open(metadata_file).read(),
xmlsec_binary)
self._debug_info("Loaded metadata from '%s'" % metadata_file)
else:
self._metadata = None
self.cookie_handler = None
self.done_ecp = False
self.cookie_jar = cookielib.LWPCookieJar()
self.http = soap.HTTPClient(self._sp, cookiejar=self.cookie_jar,
ca_certs=ca_certs,
disable_ssl_certificate_validation=disable_ssl_certificate_validation)
def _debug_info(self, text):
if self.debug:
if self.log:
self.log.debug(text)
if self._verbose:
print >> sys.stderr, text
def find_idp_endpoint(self, idp_entity_id):
if self._idp:
return self._idp
if idp_entity_id and not self._metadata:
raise Exception(
"Can't handle IdP entity ID if I don't have metadata")
if idp_entity_id:
for binding in [BINDING_PAOS, BINDING_SOAP]:
ssos = self._metadata.single_sign_on_services(idp_entity_id,
binding=binding)
if ssos:
self._idp = ssos[0]
if self.debug:
self.log.debug("IdP endpoint: '%s'" % self._idp)
return self._idp
raise Exception("No suitable endpoint found for entity id '%s'" % (
idp_entity_id,))
else:
raise Exception("No entity ID -> no endpoint")
def phase2(self, authn_request, rc_url, idp_entity_id, headers=None,
idp_endpoint=None, sign=False, sec=""):
"""
Doing the second phase of the ECP conversation
:param authn_request: The AuthenticationRequest
:param rc_url: The assertion consumer service url
:param idp_entity_id: The EntityID of the IdP
:param headers: Possible extra headers
:param idp_endpoint: Where to send it all
:param sign: If the message should be signed
:param sec: security context
:return: The response from the IdP
"""
idp_request = soap.make_soap_enveloped_saml_thingy(authn_request)
if sign:
_signed = sec.sign_statement_using_xmlsec(idp_request,
class_name(authn_request),
nodeid=authn_request.id)
idp_request = _signed
if not idp_endpoint:
idp_endpoint = self.find_idp_endpoint(idp_entity_id)
if self.user and self.passwd:
self.http.add_credentials(self.user, self.passwd)
self._debug_info("[P2] Sending request: %s" % idp_request)
# POST the request to the IdP
response = self.http.post(idp_request, headers=headers,
path=idp_endpoint)
self._debug_info("[P2] Got IdP response: %s" % response)
if response is None or response is False:
raise Exception(
"Request to IdP failed (%s): %s" % (self.http.response.status,
self.http.error_description))
# SAMLP response in a SOAP envelope body, ecp response in headers
respdict = soap.class_instances_from_soap_enveloped_saml_thingies(
response, [paos, ecp,samlp])
if respdict is None:
raise Exception("Unexpected reply from the IdP")
self._debug_info("[P2] IdP response dict: %s" % respdict)
idp_response = respdict["body"]
assert idp_response.c_tag == "Response"
self._debug_info("[P2] IdP AUTHN response: %s" % idp_response)
_ecp_response = None
for item in respdict["header"]:
if item.c_tag == "Response" and\
item.c_namespace == ecp.NAMESPACE:
_ecp_response = item
_acs_url = _ecp_response.assertion_consumer_service_url
if rc_url != _acs_url:
error = ("response_consumer_url '%s' does not match" % rc_url,
"assertion_consumer_service_url '%s" % _acs_url)
# Send an error message to the SP
fault_text = soap.soap_fault(error)
_ = self.http.post(fault_text, path=rc_url)
# Raise an exception so the user knows something went wrong
raise Exception(error)
return idp_response
#noinspection PyUnusedLocal
def ecp_conversation(self, respdict, idp_entity_id=None):
""" """
if respdict is None:
raise Exception("Unexpected reply from the SP")
self._debug_info("[P1] SP response dict: %s" % respdict)
# AuthnRequest in the body or not
authn_request = respdict["body"]
assert authn_request.c_tag == "AuthnRequest"
# ecp.RelayState among headers
_relay_state = None
_paos_request = None
for item in respdict["header"]:
if item.c_tag == "RelayState" and\
item.c_namespace == ecp.NAMESPACE:
_relay_state = item
if item.c_tag == "Request" and\
item.c_namespace == paos.NAMESPACE:
_paos_request = item
_rc_url = _paos_request.response_consumer_url
# **********************
# Phase 2 - talk to the IdP
# **********************
idp_response = self.phase2(authn_request, _rc_url, idp_entity_id)
# **********************************
# Phase 3 - back to the SP
# **********************************
sp_response = soap.make_soap_enveloped_saml_thingy(idp_response,
[_relay_state])
self._debug_info("[P3] Post to SP: %s" % sp_response)
headers = {'Content-Type': 'application/vnd.paos+xml', }
# POST the package from the IdP to the SP
response = self.http.post(sp_response, headers, _rc_url)
if not response:
if self.http.response.status == 302:
# ignore where the SP is redirecting us to and go for the
# url I started off with.
pass
else:
print self.http.error_description
raise Exception(
"Error POSTing package to SP: %s" % self.http.response.reason)
self._debug_info("[P3] IdP response: %s" % response)
self.done_ecp = True
if self.debug:
self.log.debug("Done ECP")
return None
def operation(self, idp_entity_id, op, **opargs):
if "path" not in opargs:
opargs["path"] = self._sp
# ********************************************
# Phase 1 - First conversation with the SP
# ********************************************
# headers needed to indicate to the SP that I'm ECP enabled
if "headers" in opargs and opargs["headers"]:
opargs["headers"]["PAOS"] = PAOS_HEADER_INFO
if "Accept" in opargs["headers"]:
opargs["headers"]["Accept"] += ";application/vnd.paos+xml"
elif "accept" in opargs["headers"]:
opargs["headers"]["Accept"] = opargs["headers"]["accept"]
opargs["headers"]["Accept"] += ";application/vnd.paos+xml"
del opargs["headers"]["accept"]
else:
opargs["headers"] = {
'Accept': 'text/html; application/vnd.paos+xml',
'PAOS': PAOS_HEADER_INFO
}
# request target from SP
# can remove the PAOS header now
# try:
# del opargs["headers"]["PAOS"]
# except KeyError:
# pass
response = op(**opargs)
self._debug_info("[Op] SP response: %s" % response)
if not response:
raise Exception(
"Request to SP failed: %s" % self.http.error_description)
# The response might be a AuthnRequest instance in a SOAP envelope
# body. If so it's the start of the ECP conversation
# Two SOAP header blocks; paos:Request and ecp:Request
# may also contain a ecp:RelayState SOAP header block
# If channel-binding was part of the PAOS header any number of
# <cb:ChannelBindings> header blocks may also be present
# if 'holder-of-key' option then one or more <ecp:SubjectConfirmation>
# header blocks may also be present
try:
respdict = soap.class_instances_from_soap_enveloped_saml_thingies(
response,
[paos, ecp,
samlp])
self.ecp_conversation(respdict, idp_entity_id)
# should by now be authenticated so this should go smoothly
response = op(**opargs)
except (soap.XmlParseError, AssertionError, KeyError):
pass
#print "RESP",response, self.http.response
if not response:
if self.http.response.status != 404:
raise Exception("Error performing operation: %s" % (
self.http.error_description,))
return response
def delete(self, path=None, idp_entity_id=None):
return self.operation(idp_entity_id, self.http.delete, path=path)
def get(self, path=None, idp_entity_id=None, headers=None):
return self.operation(idp_entity_id, self.http.get, path=path,
headers=headers)
def post(self, path=None, data="", idp_entity_id=None, headers=None):
return self.operation(idp_entity_id, self.http.post, data=data,
path=path, headers=headers)
def put(self, path=None, data="", idp_entity_id=None, headers=None):
return self.operation(idp_entity_id, self.http.put, data=data,
path=path, headers=headers)
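# Illustrative usage sketch (added; URLs and file names are hypothetical):
# client = Client("alice", "secret", sp="https://sp.example.org/protected",
#                 metadata_file="metadata.xml",
#                 xmlsec_binary="/usr/bin/xmlsec1")
# page = client.get(path="https://sp.example.org/protected",
#                   idp_entity_id="https://idp.example.org/idp.xml")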
|
natebeacham/saml2
|
src/saml2/ecp_client.py
|
Python
|
bsd-2-clause
| 12,424
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe module for Skia Swarming SKQP testing.
DEPS = [
'flavor',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/properties',
'run',
'vars',
]
def test_firebase_steps(api):
"""Test an APK on Firebase Testlab."""
wlist_file = api.vars.slave_dir.join('whitelist_devices.json')
apk_file = api.vars.slave_dir.join('out','devrel','skqp-universal-debug.apk')
upload_path = 'skia-stephana-test/testing/skqp-universal-debug.apk'
args = [
'run_testlab',
'--logtostderr',
'--devices', wlist_file,
'--upload_path', upload_path,
apk_file
]
api.run(api.flavor.step, 'run firebase testlab', cmd=args)
def RunSteps(api):
api.vars.setup()
api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)
api.flavor.setup()
test_firebase_steps(api)
api.run.check_failure()
def GenTests(api):
builder = 'Test-Debian9-Clang-GCE-CPU-AVX2-universal-devrel-All-Android_SKQP'
yield (
api.test(builder) +
api.properties(buildername=builder,
buildbucket_build_id='123454321',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]')
)
|
youtube/cobalt
|
third_party/skia/infra/bots/recipes/skqp_test.py
|
Python
|
bsd-3-clause
| 1,366