| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
netroby/WinObjC | deps/3rdparty/cairolegacy/perf/make-html.py | 169 | 1893 | #!/usr/bin/python
from string import strip
from sys import stdin
targets = {}
smilies = {'slowdown': '☹', 'speedup': '☺'}
for line in stdin:
line = map(strip, filter(None, line.split(' ')))
if 9 == len(line):
target, name = line[0:2]
factor, dir = line[-2:]
name = name.split('-')
name, size = '-'.join(name[:-1]), name[-1]
target_tests = targets.get(target, {})
name_tests = target_tests.get(name, {})
name_tests[int(size)] = (factor, dir)
target_tests[name] = name_tests
targets[target] = target_tests
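# At this point `targets` is a nested mapping: target -> test name -> size ->
# (factor, direction).  A hypothetical example:
#   {'image': {'paint': {64: ('1.20', 'speedup'), 512: ('1.07', 'slowdown')}}}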
print '''\
<html><head>
<title>Performance Changes</title>
<style type="text/css">/*<![CDATA[*/
body { background: white; color: black; }
table { border-collapse: collapse; }
th, td { border: 1px solid silver; padding: 0.2em; }
td { text-align: center; }
th:first-child { text-align: left; }
th { background: #eee; }
/* those colors also should work for color blinds */
td.slowdown { background: #f93; }
td.speedup { background: #6f9; }
/*]]>*/</style>
</head><body>
<h1>Performance Changes</h1>'''
targets = targets.items()
targets.sort(lambda a, b: cmp(a[0], b[0]))
for target, names in targets:
sizes = {}
for tests in names.values():
for size in tests.keys():
sizes[size] = True
sizes = sizes.keys()
sizes.sort()
names = names.items()
names.sort(lambda a, b: cmp(a[0], b[0]))
print '<h2><a name="%s">%s</a></h2>' % (target, target)
print '<table><thead><tr><th> </th>'
for size in sizes:
print '<th>%s</th>' % size
print '</tr></thead><tbody>'
for name, tests in names:
print '<tr><th>%s</th>' % name
for size in sizes:
result = tests.get(size)
if result:
factor, dir = result
print '<td class="%s">%s %s</td>' % (
dir, factor, smilies[dir])
else:
print '<td> </td>'
print '</tr>'
print '</tbody></table>'
print '</body></html>'
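# Usage sketch (assumption: stdin carries nine-field report lines whose first
# two fields are the target and a "name-size" test id and whose last two are
# the change factor and direction, e.g. from a cairo-perf comparison):
#   some-perf-report | python make-html.py > changes.html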
| mit |
CMPUT404/socialdistribution | api/serializers/content.py | 1 | 2579 | from rest_framework import serializers
from ..models.content import Post, Comment
from ..models.author import Author
from author import CompactAuthorSerializer
from django.conf import settings
from api_settings import settings as api_settings
class SourceSerializer(serializers.URLField):
# Get path this post object was called from
def get_attribute(self, post):
return self.context.get('source', settings.HOST)
class OriginSerializer(serializers.URLField):
# Get path this post object was called from
def get_attribute(self, post):
return self.context.get('origin', settings.HOST)
class CommentSerializer(serializers.ModelSerializer):
author = CompactAuthorSerializer(many=False, read_only=True)
class Meta:
model = Comment
fields = ('guid', 'comment', 'pubDate', 'author')
read_only_fields = ('guid', 'pubDate')
def create(self, validated_data):
"""
Create a comment for a given user and post
Requires an authenticated user and a Post model passed in as context!
"""
request = self.context.get('request', None)
_post = self.context.get('post', None)
_author = Author.objects.get(user = request.user)
comment = Comment(author = _author, post = _post, **validated_data)
comment.save()
return comment
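# A minimal usage sketch (hypothetical view code, not part of this module):
# CommentSerializer.create() above expects the request and the target Post to
# arrive via the serializer context:
#   serializer = CommentSerializer(data=payload,
#                                  context={'request': request, 'post': post})
#   serializer.is_valid(raise_exception=True)
#   comment = serializer.save()   # dispatches to create() above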
class PostSerializer(serializers.ModelSerializer):
author = CompactAuthorSerializer(many = False, read_only = True)
comments = CommentSerializer(read_only = True, many = True)
categories = serializers.ListField(required=False)
source = SourceSerializer(read_only = True)
origin = OriginSerializer(read_only = True)
visibility = serializers.CharField()
image = serializers.CharField(required=False)
class Meta:
model = Post
fields = ('guid', 'title', 'source', 'origin', 'content', 'contentType', \
'pubDate', 'visibility', 'image', 'author', 'comments', 'categories')
read_only_fields = ('guid', 'pubDate', 'comments', 'author', 'visibility')
def to_representation(self, data):
data = super(PostSerializer, self).to_representation(data)
data["content-type"] = data.pop('contentType')
return data
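# e.g. serialized output carries "content-type" (hyphenated) in place of the
# model's `contentType` field.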
# DRF does not currently support creation of nested relations...
def create(self, validated_data):
request = self.context.get('request', None)
_author = Author.objects.get(user = request.user)
post = Post(author = _author, **validated_data)
post.save()
return post
| apache-2.0 |
KonstantinSchubert/micropayment-service | bitcoupon/bitcp2/views.py | 1 | 2878 | from django.http import HttpResponse, HttpResponseForbidden
from django.template import loader, RequestContext
import encryption
import decimal
from bitcp2.models import Coupon, Beneficiary
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def cashIn(request):
if "boosted.science" != request.POST["pennytoken-service-provider"]:
return HttpResponseForbidden('provider not supported')
value = Coupon.validate(request.POST["pennytoken-token-secret"])
if value is False:
return HttpResponseForbidden('coupon not valid')
else:
beneficiary = Beneficiary.objects.get(email=request.POST["payment-id"])
beneficiary.increment_payout(value)
return HttpResponse(str(value))
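# Cash-in protocol implied above: a POST with the fields
#   pennytoken-service-provider  (must be "boosted.science")
#   pennytoken-token-secret      (the coupon secret to validate)
#   payment-id                   (the beneficiary's email)
# returns the token value in the response body, or 403 if the provider is
# unsupported or the coupon is invalid.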
def www_load_tokens(request):
return render(request, 'bitcp2/load_tokens.html')
def www_about(request):
return render(request, 'bitcp2/about.html')
def www_index(request):
return render(request, 'bitcp2/index.html')
def make_new_token(price):
# this is not a view, but a helper method for the following view
coupon = Coupon(secret=encryption.getSecret(), price=price)
coupon.save()
return coupon
def www_tokens(request):
decimal.getcontext().prec = 2
price_per_token = decimal.Decimal("0.01")  # construct from str: Decimal(0.01) carries float error
price_paid = decimal.Decimal(request.POST["number_of_tokens"])
number_of_tokens = int(price_paid / price_per_token)
token_json = [make_new_token(price_per_token).get_output_info_dict() for i in range(number_of_tokens)]
template = loader.get_template('bitcp2/tokens.html')
context = RequestContext(request,{
"token_json" : token_json,
})
return HttpResponse(template.render(context))
def www_testpage(request):
template = loader.get_template('bitcp2/test.html')
# we received a token that we must validate
import urllib, urllib2
token_valid = False
# trying to verify the token
if "pennytoken-service-provider" in request.GET:
url = 'http://micropayment-service.boosted.science/cashIn/'
#url = 'http://localhost:8000/cashIn/'
data = urllib.urlencode({"pennytoken-service-provider" : request.GET["pennytoken-service-provider"],
"pennytoken-token-secret" : request.GET["pennytoken-token-secret"],
"payment-id" : "schubert.konstantin@gmail.com"
})
try:
token_value = urllib2.urlopen(url=url, data=data).read()
if float(token_value) > 0.005:
token_valid = True
except urllib2.HTTPError, err:
if err.code == 403:
pass # token is invalid
else:
raise
context = RequestContext(request,{
"token_valid" : token_valid,
})
return HttpResponse(template.render(context))
| mit |
jmartinezchaine/OpenERP | openerp/addons/delivery/wizard/__init__.py | 10 | 1083 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import delivery_sale_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JasonCormie/ansible-modules-extras | notification/mail.py | 44 | 10185 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
author: "Dag Wieers (@dagwieers)"
module: mail
short_description: Send an email
description:
- This module is useful for sending emails from playbooks.
- One may wonder why automate sending emails? In complex environments
there are from time to time processes that cannot be automated, either
because you lack the authority to make it so, or because not everyone
agrees to a common approach.
- If you cannot automate a specific step, but the step is non-blocking,
sending out an email to the responsible party to make him perform his
part of the bargain is an elegant way to put the responsibility in
someone else's lap.
- Of course sending out a mail can be equally useful as a way to notify
one or more people in a team that a specific action has been
(successfully) taken.
version_added: "0.8"
options:
from:
description:
- The email-address the mail is sent from. May contain address and phrase.
default: root
required: false
to:
description:
- The email-address(es) the mail is being sent to. This is
a comma-separated list, which may contain address and phrase portions.
default: root
required: false
cc:
description:
- The email-address(es) the mail is being copied to. This is
a comma-separated list, which may contain address and phrase portions.
required: false
bcc:
description:
- The email-address(es) the mail is being 'blind' copied to. This is
a comma-separated list, which may contain address and phrase portions.
required: false
subject:
description:
- The subject of the email being sent.
required: true
body:
description:
- The body of the email being sent.
default: $subject
required: false
username:
description:
- If SMTP requires username
default: null
required: false
version_added: "1.9"
password:
description:
- If SMTP requires password
default: null
required: false
version_added: "1.9"
host:
description:
- The mail server
default: 'localhost'
required: false
port:
description:
- The mail server port
default: '25'
required: false
version_added: "1.0"
attach:
description:
- A space-separated list of pathnames of files to attach to the message.
Attached files will have their content-type set to C(application/octet-stream).
default: null
required: false
version_added: "1.0"
headers:
description:
- A vertical-bar-separated list of headers which should be added to the message.
Each individual header is specified as C(header=value) (see example below).
default: null
required: false
version_added: "1.0"
charset:
description:
- The character set of email being sent
default: 'us-ascii'
required: false
subtype:
description:
- The minor mime type, can be either text or html. The major type is always text.
default: 'plain'
required: false
version_added: "2.0"
"""
EXAMPLES = '''
# Example playbook sending mail to root
- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.'
# Sending an e-mail using Gmail SMTP servers
- local_action: mail
host='smtp.gmail.com'
port=587
username=username@gmail.com
password='mysecret'
to="John Smith <john.smith@example.com>"
subject='Ansible-report'
body='System {{ ansible_hostname }} has been successfully provisioned.'
# Send e-mail to a bunch of users, attaching files
- local_action: mail
host='127.0.0.1'
port=2025
subject="Ansible-report"
body="Hello, this is an e-mail. I hope you like it ;-)"
from="jane@example.net (Jane Jolie)"
to="John Doe <j.d@example.org>, Suzie Something <sue@example.com>"
cc="Charlie Root <root@localhost>"
attach="/etc/group /tmp/pavatar2.png"
headers=Reply-To=john@example.com|X-Special="Something or other"
charset=utf8
# Sending an e-mail using the remote machine, not the Ansible controller node
- mail:
host='localhost'
port=25
to="John Smith <john.smith@example.com>"
subject='Ansible-report'
body='System {{ ansible_hostname }} has been successfully provisioned.'
'''
import os
import sys
import smtplib
import ssl
try:
from email import encoders
import email.utils
from email.utils import parseaddr, formataddr
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
except ImportError:
from email import Encoders as encoders
import email.Utils
from email.Utils import parseaddr, formataddr
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
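# The first block targets the lowercase email.mime.* layout of Python 2.5+;
# the fallback covers the older capitalized layout (email.MIMEBase,
# email.Encoders, ...) of earlier Python 2 releases.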
def main():
module = AnsibleModule(
argument_spec = dict(
username = dict(default=None),
password = dict(default=None, no_log=True),
host = dict(default='localhost'),
port = dict(default='25'),
sender = dict(default='root', aliases=['from']),
to = dict(default='root', aliases=['recipients']),
cc = dict(default=None),
bcc = dict(default=None),
subject = dict(required=True, aliases=['msg']),
body = dict(default=None),
attach = dict(default=None),
headers = dict(default=None),
charset = dict(default='us-ascii'),
subtype = dict(default='plain')
)
)
username = module.params.get('username')
password = module.params.get('password')
host = module.params.get('host')
port = module.params.get('port')
sender = module.params.get('sender')
recipients = module.params.get('to')
copies = module.params.get('cc')
blindcopies = module.params.get('bcc')
subject = module.params.get('subject')
body = module.params.get('body')
attach_files = module.params.get('attach')
headers = module.params.get('headers')
charset = module.params.get('charset')
subtype = module.params.get('subtype')
sender_phrase, sender_addr = parseaddr(sender)
if not body:
body = subject
try:
try:
smtp = smtplib.SMTP_SSL(host, port=int(port))
except (smtplib.SMTPException, ssl.SSLError):
smtp = smtplib.SMTP(host, port=int(port))
except Exception, e:
module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e))
smtp.ehlo()
if username and password:
if smtp.has_extn('STARTTLS'):
smtp.starttls()
try:
smtp.login(username, password)
except smtplib.SMTPAuthenticationError:
module.fail_json(msg="Authentication to %s:%s failed, please check your username and/or password" % (host, port))
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = formataddr((sender_phrase, sender_addr))
msg.preamble = "Multipart message"
if headers is not None:
for hdr in [x.strip() for x in headers.split('|')]:
try:
h_key, h_val = hdr.split('=')
msg.add_header(h_key, h_val)
except:
pass
if 'X-Mailer' not in msg:
msg.add_header('X-Mailer', "Ansible")
to_list = []
cc_list = []
addr_list = []
if recipients is not None:
for addr in [x.strip() for x in recipients.split(',')]:
to_list.append( formataddr( parseaddr(addr)) )
addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase
if copies is not None:
for addr in [x.strip() for x in copies.split(',')]:
cc_list.append( formataddr( parseaddr(addr)) )
addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase
if blindcopies is not None:
for addr in [x.strip() for x in blindcopies.split(',')]:
addr_list.append( parseaddr(addr)[1] )
if len(to_list) > 0:
msg['To'] = ", ".join(to_list)
if len(cc_list) > 0:
msg['Cc'] = ", ".join(cc_list)
part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
msg.attach(part)
if attach_files is not None:
for file in attach_files.split():
try:
fp = open(file, 'rb')
part = MIMEBase('application', 'octet-stream')
part.set_payload(fp.read())
fp.close()
encoders.encode_base64(part)
part.add_header('Content-disposition', 'attachment', filename=os.path.basename(file))
msg.attach(part)
except Exception, e:
module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % (file, e))
composed = msg.as_string()
try:
smtp.sendmail(sender_addr, set(addr_list), composed)
except Exception, e:
module.fail_json(rc=1, msg='Failed to send mail to %s: %s' % (", ".join(addr_list), e))
smtp.quit()
module.exit_json(changed=False)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
thiz11/kernel_common | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
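# Example invocations (run through perf, per the usage string above):
#   perf script -s sctop.py              # all comms, default 3s refresh
#   perf script -s sctop.py 1            # all comms, 1s refresh
#   perf script -s sctop.py firefox 5    # only syscalls made by "firefox"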
| gpl-2.0 |
haripradhan/MissionPlanner | Lib/site-packages/numpy/doc/byteswapping.py | 95 | 4783 | '''
=============================
Byteswapping and byte order
=============================
Introduction to byte ordering and ndarrays
==========================================
The ``ndarray`` is an object that provides a python array interface to data
in memory.
It often happens that the memory that you want to view with an array is
not of the same byte ordering as the computer on which you are running
Python.
For example, I might be working on a computer with a little-endian CPU -
such as an Intel Pentium, but I have loaded some data from a file
written by a computer that is big-endian. Let's say I have loaded 4
bytes from a file written by a Sun (big-endian) computer. I know that
these 4 bytes represent two 16-bit integers. On a big-endian machine, a
two-byte integer is stored with the Most Significant Byte (MSB) first,
and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
#. MSB integer 1
#. LSB integer 1
#. MSB integer 2
#. LSB integer 2
Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
The bytes I have loaded from the file would have these contents:
>>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2)
>>> big_end_str
'\\x00\\x01\\x03\\x02'
We might want to use an ``ndarray`` to access these integers. In that
case, we can create an array around this memory, and tell numpy that
there are two integers, and that they are 16 bit and big-endian:
>>> import numpy as np
>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
>>> big_end_arr[0]
1
>>> big_end_arr[1]
770
Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
example, if our data represented a single unsigned 4-byte little-endian
integer, the dtype string would be ``<u4``.
In fact, why don't we try that?
>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str)
>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
True
Returning to our ``big_end_arr`` - in this case our underlying data is
big-endian (data endianness) and we've set the dtype to match (the dtype
is also big-endian). However, sometimes you need to flip these around.
Changing byte ordering
======================
As you can imagine from the introduction, there are two ways you can
affect the relationship between the byte ordering of the array and the
underlying memory it is looking at:
* Change the byte-ordering information in the array dtype so that it
interprets the underlying data as being in a different byte order.
This is the role of ``arr.newbyteorder()``
* Change the byte-ordering of the underlying data, leaving the dtype
interpretation as it was. This is what ``arr.byteswap()`` does.
The common situations in which you need to change byte ordering are:
#. Your data and dtype endianness don't match, and you want to change
the dtype so that it matches the data.
#. Your data and dtype endianness don't match, and you want to swap the
data so that they match the dtype
#. Your data and dtype endianness match, but you want the data swapped
and the dtype to reflect this
Data and dtype endianness don't match, change dtype to match data
-----------------------------------------------------------------
We make something where they don't match:
>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
>>> wrong_end_dtype_arr[0]
256
The obvious fix for this situation is to change the dtype so it gives
the correct endianness:
>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
>>> fixed_end_dtype_arr[0]
1
Note that the array has not changed in memory:
>>> fixed_end_dtype_arr.tostring() == big_end_str
True
Data and dtype endianness don't match, change data to match dtype
------------------------------------------------------------------
You might want to do this if you need the data in memory to be a certain
ordering. For example you might be writing the memory out to a file
that needs a certain byte ordering.
>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
>>> fixed_end_mem_arr[0]
1
Now the array *has* changed in memory:
>>> fixed_end_mem_arr.tostring() == big_end_str
False
Data and dtype endianness match, swap data and dtype
----------------------------------------------------
You may have a correctly specified array dtype, but you need the array
to have the opposite byte order in memory, and you want the dtype to
match so the array values make sense. In this case you just do both of
the previous operations:
>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
>>> swapped_end_arr[0]
1
>>> swapped_end_arr.tostring() == big_end_str
False
'''
| gpl-3.0 |
sunqb/oa_qian | flask/Lib/site-packages/html5lib/inputstream.py | 35 | 30624 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
if encoding is not None:
raise TypeError("Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
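# Dispatch sketch: text in yields the unicode stream, bytes in yields the
# binary stream with encoding sniffing, and an explicit encoding combined
# with a unicode source raises TypeError.
#   HTMLInputStream("<p>hi</p>")    # -> HTMLUnicodeInputStream
#   HTMLInputStream(b"<p>hi</p>")   # -> HTMLBinaryInputStream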
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Craziness
if len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), "certain")
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 512
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
# Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
# Call superclass
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except:
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
# First look for a BOM
# This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
# If there is no BOM need to look for meta elements with encoding
# information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
# Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
try:
from charade.universaldetector import UniversalDetector
except ImportError:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence = "tentative"
encoding = self.defaultEncoding
# Substitute for equivalent encodings:
encodingSub = {"iso-8859-1": "windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
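# A small illustration (hypothetical input): EncodingBytes stores its value
# lowercased and steps through it one byte at a time.
#   eb = EncodingBytes(b"<META>")     # stored as b"<meta>"
#   next(eb)                          # -> b"<", position becomes 0
#   eb.matchBytes(b"meta")            # -> False: position 0 still holds b"<"
#   next(eb); eb.matchBytes(b"meta")  # -> True, position moves past "meta"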
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
# "<meta" not followed by a space is not a valid meta start tag; keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
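# Behaviour sketch: punctuation and spaces are stripped and the name
# lowercased before the lookup, so (assuming html5lib's encodings map holds
# the usual aliases):
#   codecName(b"UTF-8")         # -> "utf-8"
#   codecName("no such codec")  # -> None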
| apache-2.0 |
moertle/_.py | _/web/application.py | 2 | 3754 |
import sys
import os
import signal
import socket
import logging
try:
import configparser
except ImportError:
import ConfigParser as configparser
import tornado.web
import tornado.ioloop
import _
from . import handlers
ioloop = tornado.ioloop.IOLoop.instance()
class Application(tornado.web.Application):
def __init__(self, section='server'):
signal.signal(signal.SIGINT, self.SignalHandler)
signal.signal(signal.SIGTERM, self.SignalHandler)
self.websockets = set()
# get the interface and port to listen on
try:
self.addr = _.settings.args.address or _.settings.config.get(section, 'address')
except Exception:
logging.debug('No address specified')
self.addr = '127.0.0.1'
try:
self.port = _.settings.args.port or _.settings.config.getint(section, 'port')
except Exception:
logging.debug('No port specified')
self.port = 8888
# load the cookie secret used to encrypt cookies
cookie_path = _.paths('etc', 'cookie.secret')
try:
with open(cookie_path, 'rb') as fp:
cookie_secret = fp.read(44)
except IOError:
cookie_secret = _.web.util.generateCookieSecret(cookie_path)
# SSL Options
if not hasattr(self, 'ssl_options'):
self.ssl_options = None
# URI patterns
if not hasattr(self, 'patterns'):
self.patterns = []
# Tornado settings
self.settings = dict(
static_path = _.paths('share', 'static'),
template_path = _.paths('share', 'templates'),
cookie_secret = cookie_secret,
debug = False
)
# useful during development
if _.settings.args.debug:
self.settings['debug'] = True
self.patterns.append(
( r'/src/(.*)', _.web.handlers.Source ),
)
if 'auth' in _.components.Registry:
component = None
for name in _.components.Registry['auth']:
component = _.components.Registry['auth'][name]
self.patterns.extend([(component.URL, component)])
if component is not None:
self.patterns.extend([( r'/logout', _.web.auth.Logout )])
logging.debug('Setting default login URL to: %s', component.URL)
self.settings['login_url'] = component.URL
def Listen(self, **kwds):
# initialize here so patterns and settings can be extended by plugins
tornado.web.Application.__init__(self, self.patterns, **self.settings)
if 'ssl_options' not in kwds:
kwds['ssl_options'] = self.ssl_options
if 'xheaders' not in kwds:
kwds['xheaders'] = True
try:
self.listen(self.port, self.addr, **kwds)
except socket.gaierror as e:
if 8 == e.errno:
raise _.error('Invalid address specified "%s"' % self.addr)
raise _.error('Could not listen: %s' % e)
except socket.error as e:
raise _.error('Could not listen: %s' % e)
except Exception as e:
logging.exception('Exception on listen')
raise _.error('Could not listen: %s' % e)
logging.info('Listening on %s:%d', self.addr, self.port)
ioloop.start()
def Broadcast(self, msg):
'Broadcast a message to all connected sockets'
for client in self.websockets:
client.write_message(msg)
def Stop(self):
ioloop.add_callback(ioloop.stop)
def SignalHandler(self, signum, frame):
logging.info('Terminating')
self.Stop()
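# A minimal usage sketch (StatusHandler is hypothetical): callers extend
# `patterns` before Listen() builds the tornado application:
#   app = Application('server')
#   app.patterns.append((r'/status', StatusHandler))
#   app.Listen()   # blocks inside the ioloop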
| mit |
whitepages/nova | nova/objectstore/s3server.py | 17 | 14158 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an S3-like storage server based on local files.
Useful to test features that will eventually run on S3, or if you want to
run something locally that was once running on S3.
We don't support all the features of S3, but it does work with the
standard S3 client for the most basic semantics. To use the standard
S3 client with this module::
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
c.create_bucket("mybucket")
c.put("mybucket", "mykey", "a value")
print c.get("mybucket", "mykey").body
"""
import bisect
import datetime
import os
import os.path
import urllib
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import fileutils
import routes
import six
import webob
from nova.i18n import _LW
from nova import paths
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
s3_opts = [
cfg.StrOpt('buckets_path',
default=paths.state_path_def('buckets'),
help='Path to S3 buckets'),
cfg.StrOpt('s3_listen',
default="0.0.0.0",
help='IP address for S3 API to listen'),
cfg.IntOpt('s3_listen_port',
default=3333,
min=1,
max=65535,
help='Port for S3 API to listen'),
]
CONF = cfg.CONF
CONF.register_opts(s3_opts)
def get_wsgi_server():
return wsgi.Server("S3 Objectstore",
S3Application(CONF.buckets_path),
port=CONF.s3_listen_port,
host=CONF.s3_listen)
class S3Application(wsgi.Router):
"""Implementation of an S3-like storage server based on local files.
If bucket depth is given, we break files up into multiple directories
to prevent hitting file system limits for number of files in each
directory. 1 means one level of directories, 2 means two, etc.
"""
def __init__(self, root_directory, bucket_depth=0, mapper=None):
versionutils.report_deprecated_feature(
LOG,
_LW('The in tree EC2 API is deprecated as of Kilo release and may '
'be removed in a future release. The stackforge ec2-api '
'project http://git.openstack.org/cgit/stackforge/ec2-api/ '
'is the target replacement for this functionality.')
)
if mapper is None:
mapper = routes.Mapper()
mapper.connect('/',
controller=lambda *a, **kw: RootHandler(self)(*a, **kw))
mapper.connect('/{bucket}/{object_name}',
controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw))
mapper.connect('/{bucket_name}/',
controller=lambda *a, **kw: BucketHandler(self)(*a, **kw))
self.directory = os.path.abspath(root_directory)
fileutils.ensure_tree(self.directory)
self.bucket_depth = bucket_depth
super(S3Application, self).__init__(mapper)
class BaseRequestHandler(object):
"""Base class emulating Tornado's web framework pattern in WSGI.
This is a direct port of Tornado's implementation, so some key decisions
about how the code interacts have already been chosen.
The two most common ways of designing web frameworks can be
classified as async object-oriented and sync functional.
Tornado's is on the OO side because a response is built up in and using
the shared state of an object and one of the object's methods will
eventually trigger the "finishing" of the response asynchronously.
Most WSGI stuff is in the functional side, we pass a request object to
every call down a chain and the eventual return value will be a response.
Part of the function of the routing code in S3Application as well as the
code in BaseRequestHandler's __call__ method is to merge those two styles
together enough that the Tornado code can work without extensive
modifications.
To do that it needs to give the Tornado-style code clean objects that it
can modify the state of for each request that is processed, so we use a
very simple factory lambda to create new state for each request, that's
the stuff in the router, and when we let the Tornado code modify that
object to handle the request, then we return the response it generated.
This wouldn't work the same if Tornado was being more async'y and doing
other callbacks throughout the process, but since Tornado is being
relatively simple here we can be satisfied that the response will be
complete by the end of the get/post method.
"""
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, request):
method = request.method.lower()
f = getattr(self, method, self.invalid)
self.request = request
self.response = webob.Response()
params = request.environ['wsgiorg.routing_args'][1]
del params['controller']
f(**params)
return self.response
def get_argument(self, arg, default):
return self.request.params.get(arg, default)
def set_header(self, header, value):
self.response.headers[header] = value
def set_status(self, status_code):
self.response.status = status_code
def set_404(self):
self.render_xml({"Error": {
"Code": "NoSuchKey",
"Message": "The resource you requested does not exist"
}})
self.set_status(404)
def finish(self, body=''):
self.response.body = utils.utf8(body)
def invalid(self, **kwargs):
pass
def render_xml(self, value):
assert isinstance(value, dict) and len(value) == 1
self.set_header("Content-Type", "application/xml; charset=UTF-8")
name = value.keys()[0]
parts = []
parts.append('<' + utils.utf8(name) +
' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
self._render_parts(value.values()[0], parts)
parts.append('</' + utils.utf8(name) + '>')
self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
''.join(parts))
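# render_xml sketch: a one-key dict becomes a rooted XML document, e.g.
#   self.render_xml({"Error": {"Code": "NoSuchKey"}})
# yields '<?xml version="1.0" encoding="UTF-8"?>' followed by
# '<Error xmlns="http://doc.s3.amazonaws.com/2006-03-01"><Code>NoSuchKey</Code></Error>'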
def _render_parts(self, value, parts=None):
if not parts:
parts = []
if isinstance(value, six.string_types):
parts.append(utils.xhtml_escape(value))
elif isinstance(value, int) or isinstance(value, long):
parts.append(str(value))
elif isinstance(value, datetime.datetime):
parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
elif isinstance(value, dict):
for name, subvalue in six.iteritems(value):
if not isinstance(subvalue, list):
subvalue = [subvalue]
for subsubvalue in subvalue:
parts.append('<' + utils.utf8(name) + '>')
self._render_parts(subsubvalue, parts)
parts.append('</' + utils.utf8(name) + '>')
else:
raise Exception("Unknown S3 value type %r", value)
def _object_path(self, bucket, object_name):
if self.application.bucket_depth < 1:
return os.path.abspath(os.path.join(
self.application.directory, bucket, object_name))
hash = utils.get_hash_str(object_name)
path = os.path.abspath(os.path.join(
self.application.directory, bucket))
for i in range(self.application.bucket_depth):
path = os.path.join(path, hash[:2 * (i + 1)])
return os.path.join(path, object_name)
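# Path layout sketch: with bucket_depth == 2 and an object-name hash starting
# "abcd...", object "key" in bucket "b" lands at <root>/b/ab/abcd/key
# (one hash prefix of 2*(i+1) characters per depth level, per the loop above).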
class RootHandler(BaseRequestHandler):
def get(self):
names = os.listdir(self.application.directory)
buckets = []
for name in names:
path = os.path.join(self.application.directory, name)
info = os.stat(path)
buckets.append({
"Name": name,
"CreationDate": datetime.datetime.utcfromtimestamp(
info.st_ctime),
})
self.render_xml({"ListAllMyBucketsResult": {
"Buckets": {"Bucket": buckets},
}})
class BucketHandler(BaseRequestHandler):
def get(self, bucket_name):
prefix = self.get_argument("prefix", u"")
marker = self.get_argument("marker", u"")
max_keys = int(self.get_argument("max-keys", 50000))
path = os.path.abspath(os.path.join(self.application.directory,
bucket_name))
terse = int(self.get_argument("terse", 0))
if (not path.startswith(self.application.directory) or
not os.path.isdir(path)):
self.set_404()
return
object_names = []
for root, dirs, files in os.walk(path):
for file_name in files:
object_names.append(os.path.join(root, file_name))
skip = len(path) + 1
for i in range(self.application.bucket_depth):
skip += 2 * (i + 1) + 1
object_names = [n[skip:] for n in object_names]
object_names.sort()
contents = []
start_pos = 0
if marker:
start_pos = bisect.bisect_right(object_names, marker, start_pos)
if prefix:
start_pos = bisect.bisect_left(object_names, prefix, start_pos)
truncated = False
for object_name in object_names[start_pos:]:
if not object_name.startswith(prefix):
break
if len(contents) >= max_keys:
truncated = True
break
object_path = self._object_path(bucket_name, object_name)
c = {"Key": object_name}
if not terse:
info = os.stat(object_path)
c.update({
"LastModified": datetime.datetime.utcfromtimestamp(
info.st_mtime),
"Size": info.st_size,
})
contents.append(c)
marker = object_name
self.render_xml({"ListBucketResult": {
"Name": bucket_name,
"Prefix": prefix,
"Marker": marker,
"MaxKeys": max_keys,
"IsTruncated": truncated,
"Contents": contents,
}})
def put(self, bucket_name):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if (not path.startswith(self.application.directory) or
os.path.exists(path)):
self.set_status(403)
return
fileutils.ensure_tree(path)
self.finish()
def delete(self, bucket_name):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if (not path.startswith(self.application.directory) or
not os.path.isdir(path)):
self.set_404()
return
if len(os.listdir(path)) > 0:
self.set_status(403)
return
os.rmdir(path)
self.set_status(204)
self.finish()
def head(self, bucket_name):
path = os.path.abspath(os.path.join(self.application.directory,
bucket_name))
if (not path.startswith(self.application.directory) or
not os.path.isdir(path)):
self.set_404()
return
self.set_status(200)
self.finish()
class ObjectHandler(BaseRequestHandler):
def get(self, bucket, object_name):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if (not path.startswith(self.application.directory) or
not os.path.isfile(path)):
self.set_404()
return
info = os.stat(path)
self.set_header("Content-Type", "application/unknown")
self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
info.st_mtime))
object_file = open(path, "r")
try:
self.finish(object_file.read())
finally:
object_file.close()
def put(self, bucket, object_name):
object_name = urllib.unquote(object_name)
bucket_dir = os.path.abspath(os.path.join(
self.application.directory, bucket))
if (not bucket_dir.startswith(self.application.directory) or
not os.path.isdir(bucket_dir)):
self.set_404()
return
path = self._object_path(bucket, object_name)
if not path.startswith(bucket_dir) or os.path.isdir(path):
self.set_status(403)
return
directory = os.path.dirname(path)
fileutils.ensure_tree(directory)
object_file = open(path, "w")
object_file.write(self.request.body)
object_file.close()
self.set_header('ETag',
'"%s"' % utils.get_hash_str(self.request.body))
self.finish()
def delete(self, bucket, object_name):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if (not path.startswith(self.application.directory) or
not os.path.isfile(path)):
self.set_404()
return
os.unlink(path)
self.set_status(204)
self.finish()
| apache-2.0 |
comepradz/pybrain | pybrain/rl/environments/twoplayergames/capturegame.py | 25 | 8789 | __author__ = 'Tom Schaul, tom@idsia.ch'
from random import choice
from scipy import zeros
from .twoplayergame import TwoPlayerGame
# TODO: undo operation
class CaptureGame(TwoPlayerGame):
""" the capture game is a simplified version of the Go game: the first player to capture a stone wins!
Pass moves are forbidden."""
# CHECKME: suicide allowed?
BLACK = 1
WHITE = -1
EMPTY = 0
startcolor = BLACK
def __init__(self, size, suicideenabled=True):
""" the size of the board is generally between 3 and 19. """
self.size = size
self.suicideenabled = suicideenabled
self.reset()
def _iterPos(self):
""" an iterator over all the positions of the board. """
for i in range(self.size):
for j in range(self.size):
yield (i, j)
def reset(self):
""" empty the board. """
TwoPlayerGame.reset(self)
self.movesDone = 0
self.b = {}
for p in self._iterPos():
self.b[p] = self.EMPTY
# which stone belongs to which group
self.groups = {}
# how many liberties does each group have
self.liberties = {}
@property
def indim(self):
return self.size ** 2
@property
def outdim(self):
return 2 * self.size ** 2
def getBoardArray(self):
""" an array with two boolean values per position, indicating
'white stone present' and 'black stone present' respectively. """
a = zeros(self.outdim)
for i, p in enumerate(self._iterPos()):
if self.b[p] == self.WHITE:
a[2 * i] = 1
elif self.b[p] == self.BLACK:
a[2 * i + 1] = 1
return a
def isLegal(self, c, pos):
if pos not in self.b:
return False
elif self.b[pos] != self.EMPTY:
return False
elif not self.suicideenabled:
return not self._suicide(c, pos)
return True
def doMove(self, c, pos):
""" the action is a (color, position) tuple, for the next stone to move.
returns True if the move was legal. """
self.movesDone += 1
if pos == 'resign':
self.winner = -c
return True
elif not self.isLegal(c, pos):
return False
elif self._suicide(c, pos):
assert self.suicideenabled
self.b[pos] = 'y'
self.winner = -c
return True
elif self._capture(c, pos):
self.winner = c
self.b[pos] = 'x'
return True
else:
self._setStone(c, pos)
return True
def getSensors(self):
""" just a list of the board position states. """
return [x[1] for x in sorted(self.b.items())]
def __str__(self):
s = ''
for i in range(self.size):
for j in range(self.size):
val = self.b[(i, j)]
if val == self.EMPTY: s += ' .'
elif val == self.BLACK: s += ' X'
elif val == self.WHITE: s += ' O'
else: s += ' ' + str(val)
s += '\n'
if self.winner:
if self.winner == self.BLACK:
w = 'Black (X)'
elif self.winner == self.WHITE:
w = 'White (O)'
else:
w = self.winner
s += 'Winner: ' + w
s += ' (moves done:' + str(self.movesDone) + ')\n'
return s
def _neighbors(self, pos):
""" the 4 neighboring positions """
res = []
if pos[1] < self.size - 1: res.append((pos[0], pos[1] + 1))
if pos[1] > 0: res.append((pos[0], pos[1] - 1))
if pos[0] < self.size - 1: res.append((pos[0] + 1, pos[1]))
if pos[0] > 0: res.append((pos[0] - 1, pos[1]))
return res
def _setStone(self, c, pos):
""" set stone, and update liberties and groups. """
self.b[pos] = c
merge = False
self.groups[pos] = self.size * pos[0] + pos[1]
freen = [n for n in self._neighbors(pos) if self.b[n] == self.EMPTY]
self.liberties[self.groups[pos]] = set(freen)
for n in self._neighbors(pos):
if self.b[n] == -c:
self.liberties[self.groups[n]].difference_update([pos])
elif self.b[n] == c:
if merge:
newg = self.groups[pos]
oldg = self.groups[n]
if newg == oldg:
self.liberties[newg].difference_update([pos])
else:
# merging 2 groups
for p in list(self.groups.keys()):
if self.groups[p] == oldg:
self.groups[p] = newg
self.liberties[newg].update(self.liberties[oldg])
self.liberties[newg].difference_update([pos])
del self.liberties[oldg]
else:
# connect to this group
del self.liberties[self.groups[pos]]
self.groups[pos] = self.groups[n]
self.liberties[self.groups[n]].update(freen)
self.liberties[self.groups[n]].difference_update([pos])
merge = True
def _suicide(self, c, pos):
""" would putting a stone here be suicide for c? """
# any free neighbors?
for n in self._neighbors(pos):
if self.b[n] == self.EMPTY:
return False
# any friendly neighbor with extra liberties?
for n in self._neighbors(pos):
if self.b[n] == c:
if len(self.liberties[self.groups[n]]) > 1:
return False
# capture all surrounding enemies?
if self._capture(c, pos):
return False
return True
def _capture(self, c, pos):
""" would putting a stone here lead to a capture? """
for n in self._neighbors(pos):
if self.b[n] == -c:
if len(self.liberties[self.groups[n]]) == 1:
return True
return False
def getLiberties(self, pos):
""" how many liberties does the stone at pos have? """
if self.b[pos] == self.EMPTY:
return None
return len(self.liberties[self.groups[pos]])
def getGroupSize(self, pos):
""" what size is the worm that this stone is part of? """
if self.b[pos] == self.EMPTY:
return None
g = self.groups[pos]
return len([x for x in list(self.groups.values()) if x == g])
def getLegals(self, c):
""" return all the legal positions for a color """
return [p for p in self._iterPos() if self.b[p] == self.EMPTY]
def getAcceptable(self, c):
""" return all legal positions for a color that don't commit suicide. """
return [p for p in self.getLegals(c) if not self._suicide(c, p)]
def getKilling(self, c):
""" return all legal positions for a color that immediately kill the opponent. """
return [p for p in self.getAcceptable(c) if self._capture(c, p)]
def randomBoard(self, nbmoves):
""" produce a random, undecided and legal capture-game board, after at most nbmoves.
:return: the number of moves actually done. """
c = self.BLACK
self.reset()
for i in range(nbmoves):
l = set(self.getAcceptable(c))
l.difference_update(self.getKilling(c))
if len(l) == 0:
return i
self._setStone(c, choice(list(l)))
c = -c
return nbmoves
def giveHandicap(self, h, color=BLACK):
i = 0
for pos in self._handicapIterator():
i += 1
if i > h:
return
if self.isLegal(color, pos):
self._setStone(color, pos)
def _handicapIterator(self):
s = self.size
assert s > 2
yield (1, 1)
if s > 3:
# 4 corners
yield (s - 2, s - 2)
yield (1, s - 2)
yield (s - 2, 1)
if s > 4:
for i in range(2, s - 2):
yield (i, 1)
yield (i, s - 2)
yield (1, i)
yield (s - 2, i)
def playToTheEnd(self, p1, p2):
""" alternate playing moves between players until the game is over. """
assert p1.color == -p2.color
i = 0
p1.game = self
p2.game = self
players = [p1, p2]
while not self.gameOver():
p = players[i]
self.performAction(p.getAction())
i = (i + 1) % 2
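# A minimal smoke test of the mechanics above (no agent classes needed,
# since randomBoard() places stones directly):
#
#   game = CaptureGame(5)
#   done = game.randomBoard(10)
#   print(game)
#   print(game.getKilling(CaptureGame.BLACK))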
| bsd-3-clause |
spring-week-topos/nova-week | nova/virt/disk/api.py | 6 | 23194 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH PGP keys into authorized_keys file.
"""
import os
import random
import tempfile
if os.name != 'nt':
import crypt
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import paths
from nova import utils
from nova.virt.disk.mount import api as mount
from nova.virt.disk.vfs import api as vfs
from nova.virt import images
LOG = logging.getLogger(__name__)
disk_opts = [
cfg.StrOpt('injected_network_template',
default=paths.basedir_def('nova/virt/interfaces.template'),
help='Template file for injected network'),
# NOTE(yamahata): ListOpt won't work because the command may include a
# comma. For example:
#
# mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16
# --label %(fs_label)s %(target)s
#
# list arguments are comma separated and there is no way to
# escape such commas.
#
cfg.MultiStrOpt('virt_mkfs',
default=[],
help='Name of the mkfs command for the ephemeral device. '
'The format is <os_type>=<mkfs command>'),
cfg.BoolOpt('resize_fs_using_block_device',
default=False,
help='Attempt to resize the filesystem by accessing the '
'image over a block device. This is done by the host '
'and may not be necessary if the image contains a recent '
'version of cloud-init. Possible mechanisms require '
'the nbd driver (for qcow and raw), or loop (for raw).'),
]
CONF = cfg.CONF
CONF.register_opts(disk_opts)
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
_DEFAULT_FS_BY_OSTYPE = {'linux': 'ext3',
'windows': 'ntfs'}
for s in CONF.virt_mkfs:
# NOTE(yamahata): mkfs command may include '=' in its options.
# So item.partition('=') doesn't work here
os_type, mkfs_command = s.split('=', 1)
if os_type:
_MKFS_COMMAND[os_type] = mkfs_command
if os_type == 'default':
_DEFAULT_MKFS_COMMAND = mkfs_command
def get_fs_type_for_os_type(os_type):
return os_type if _MKFS_COMMAND.get(os_type) else 'default'
def mkfs(os_type, fs_label, target, run_as_root=True):
"""Format a file or block device using
a user provided command for each os type.
If user has not provided any configuration,
format type will be used according to a
default_ephemeral_format configuration
or a system defaults.
"""
mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or
'') % {'fs_label': fs_label, 'target': target}
if mkfs_command:
utils.execute(*mkfs_command.split(), run_as_root=run_as_root)
else:
default_fs = CONF.default_ephemeral_format
if not default_fs:
default_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, 'ext3')
utils.mkfs(default_fs, target, fs_label, run_as_root=run_as_root)
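# Example configuration (assumed nova.conf values, following the
# <os_type>=<mkfs command> format documented above):
#   virt_mkfs = linux=mkfs.ext4 -L %(fs_label)s -F %(target)s
#   virt_mkfs = default=mkfs.ext3 -L %(fs_label)s -F %(target)s
# With these set, mkfs('linux', 'ephemeral0', '/dev/vdb') runs the ext4
# command, and any unknown os_type falls back to the 'default' entry.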
def resize2fs(image, check_exit_code=False, run_as_root=False):
try:
utils.execute('e2fsck',
'-fp',
image,
check_exit_code=[0, 1, 2],
run_as_root=run_as_root)
except processutils.ProcessExecutionError as exc:
LOG.debug(_("Checking the file system with e2fsck has failed, "
"the resize will be aborted. (%s)"), exc)
else:
utils.execute('resize2fs',
image,
check_exit_code=check_exit_code,
run_as_root=run_as_root)
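# Note on check_exit_code=[0, 1, 2] above: e2fsck exits 0 when the
# filesystem is clean, 1 when errors were corrected, and 2 when errors were
# corrected and a reboot is advised -- all three leave a consistent
# filesystem, so resize2fs can still proceed.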
def get_disk_size(path):
"""Get the (virtual) size of a disk image
:param path: Path to the disk image
:returns: Size (in bytes) of the given disk image as it would be seen
by a virtual machine.
"""
return images.qemu_img_info(path).virtual_size
def extend(image, size, use_cow=False):
"""Increase image to size."""
if not can_resize_image(image, size):
return
utils.execute('qemu-img', 'resize', image, size)
# if we can't access the filesystem, we can't do anything more
if not is_image_partitionless(image, use_cow):
return
def safe_resize2fs(dev, run_as_root=False, finally_call=lambda: None):
try:
resize2fs(dev, run_as_root=run_as_root, check_exit_code=[0])
except processutils.ProcessExecutionError as exc:
LOG.debug(_("Resizing the file system with resize2fs "
"has failed with error: %s"), exc)
finally:
finally_call()
# NOTE(vish): attempts to resize filesystem
if use_cow:
if CONF.resize_fs_using_block_device:
# in case of non-raw disks we can't just resize the image, but
# rather the mounted device instead
mounter = mount.Mount.instance_for_format(
image, None, None, 'qcow2')
if mounter.get_dev():
safe_resize2fs(mounter.device,
run_as_root=True,
finally_call=mounter.unget_dev)
else:
safe_resize2fs(image)
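# Typical usage (assumed path and size, for illustration):
#   extend('/var/lib/nova/instances/<uuid>/disk', 20 * 1024 ** 3,
#          use_cow=True)
# grows the image to 20 GiB and, when the image is partitionless and
# resize_fs_using_block_device is enabled, resizes the filesystem as well.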
def can_resize_image(image, size):
"""Check whether we can resize the container image file."""
LOG.debug(_('Checking if we can resize image %(image)s. '
'size=%(size)s'), {'image': image, 'size': size})
# Check that we're increasing the size
virt_size = get_disk_size(image)
if virt_size >= size:
LOG.debug(_('Cannot resize image %s to a smaller size.'),
image)
return False
return True
def is_image_partitionless(image, use_cow=False):
"""Check whether we can resize contained file system."""
LOG.debug(_('Checking if we can resize filesystem inside %(image)s. '
'CoW=%(use_cow)s'), {'image': image, 'use_cow': use_cow})
# Check the image is unpartitioned
if use_cow:
try:
fs = vfs.VFS.instance_for_image(image, 'qcow2', None)
fs.setup()
fs.teardown()
except exception.NovaException as e:
LOG.debug(_('Unable to mount image %(image)s with '
'error %(error)s. Cannot resize.'),
{'image': image,
'error': e})
return False
else:
# For raw, we can directly inspect the file system
try:
utils.execute('e2label', image)
except processutils.ProcessExecutionError as e:
LOG.debug(_('Unable to determine label for image %(image)s with '
'error %(error)s. Cannot resize.'),
{'image': image,
'error': e})
return False
return True
class _DiskImage(object):
"""Provide operations on a disk image file."""
tmp_prefix = 'openstack-disk-mount-tmp'
def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
# These passed to each mounter
self.image = image
self.partition = partition
self.mount_dir = mount_dir
self.use_cow = use_cow
# Internal
self._mkdir = False
self._mounter = None
self._errors = []
if mount_dir:
device = self._device_for_path(mount_dir)
if device:
self._reset(device)
@staticmethod
def _device_for_path(path):
device = None
path = os.path.realpath(path)
with open("/proc/mounts", 'r') as ifp:
for line in ifp:
fields = line.split()
if fields[1] == path:
device = fields[0]
break
return device
def _reset(self, device):
"""Reset internal state for a previously mounted directory."""
self._mounter = mount.Mount.instance_for_device(self.image,
self.mount_dir,
self.partition,
device)
mount_name = os.path.basename(self.mount_dir or '')
self._mkdir = mount_name.startswith(self.tmp_prefix)
@property
def errors(self):
"""Return the collated errors from all operations."""
return '\n--\n'.join([''] + self._errors)
def mount(self):
"""Mount a disk image, using the object attributes.
The first supported means provided by the mount classes is used.
True, or False is returned and the 'errors' attribute
contains any diagnostics.
"""
if self._mounter:
raise exception.NovaException(_('image already mounted'))
if not self.mount_dir:
self.mount_dir = tempfile.mkdtemp(prefix=self.tmp_prefix)
self._mkdir = True
imgfmt = "raw"
if self.use_cow:
imgfmt = "qcow2"
mounter = mount.Mount.instance_for_format(self.image,
self.mount_dir,
self.partition,
imgfmt)
if mounter.do_mount():
self._mounter = mounter
return self._mounter.device
else:
LOG.debug(mounter.error)
self._errors.append(mounter.error)
return None
def umount(self):
"""Umount a mount point from the filesystem."""
if self._mounter:
self._mounter.do_umount()
self._mounter = None
def teardown(self):
"""Remove a disk image from the file system."""
try:
if self._mounter:
self._mounter.do_teardown()
self._mounter = None
finally:
if self._mkdir:
os.rmdir(self.mount_dir)
# Public module functions
def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
files=None, partition=None, use_cow=False, mandatory=()):
"""Inject the specified items into a disk image.
If an item name is not specified in the MANDATORY iterable, then a warning
is logged on failure to inject that item, rather than raising an exception.
If PARTITION is specified, the image is mounted as a fully partitioned
disk and injection is attempted into that partition number.
If PARTITION is not specified the image is mounted as a single partition.
Returns True if all requested operations completed without issue.
Raises an exception if a mandatory item can't be injected.
"""
LOG.debug(_("Inject data image=%(image)s key=%(key)s net=%(net)s "
"metadata=%(metadata)s admin_password=<SANITIZED> "
"files=%(files)s partition=%(partition)s use_cow=%(use_cow)s"),
{'image': image, 'key': key, 'net': net, 'metadata': metadata,
'files': files, 'partition': partition, 'use_cow': use_cow})
fmt = "raw"
if use_cow:
fmt = "qcow2"
try:
# Note(mrda): Test if the image exists first to short circuit errors
os.stat(image)
fs = vfs.VFS.instance_for_image(image, fmt, partition)
fs.setup()
except Exception as e:
# If a mandatory item is passed to this function,
# then reraise the exception to indicate the error.
for inject in mandatory:
inject_val = locals()[inject]
if inject_val:
raise
LOG.warn(_('Ignoring error injecting data into image '
'(%(e)s)'), {'e': e})
return False
try:
return inject_data_into_fs(fs, key, net, metadata,
admin_password, files, mandatory)
finally:
fs.teardown()
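# Example call (assumed values; key is an ssh public key string and net the
# desired /etc/network/interfaces contents):
#   inject_data('/path/to/disk', key=pubkey, net=ifaces, partition=1,
#               use_cow=False, mandatory=('key',))
# With mandatory=('key',), failing to inject the key raises instead of
# just logging a warning.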
def setup_container(image, container_dir, use_cow=False):
"""Setup the LXC container.
It will mount the loopback image to the container directory in order
to create the root filesystem for the container.
Returns path of image device which is mounted to the container directory.
"""
img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
dev = img.mount()
if dev is None:
LOG.error(_("Failed to mount container filesystem '%(image)s' "
"on '%(target)s': %(errors)s"),
{"image": img, "target": container_dir,
"errors": img.errors})
raise exception.NovaException(img.errors)
return dev
def teardown_container(container_dir, container_root_device=None):
"""Teardown the container rootfs mounting once it is spawned.
It will umount the container that is mounted,
and delete any linked devices.
"""
try:
img = _DiskImage(image=None, mount_dir=container_dir)
img.teardown()
# Make sure container_root_device is released when teardown container.
if container_root_device:
if 'loop' in container_root_device:
LOG.debug(_("Release loop device %s"), container_root_device)
utils.execute('losetup', '--detach', container_root_device,
run_as_root=True, attempts=3)
else:
LOG.debug(_('Release nbd device %s'), container_root_device)
utils.execute('qemu-nbd', '-d', container_root_device,
run_as_root=True)
except Exception as exn:
LOG.exception(_('Failed to teardown container filesystem: %s'), exn)
def clean_lxc_namespace(container_dir):
"""Clean up the container namespace rootfs mounting one spawned.
It will umount the mounted names that are mounted
but leave the linked devices alone.
"""
try:
img = _DiskImage(image=None, mount_dir=container_dir)
img.umount()
except Exception as exn:
LOG.exception(_('Failed to umount container filesystem: %s'), exn)
def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
mandatory=()):
"""Injects data into a filesystem already mounted by the caller.
Virt connections can call this directly if they mount their fs
in a different way to inject_data.
If an item name is not specified in the MANDATORY iterable, then a warning
is logged on failure to inject that item, rather than raising an exception.
Returns True if all requested operations completed without issue.
Raises an exception if a mandatory item can't be injected.
"""
status = True
for inject in ('key', 'net', 'metadata', 'admin_password', 'files'):
inject_val = locals()[inject]
inject_func = globals()['_inject_%s_into_fs' % inject]
if inject_val:
try:
inject_func(inject_val, fs)
except Exception as e:
if inject in mandatory:
raise
LOG.warn(_('Ignoring error injecting %(inject)s into image '
'(%(e)s)'), {'e': e, 'inject': inject})
status = False
return status
def _inject_files_into_fs(files, fs):
for (path, contents) in files:
# NOTE(wangpan): Ensure the parent dir of injecting file exists
parent_dir = os.path.dirname(path)
if (len(parent_dir) > 0 and parent_dir != "/"
and not fs.has_file(parent_dir)):
fs.make_path(parent_dir)
fs.set_ownership(parent_dir, "root", "root")
fs.set_permissions(parent_dir, 0o744)
_inject_file_into_fs(fs, path, contents)
def _inject_file_into_fs(fs, path, contents, append=False):
LOG.debug(_("Inject file fs=%(fs)s path=%(path)s append=%(append)s"),
{'fs': fs, 'path': path, 'append': append})
if append:
fs.append_file(path, contents)
else:
fs.replace_file(path, contents)
def _inject_metadata_into_fs(metadata, fs):
LOG.debug(_("Inject metadata fs=%(fs)s metadata=%(metadata)s"),
{'fs': fs, 'metadata': metadata})
_inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata))
def _setup_selinux_for_keys(fs, sshdir):
"""Get selinux guests to ensure correct context on injected keys."""
if not fs.has_file(os.path.join("etc", "selinux")):
return
rclocal = os.path.join('etc', 'rc.local')
rc_d = os.path.join('etc', 'rc.d')
if not fs.has_file(rclocal) and fs.has_file(rc_d):
rclocal = os.path.join(rc_d, 'rc.local')
# Note some systems end rc.local with "exit 0"
# and so to append there you'd need something like:
# utils.execute('sed', '-i', '${/^exit 0$/d}', rclocal, run_as_root=True)
restorecon = [
'\n',
'# Added by Nova to ensure injected ssh keys have the right context\n',
'restorecon -RF %s 2>/dev/null || :\n' % sshdir,
]
if not fs.has_file(rclocal):
restorecon.insert(0, '#!/bin/sh')
_inject_file_into_fs(fs, rclocal, ''.join(restorecon), append=True)
fs.set_permissions(rclocal, 0o700)
def _inject_key_into_fs(key, fs):
"""Add the given public ssh key to root's authorized_keys.
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
LOG.debug(_("Inject key fs=%(fs)s key=%(key)s"), {'fs': fs, 'key': key})
sshdir = os.path.join('root', '.ssh')
fs.make_path(sshdir)
fs.set_ownership(sshdir, "root", "root")
fs.set_permissions(sshdir, 0o700)
keyfile = os.path.join(sshdir, 'authorized_keys')
key_data = ''.join([
'\n',
'# The following ssh key was injected by Nova',
'\n',
key.strip(),
'\n',
])
_inject_file_into_fs(fs, keyfile, key_data, append=True)
fs.set_permissions(keyfile, 0o600)
_setup_selinux_for_keys(fs, sshdir)
def _inject_net_into_fs(net, fs):
"""Inject /etc/network/interfaces into the filesystem rooted at fs.
net is the contents of /etc/network/interfaces.
"""
LOG.debug(_("Inject key fs=%(fs)s net=%(net)s"), {'fs': fs, 'net': net})
netdir = os.path.join('etc', 'network')
fs.make_path(netdir)
fs.set_ownership(netdir, "root", "root")
fs.set_permissions(netdir, 0o744)
netfile = os.path.join('etc', 'network', 'interfaces')
_inject_file_into_fs(fs, netfile, net)
def _inject_admin_password_into_fs(admin_passwd, fs):
"""Set the root password to admin_passwd
admin_passwd is the plain-text root password.
fs is the path to the base of the filesystem into which to inject
the password.
This method modifies the instance filesystem directly,
and does not require a guest agent running in the instance.
"""
# The approach used here is to copy the password and shadow
# files from the instance filesystem to local files, make any
# necessary changes, and then copy them back.
LOG.debug(_("Inject admin password fs=%(fs)s "
"admin_passwd=<SANITIZED>"), {'fs': fs})
admin_user = 'root'
passwd_path = os.path.join('etc', 'passwd')
shadow_path = os.path.join('etc', 'shadow')
passwd_data = fs.read_file(passwd_path)
shadow_data = fs.read_file(shadow_path)
new_shadow_data = _set_passwd(admin_user, admin_passwd,
passwd_data, shadow_data)
fs.replace_file(shadow_path, new_shadow_data)
def _generate_salt():
salt_set = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789./')
return ''.join([random.choice(salt_set) for _ in range(16)])
def _set_passwd(username, admin_passwd, passwd_data, shadow_data):
"""set the password for username to admin_passwd
The passwd_file is not modified. The shadow_file is updated.
if the username is not found in both files, an exception is raised.
:param username: the username
:param encrypted_passwd: the encrypted password
:param passwd_file: path to the passwd file
:param shadow_file: path to the shadow password file
:returns: nothing
:raises: exception.NovaException(), IOError()
"""
if os.name == 'nt':
raise exception.NovaException(_('Not implemented on Windows'))
# encryption algo - id pairs for crypt()
algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}
salt = _generate_salt()
# crypt() depends on the underlying libc, and may not support all
# forms of hash. We try md5 first. If we get only 13 characters back,
# then the underlying crypt() didn't understand the '$n$salt' magic,
# so we fall back to DES.
# md5 is the default because it's widely supported. Although the
# local crypt() might support stronger SHA, the target instance
# might not.
encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt)
if len(encrypted_passwd) == 13:
encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)
p_file = passwd_data.split("\n")
s_file = shadow_data.split("\n")
# username MUST exist in passwd file or it's an error
found = False
for entry in p_file:
split_entry = entry.split(':')
if split_entry[0] == username:
found = True
break
if not found:
msg = _('User %(username)s not found in password file.')
raise exception.NovaException(msg % {'username': username})
# Update the password in the shadow file. It's an error if the
# user doesn't exist.
new_shadow = list()
found = False
for entry in s_file:
split_entry = entry.split(':')
if split_entry[0] == username:
split_entry[1] = encrypted_passwd
found = True
new_entry = ':'.join(split_entry)
new_shadow.append(new_entry)
if not found:
msg = _('User %(username)s not found in shadow file.')
raise exception.NovaException(msg % {'username': username})
return "\n".join(new_shadow)
| apache-2.0 |
mieand/kmos | kmos/config.py | 3 | 1024 | #!/usr/bin/env python
import os
# Copyright 2009-2013 Max J. Hoffmann (mjhoffmann@gmail.com)
# This file is part of kmos.
#
# kmos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kmos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kmos. If not, see <http://www.gnu.org/licenses/>.
APP_ABS_PATH = os.path.dirname(os.path.abspath(__file__))
GLADEFILE = os.path.join(APP_ABS_PATH, 'kmc_editor.glade')
GLADEFILE = 'kmos/kmc_editor.glade'
try:
import kiwi
kiwi.environ.environ.add_resource('glade', APP_ABS_PATH)
except:
pass
| gpl-3.0 |
windxixi/OptiWiz-Kernel-F200-JB | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
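# A typical invocation (assuming perf.data was recorded with the two
# tracepoints handled below):
#   perf record -e irq:softirq_entry -e kmem:kmalloc -a -- sleep 1
#   perf script -s check-perf-trace.py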
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
arthru/OpenUpgrade | addons/lunch/lunch.py | 39 | 23388 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from xml.sax.saxutils import escape
import time
from openerp.osv import fields, osv
from datetime import datetime
from lxml import etree
from openerp import tools
from openerp.tools.translate import _
class lunch_order(osv.Model):
"""
lunch order (contains one or more lunch order line(s))
"""
_name = 'lunch.order'
_description = 'Lunch Order'
_order = 'date desc'
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = []
for elmt in self.browse(cr, uid, ids, context=context):
name = _("Lunch Order")
name = name + ' ' + str(elmt.id)
res.append((elmt.id, name))
return res
def _price_get(self, cr, uid, ids, name, arg, context=None):
"""
get and sum the order lines' price
"""
result = dict.fromkeys(ids, 0)
for order in self.browse(cr, uid, ids, context=context):
result[order.id] = sum(order_line.product_id.price
for order_line in order.order_line_ids)
return result
def _fetch_orders_from_lines(self, cr, uid, ids, name, context=None):
"""
return the list of lunch orders to which the order lines `ids` belong
"""
result = set()
for order_line in self.browse(cr, uid, ids, context=context):
if order_line.order_id:
result.add(order_line.order_id.id)
return list(result)
def add_preference(self, cr, uid, ids, pref_id, context=None):
"""
create a new order line based on the preference selected (pref_id)
"""
assert len(ids) == 1
orderline_ref = self.pool.get('lunch.order.line')
prod_ref = self.pool.get('lunch.product')
order = self.browse(cr, uid, ids[0], context=context)
pref = orderline_ref.browse(cr, uid, pref_id, context=context)
new_order_line = {
'date': order.date,
'user_id': uid,
'product_id': pref.product_id.id,
'note': pref.note,
'order_id': order.id,
'price': pref.product_id.price,
'supplier': pref.product_id.supplier.id
}
return orderline_ref.create(cr, uid, new_order_line, context=context)
def _alerts_get(self, cr, uid, ids, name, arg, context=None):
"""
get the alerts to display on the order form
"""
result = {}
alert_msg = self._default_alerts_get(cr, uid, context=context)
for order in self.browse(cr, uid, ids, context=context):
if order.state == 'new':
result[order.id] = alert_msg
return result
def check_day(self, alert):
"""
This method is used by can_display_alert
to check if the alert day corresponds
to the current day
"""
today = datetime.now().isoweekday()
assert 1 <= today <= 7, "Should be between 1 and 7"
mapping = dict((idx, name) for idx, name in enumerate('days monday tuesday wednesday thursday friday saturday sunday'.split()))
return alert[mapping[today]]
def can_display_alert(self, alert):
"""
This method checks whether the alert can be displayed today
"""
if alert.alter_type == 'specific':
#the alert is only activated on a specific day
return alert.specific_day == time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
elif alert.alter_type == 'week':
#the alert is activated during some days of the week
return self.check_day(alert)
return True # alter_type == 'days' (every day)
def _default_alerts_get(self, cr, uid, context=None):
"""
get the alerts to display on the order form
"""
alert_ref = self.pool.get('lunch.alert')
alert_ids = alert_ref.search(cr, uid, [], context=context)
alert_msg = []
for alert in alert_ref.browse(cr, uid, alert_ids, context=context):
#check if the alert must be displayed today
if self.can_display_alert(alert):
#display the alert only during its active time
mynow = fields.datetime.context_timestamp(cr, uid, datetime.now(), context=context)
hour_to = int(alert.active_to)
min_to = int((alert.active_to - hour_to) * 60)
to_alert = datetime.strptime(str(hour_to) + ":" + str(min_to), "%H:%M")
hour_from = int(alert.active_from)
min_from = int((alert.active_from - hour_from) * 60)
from_alert = datetime.strptime(str(hour_from) + ":" + str(min_from), "%H:%M")
if mynow.time() >= from_alert.time() and mynow.time() <= to_alert.time():
alert_msg.append(alert.message)
return '\n'.join(alert_msg)
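# For illustration: active_from/active_to encode times as float hours, so
# active_from=7.5 is parsed above as 07:30. An alert with
# alter_type='days', active_from=7 and active_to=23 is therefore shown on
# every new order between 07:00 and 23:00.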
def onchange_price(self, cr, uid, ids, order_line_ids, context=None):
"""
Onchange method that refreshes the total price of the order
"""
res = {'value': {'total': 0.0}}
order_line_ids = self.resolve_o2m_commands_to_record_dicts(cr, uid, "order_line_ids", order_line_ids, ["price"], context=context)
if order_line_ids:
tot = 0.0
product_ref = self.pool.get("lunch.product")
for prod in order_line_ids:
if 'product_id' in prod:
tot += product_ref.browse(cr, uid, prod['product_id'], context=context).price
else:
tot += prod['price']
res = {'value': {'total': tot}}
return res
def __getattr__(self, attr):
"""
this method catches calls to nonexistent methods; if the name starts
with add_preference_'n' we execute the add_preference method with
'n' as the parameter
"""
if attr.startswith('add_preference_'):
pref_id = int(attr[15:])
def specific_function(cr, uid, ids, context=None):
return self.add_preference(cr, uid, ids, pref_id, context=context)
return specific_function
return super(lunch_order, self).__getattr__(attr)
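# For illustration: the "+"/"Add" buttons generated in fields_view_get
# below are named 'add_preference_<id>', so clicking one dispatches through
# __getattr__ above and ultimately calls add_preference(cr, uid, ids, <id>).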
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
"""
Add preferences in the form view of order.line
"""
res = super(lunch_order,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
line_ref = self.pool.get("lunch.order.line")
if view_type == 'form':
doc = etree.XML(res['arch'])
pref_ids = line_ref.search(cr, uid, [('user_id', '=', uid)], order='id desc', context=context)
xml_start = etree.Element("div")
#If there are no preference (it's the first time for the user)
if len(pref_ids)==0:
#create Elements
xml_no_pref_1 = etree.Element("div")
xml_no_pref_1.set('class','oe_inline oe_lunch_intro')
xml_no_pref_2 = etree.Element("h3")
xml_no_pref_2.text = _("This is the first time you order a meal")
xml_no_pref_3 = etree.Element("p")
xml_no_pref_3.set('class','oe_grey')
xml_no_pref_3.text = _("Select a product and put your order comments on the note.")
xml_no_pref_4 = etree.Element("p")
xml_no_pref_4.set('class','oe_grey')
xml_no_pref_4.text = _("Your favorite meals will be created based on your last orders.")
xml_no_pref_5 = etree.Element("p")
xml_no_pref_5.set('class','oe_grey')
xml_no_pref_5.text = _("Don't forget the alerts displayed in the reddish area")
#structure Elements
xml_start.append(xml_no_pref_1)
xml_no_pref_1.append(xml_no_pref_2)
xml_no_pref_1.append(xml_no_pref_3)
xml_no_pref_1.append(xml_no_pref_4)
xml_no_pref_1.append(xml_no_pref_5)
#Else: the user already have preferences so we display them
else:
preferences = line_ref.browse(cr, uid, pref_ids, context=context)
categories = {} #store the different categories of products in preference
count = 0
for pref in preferences:
#For each preference
categories.setdefault(pref.product_id.category_id.name, {})
#if this product has already been added to the categories dictionary
if pref.product_id.id in categories[pref.product_id.category_id.name]:
#we check whether the same note has already been added for this product
if pref.note not in categories[pref.product_id.category_id.name][pref.product_id.id]:
#if it's not the case then we add this to preferences
categories[pref.product_id.category_id.name][pref.product_id.id][pref.note] = pref
#if this product is not in the dictionary, we add it
else:
categories[pref.product_id.category_id.name][pref.product_id.id] = {}
categories[pref.product_id.category_id.name][pref.product_id.id][pref.note] = pref
currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
#For each preferences that we get, we will create the XML structure
for key, value in categories.items():
xml_pref_1 = etree.Element("div")
xml_pref_1.set('class', 'oe_lunch_30pc')
xml_pref_2 = etree.Element("h2")
xml_pref_2.text = key
xml_pref_1.append(xml_pref_2)
i = 0
value = value.values()
#TODO: sorted_values is used for a quick and dirty hack in order to display the 5 last orders of each categories.
#It would be better to fetch only the 5 items to display instead of fetching everything then sorting them in order to keep only the 5 last.
#NB: The note could also be ignored + we could fetch the preferences on the most ordered products instead of the last ones...
sorted_values = {}
for val in value:
for elmt in val.values():
sorted_values[elmt.id] = elmt
for key, pref in sorted(sorted_values.iteritems(), key=lambda (k, v): (k, v), reverse=True):
#We only show 5 preferences per category (or it will be too long)
if i == 5:
break
i += 1
xml_pref_3 = etree.Element("div")
xml_pref_3.set('class','oe_lunch_vignette')
xml_pref_1.append(xml_pref_3)
xml_pref_4 = etree.Element("span")
xml_pref_4.set('class','oe_lunch_button')
xml_pref_3.append(xml_pref_4)
xml_pref_5 = etree.Element("button")
xml_pref_5.set('name',"add_preference_"+str(pref.id))
xml_pref_5.set('class','oe_link oe_i oe_button_plus')
xml_pref_5.set('type','object')
xml_pref_5.set('string','+')
xml_pref_4.append(xml_pref_5)
xml_pref_6 = etree.Element("button")
xml_pref_6.set('name',"add_preference_"+str(pref.id))
xml_pref_6.set('class','oe_link oe_button_add')
xml_pref_6.set('type','object')
xml_pref_6.set('string',_("Add"))
xml_pref_4.append(xml_pref_6)
xml_pref_7 = etree.Element("div")
xml_pref_7.set('class','oe_group_text_button')
xml_pref_3.append(xml_pref_7)
xml_pref_8 = etree.Element("div")
xml_pref_8.set('class','oe_lunch_text')
xml_pref_8.text = escape(pref.product_id.name)+str(" ")
xml_pref_7.append(xml_pref_8)
price = pref.product_id.price or 0.0
cur = currency.name or ''
xml_pref_9 = etree.Element("span")
xml_pref_9.set('class','oe_tag')
xml_pref_9.text = str(price)+str(" ")+cur
xml_pref_8.append(xml_pref_9)
xml_pref_10 = etree.Element("div")
xml_pref_10.set('class','oe_grey')
xml_pref_10.text = escape(pref.note or '')
xml_pref_3.append(xml_pref_10)
xml_start.append(xml_pref_1)
first_node = doc.xpath("//div[@name='preferences']")
if first_node and len(first_node)>0:
first_node[0].append(xml_start)
res['arch'] = etree.tostring(doc)
return res
_columns = {
'user_id': fields.many2one('res.users', 'User Name', required=True, readonly=True, states={'new':[('readonly', False)]}),
'date': fields.date('Date', required=True, readonly=True, states={'new':[('readonly', False)]}),
'order_line_ids': fields.one2many('lunch.order.line', 'order_id', 'Products', ondelete="cascade", readonly=True, states={'new':[('readonly', False)]}),
'total': fields.function(_price_get, string="Total", store={
'lunch.order.line': (_fetch_orders_from_lines, ['product_id','order_id'], 20),
}),
'state': fields.selection([('new', 'New'), \
('confirmed','Confirmed'), \
('cancelled','Cancelled'), \
('partially','Partially Confirmed')] \
,'Status', readonly=True, select=True),
'alerts': fields.function(_alerts_get, string="Alerts", type='text'),
}
_defaults = {
'user_id': lambda self, cr, uid, context: uid,
'date': fields.date.context_today,
'state': 'new',
'alerts': _default_alerts_get,
}
class lunch_order_line(osv.Model):
"""
lunch order line: one lunch order can have many order lines
"""
_name = 'lunch.order.line'
_description = 'lunch order line'
def onchange_price(self, cr, uid, ids, product_id, context=None):
if product_id:
price = self.pool.get('lunch.product').browse(cr, uid, product_id, context=context).price
return {'value': {'price': price}}
return {'value': {'price': 0.0}}
def order(self, cr, uid, ids, context=None):
"""
The order_line is ordered from the supplier but hasn't been received yet
"""
for order_line in self.browse(cr, uid, ids, context=context):
order_line.write({'state': 'ordered'}, context=context)
return self._update_order_lines(cr, uid, ids, context=context)
def confirm(self, cr, uid, ids, context=None):
"""
confirm one or more order line, update order status and create new cashmove
"""
cashmove_ref = self.pool.get('lunch.cashmove')
for order_line in self.browse(cr, uid, ids, context=context):
if order_line.state != 'confirmed':
values = {
'user_id': order_line.user_id.id,
'amount': -order_line.price,
'description': order_line.product_id.name,
'order_id': order_line.id,
'state': 'order',
'date': order_line.date,
}
cashmove_ref.create(cr, uid, values, context=context)
order_line.write({'state': 'confirmed'}, context=context)
return self._update_order_lines(cr, uid, ids, context=context)
def _update_order_lines(self, cr, uid, ids, context=None):
"""
Update the state of lunch.order based on its orderlines
"""
orders_ref = self.pool.get('lunch.order')
orders = []
for order_line in self.browse(cr, uid, ids, context=context):
orders.append(order_line.order_id)
for order in set(orders):
isconfirmed = True
for orderline in order.order_line_ids:
if orderline.state == 'new':
isconfirmed = False
if orderline.state == 'cancelled':
isconfirmed = False
orders_ref.write(cr, uid, [order.id], {'state': 'partially'}, context=context)
if isconfirmed:
orders_ref.write(cr, uid, [order.id], {'state': 'confirmed'}, context=context)
return {}
def cancel(self, cr, uid, ids, context=None):
"""
cancel one or more order.line, update order status and unlink existing cashmoves
"""
cashmove_ref = self.pool.get('lunch.cashmove')
for order_line in self.browse(cr, uid, ids, context=context):
order_line.write({'state':'cancelled'}, context=context)
cash_ids = [cash.id for cash in order_line.cashmove]
cashmove_ref.unlink(cr, uid, cash_ids, context=context)
return self._update_order_lines(cr, uid, ids, context=context)
def _get_line_order_ids(self, cr, uid, ids, context=None):
"""
return the list of lunch.order.line ids that belong to the lunch.order records 'ids'
"""
result = set()
for lunch_order in self.browse(cr, uid, ids, context=context):
for lines in lunch_order.order_line_ids:
result.add(lines.id)
return list(result)
_columns = {
'name': fields.related('product_id', 'name', readonly=True),
'order_id': fields.many2one('lunch.order', 'Order', ondelete='cascade'),
'product_id': fields.many2one('lunch.product', 'Product', required=True),
'date': fields.related('order_id', 'date', type='date', string="Date", readonly=True, store={
'lunch.order': (_get_line_order_ids, ['date'], 10),
'lunch.order.line': (lambda self, cr, uid, ids, ctx: ids, [], 10),
}),
'supplier': fields.related('product_id', 'supplier', type='many2one', relation='res.partner', string="Supplier", readonly=True, store=True),
'user_id': fields.related('order_id', 'user_id', type='many2one', relation='res.users', string='User', readonly=True, store=True),
'note': fields.text('Note'),
'price': fields.float("Price"),
'state': fields.selection([('new', 'New'), \
('confirmed', 'Received'), \
('ordered', 'Ordered'), \
('cancelled', 'Cancelled')], \
'Status', readonly=True, select=True),
'cashmove': fields.one2many('lunch.cashmove', 'order_id', 'Cash Move', ondelete='cascade'),
}
_defaults = {
'state': 'new',
}
class lunch_product(osv.Model):
"""
lunch product
"""
_name = 'lunch.product'
_description = 'lunch product'
_columns = {
'name': fields.char('Product', required=True, size=64),
'category_id': fields.many2one('lunch.product.category', 'Category', required=True),
'description': fields.text('Description', size=256),
'price': fields.float('Price', digits=(16,2)), #TODO: use decimal precision of 'Account', move it from product to decimal_precision
'supplier': fields.many2one('res.partner', 'Supplier'),
}
class lunch_product_category(osv.Model):
"""
lunch product category
"""
_name = 'lunch.product.category'
_description = 'lunch product category'
_columns = {
'name': fields.char('Category', required=True), #such as PIZZA, SANDWICH, PASTA, CHINESE, BURGER, ...
}
class lunch_cashmove(osv.Model):
"""
lunch cashmove => order or payment
"""
_name = 'lunch.cashmove'
_description = 'lunch cashmove'
_columns = {
'user_id': fields.many2one('res.users', 'User Name', required=True),
'date': fields.date('Date', required=True),
'amount': fields.float('Amount', required=True), #depending on the kind of cashmove, the amount will be positive or negative
'description': fields.text('Description'), #the description can be an order or a payment
'order_id': fields.many2one('lunch.order.line', 'Order', ondelete='cascade'),
'state': fields.selection([('order','Order'), ('payment','Payment')], 'Is an order or a Payment'),
}
_defaults = {
'user_id': lambda self, cr, uid, context: uid,
'date': fields.date.context_today,
'state': 'payment',
}
class lunch_alert(osv.Model):
"""
lunch alert
"""
_name = 'lunch.alert'
_description = 'Lunch Alert'
_columns = {
'message': fields.text('Message', size=256, required=True),
'alter_type': fields.selection([('specific', 'Specific Day'), \
('week', 'Every Week'), \
('days', 'Every Day')], \
string='Recurrency', required=True, select=True),
'specific_day': fields.date('Day'),
'monday': fields.boolean('Monday'),
'tuesday': fields.boolean('Tuesday'),
'wednesday': fields.boolean('Wednesday'),
'thursday': fields.boolean('Thursday'),
'friday': fields.boolean('Friday'),
'saturday': fields.boolean('Saturday'),
'sunday': fields.boolean('Sunday'),
'active_from': fields.float('Between', required=True),
'active_to': fields.float('And', required=True),
}
_defaults = {
'alter_type': 'specific',
'specific_day': fields.date.context_today,
'active_from': 7,
'active_to': 23,
}
| agpl-3.0 |
Glasgow2015/team-10 | env/lib/python2.7/site-packages/cms/south_migrations/0039_auto__del_field_page_moderator_state.py | 48 | 15792 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Page.moderator_state'
db.delete_column('cms_page', 'moderator_state')
def backwards(self, orm):
# Adding field 'Page.moderator_state'
db.add_column('cms_page', 'moderator_state',
self.gf('django.db.models.fields.SmallIntegerField')(default=0, blank=True),
keep_default=False)
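# Applied or reverted with South in the usual way (assumed standard
# project layout):
#   python manage.py migrate cms 0039
#   python manage.py migrate cms 0038   # rolls this migration back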
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | apache-2.0 |
hkawasaki/kawasaki-aio8-1 | common/test/acceptance/pages/lms/register.py | 41 | 1816 | """
Registration page (create a new account)
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
from .dashboard import DashboardPage
class RegisterPage(PageObject):
"""
Registration page (create a new account)
"""
def __init__(self, browser, course_id):
"""
Course ID is currently of the form "edx/999/2013_Spring"
but this format could change.
"""
super(RegisterPage, self).__init__(browser)
self._course_id = course_id
@property
def url(self):
"""
URL for the registration page of a course.
"""
return "{base}/register?course_id={course_id}&enrollment_action={action}".format(
base=BASE_URL,
course_id=self._course_id,
action="enroll",
)
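        # Illustrative result (added comment; the host value is hypothetical):
        # with BASE_URL "http://localhost:8003" and the example course id,
        # this returns
        # http://localhost:8003/register?course_id=edx/999/2013_Spring&enrollment_action=enroll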
def is_browser_on_page(self):
return any([
'register' in title.lower()
for title in self.q(css='span.title-sub').text
])
def provide_info(self, email, password, username, full_name):
"""
Fill in registration info.
`email`, `password`, `username`, and `full_name` are the user's credentials.
"""
self.q(css='input#email').fill(email)
self.q(css='input#password').fill(password)
self.q(css='input#username').fill(username)
self.q(css='input#name').fill(full_name)
self.q(css='input#tos-yes').first.click()
self.q(css='input#honorcode-yes').first.click()
def submit(self):
"""
Submit registration info to create an account.
"""
self.q(css='button#submit').first.click()
# The next page is the dashboard; make sure it loads
dashboard = DashboardPage(self.browser)
dashboard.wait_for_page()
return dashboard
| agpl-3.0 |
zhiwliu/openshift-ansible | roles/lib_utils/src/class/yedit.py | 14 | 23086 | # flake8: noqa
# pylint: skip-file
class YeditException(Exception):
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add or update an item in a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a#b (with '#' as the separator)
            sets d['a']['b'] to item
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
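    # Illustrative use of the key notation above (added comment, not part of
    # the upstream module): with data = {'a': {'b': ['x', 'y']}} and the
    # default '.' separator, Yedit.get_entry(data, 'a.b[1]') walks dict key
    # 'a', then 'b', then list index 1, and returns 'y'.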
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
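    # Illustrative behavior (added comment, not upstream): parse_value('yes',
    # 'bool') passes the true_bools check and is yaml-loaded into True;
    # parse_value('3', '') yaml-loads into the int 3; parse_value('3', 'str')
    # is returned unchanged as the string '3'.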
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
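    # Illustrative edits payload (added comment; the key name is hypothetical):
    # process_edits([{'key': 'spec.replicas', 'value': '3'}], yamlfile) parses
    # '3' into the int 3 via Yedit.parse_value and, with no 'action' given,
    # calls yamlfile.put('spec.replicas', 3).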
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
        # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
| apache-2.0 |
bkahlert/seqan-research | raw/workshop12/workshop2012-data-20120906/trunk/misc/seqan_instrumentation/bin/classes/simplejson/tests/test_pass1.py | 259 | 1903 | from unittest import TestCase
import simplejson as json
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E666,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7          ],
        "compact": [1,2,3,4,5,6,7],
        "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
        "quotes": "&#34; \u0022 %22 0x22 034 &#34;",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066
,"rosebud"]
'''
class TestPass1(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
try:
json.dumps(res, allow_nan=False)
except ValueError:
pass
else:
self.fail("23456789012E666 should be out of range")
| mit |
NProfileAnalysisComputationalTool/npact | pynpact/tests/steps/test_allplots.py | 1 | 1699 | import os.path
from pynpact import parsing
from pynpact.steps import allplots
from pynpact.util import which
def test_binfile_exists():
assert allplots.BIN
assert os.path.exists(allplots.BIN)
def test_build_allplotsdef(gbkconfig):
parsing.length(gbkconfig)
lines = allplots.build_allplots_def(gbkconfig, 1).split('\n')
assert 26 == len(lines)
def test_plan_allplots(gbkconfig, executor):
allplots.allplots(gbkconfig, executor)
assert len(gbkconfig['psnames']) > 0
assert gbkconfig['psnames'][0] is not None
def test_combine_ps(gbkconfig, executor):
tasks = allplots.combine_ps_files(gbkconfig, executor)
assert len(tasks) == 1
psname = tasks[0]
assert gbkconfig['combined_ps_name']
assert gbkconfig['combined_ps_name'] == psname
assert gbkconfig['allplots_result'] == gbkconfig['combined_ps_name']
def test_ps_to_pdf(gbkconfig, null_executor):
jobs = allplots.convert_ps_to_pdf(gbkconfig, null_executor)
assert len(jobs) == 1
pdf_filename = jobs[0]
assert pdf_filename == gbkconfig['pdf_filename']
assert gbkconfig['pdf_filename'] == gbkconfig['allplots_result']
def test_all_the_way(gbkconfig, executor):
filename = allplots.plan(gbkconfig, executor)[0]
if which('ps2pdf'):
assert filename == gbkconfig['pdf_filename']
else:
assert filename == gbkconfig['combined_ps_name']
def test_all_the_way_async(gbkconfig, async_executor):
filename = allplots.plan(gbkconfig, async_executor)[0]
if which('ps2pdf'):
assert filename == gbkconfig['pdf_filename']
else:
assert filename == gbkconfig['combined_ps_name']
async_executor.result(filename, timeout=10)
| bsd-3-clause |
kholidfu/django | tests/indexes/models.py | 253 | 1714 | from django.db import connection, models
class CurrentTranslation(models.ForeignObject):
"""
Creates virtual relation to the translation with model cache enabled.
"""
# Avoid validation
requires_unique_target = False
def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
# Disable reverse relation
kwargs['related_name'] = '+'
# Set unique to enable model cache.
kwargs['unique'] = True
super(CurrentTranslation, self).__init__(to, on_delete, from_fields, to_fields, **kwargs)
class ArticleTranslation(models.Model):
article = models.ForeignKey('indexes.Article', models.CASCADE)
language = models.CharField(max_length=10, unique=True)
content = models.TextField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
# Add virtual relation to the ArticleTranslation model.
translation = CurrentTranslation(ArticleTranslation, models.CASCADE, ['id'], ['article'])
class Meta:
index_together = [
["headline", "pub_date"],
]
# Model for index_together being used only with single list
class IndexTogetherSingleList(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
index_together = ["headline", "pub_date"]
# Indexing a TextField on Oracle or MySQL results in index creation error.
if connection.vendor == 'postgresql':
class IndexedArticle(models.Model):
headline = models.CharField(max_length=100, db_index=True)
body = models.TextField(db_index=True)
slug = models.CharField(max_length=40, unique=True)
| bsd-3-clause |
medit74/DeepLearning | MyPythonDeepLearning/Training/mymnist.py | 1 | 2739 | '''
Created on 2017. 4. 7.
@author: Byoungho Kang
'''
import numpy as np
import pickle
from Common.mnist import load_mnist
from Common.functions import sigmoid, softmax
'''
load mnist data set
'''
def getData():
(trainImg, trainLbl),(testImg, testLbl) = \
load_mnist(normalize=True, flatten=True, one_hot_label=False)
print(trainImg.shape) # (60000, 784)
print(trainLbl.shape) # (60000,)
print(testImg.shape) # (10000, 784)
print(testLbl.shape) # (10000,)
return (trainImg, trainLbl),(testImg, testLbl)
def initNetwork():
with open("../resources/sample_weight.pkl","rb") as f:
network = pickle.load(f)
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
print(W1.shape, W2.shape, W3.shape)
print(b1.shape, b2.shape, b3.shape)
return network
def predict(network,x):
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = softmax(a3)
return y
def crossEntropyError(y, t):
    # If y is a 1-D array, reshape it into a 2-D array
if y.ndim == 1:
t = t.reshape(1, t.size)
y = y.reshape(1, y.size)
miniBatchSize = y.shape[0]
'''
    y[np.arange(miniBatchSize), t] extracts the network outputs that correspond to the correct labels.
    e.g. with a batch size of 10, it extracts the 10 output values below:
    y[0,2], y[1,7], y[2,0], y[3,9], ... y[9,5]
'''
return -np.sum(t*np.log(y[np.arange(miniBatchSize), t])) / miniBatchSize
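# Illustrative sketch of the fancy indexing above (added comment): with
# y = np.array([[0.1, 0.2, 0.7], [0.8, 0.1, 0.1]]) and t = np.array([2, 0]),
# y[np.arange(2), t] selects y[0, 2] and y[1, 0], so the mean cross-entropy
# is -(np.log(0.7) + np.log(0.8)) / 2.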
'''
load dataset and initialize weight & bias
'''
(trainImg, trainLbl),(testImg, testLbl) = getData()
network = initNetwork()
'''
test first element
'''
img = trainImg[0].reshape(28,28) * 255
lbl = trainLbl[0]
'''
predict
- MiniBatch Target Setting
 - Predict (run inference)
 - Cost Function (compute the cost)
'''
miniBatchSize = 10
print("Training on a mini-batch of", miniBatchSize, "out of", len(trainImg), "samples.")
miniBatchMask = np.random.choice(len(trainImg), miniBatchSize)
trainImgBatch = trainImg[miniBatchMask]
trainLblBatch = trainLbl[miniBatchMask]
print("Randomly chosen ground-truth labels", trainLblBatch)
y = predict(network, trainImgBatch)
print("Network output of Shape (10 - batch size, 10 - number of classes)\n", y)
p = np.argmax(y, axis=1)
print("Predictions taken as the argmax", p)
print(crossEntropyError(y, trainLblBatch))
| apache-2.0 |
vikomall/pyrax | samples/cloud_dns/update_ptr_record.py | 13 | 1646 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import pyrax
import pyrax.exceptions as exc
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
dns = pyrax.cloud_dns
cs = pyrax.cloudservers
# Be sure to substitute an actual server ID here
server_id = "00000000-0000-0000-0000-000000000000"
server = cs.servers.get(server_id)
domain_name = "abc.example.edu"
records = dns.list_ptr_records(server)
if not records:
print("There are no PTR records for device '%s' to update." % server)
sys.exit()
rec = records[0]
orig_ttl = rec.ttl
orig_data = rec.data
# Add 5 minutes
new_ttl = orig_ttl + 300
resp = dns.update_ptr_record(server, rec, domain_name, ttl=new_ttl,
data=orig_data, comment="TTL has been increased")
if resp:
print("Original TTL:", orig_ttl)
print("New TTL:", new_ttl)
else:
print("Update failed.")
print()
| apache-2.0 |
mezz64/home-assistant | homeassistant/components/sensor/device_trigger.py | 8 | 6032 | """Provides device triggers for sensors."""
import voluptuous as vol
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ABOVE,
CONF_BELOW,
CONF_ENTITY_ID,
CONF_FOR,
CONF_TYPE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_POWER_FACTOR,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
DEVICE_CLASS_VOLTAGE,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_registry import async_entries_for_device
from . import DOMAIN
# mypy: allow-untyped-defs, no-check-untyped-defs
DEVICE_CLASS_NONE = "none"
CONF_BATTERY_LEVEL = "battery_level"
CONF_CURRENT = "current"
CONF_ENERGY = "energy"
CONF_HUMIDITY = "humidity"
CONF_ILLUMINANCE = "illuminance"
CONF_POWER = "power"
CONF_POWER_FACTOR = "power_factor"
CONF_PRESSURE = "pressure"
CONF_SIGNAL_STRENGTH = "signal_strength"
CONF_TEMPERATURE = "temperature"
CONF_TIMESTAMP = "timestamp"
CONF_VOLTAGE = "voltage"
CONF_VALUE = "value"
ENTITY_TRIGGERS = {
DEVICE_CLASS_BATTERY: [{CONF_TYPE: CONF_BATTERY_LEVEL}],
DEVICE_CLASS_CURRENT: [{CONF_TYPE: CONF_CURRENT}],
DEVICE_CLASS_ENERGY: [{CONF_TYPE: CONF_ENERGY}],
DEVICE_CLASS_HUMIDITY: [{CONF_TYPE: CONF_HUMIDITY}],
DEVICE_CLASS_ILLUMINANCE: [{CONF_TYPE: CONF_ILLUMINANCE}],
DEVICE_CLASS_POWER: [{CONF_TYPE: CONF_POWER}],
DEVICE_CLASS_POWER_FACTOR: [{CONF_TYPE: CONF_POWER_FACTOR}],
DEVICE_CLASS_PRESSURE: [{CONF_TYPE: CONF_PRESSURE}],
DEVICE_CLASS_SIGNAL_STRENGTH: [{CONF_TYPE: CONF_SIGNAL_STRENGTH}],
DEVICE_CLASS_TEMPERATURE: [{CONF_TYPE: CONF_TEMPERATURE}],
DEVICE_CLASS_TIMESTAMP: [{CONF_TYPE: CONF_TIMESTAMP}],
DEVICE_CLASS_VOLTAGE: [{CONF_TYPE: CONF_VOLTAGE}],
DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_VALUE}],
}
TRIGGER_SCHEMA = vol.All(
TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(
[
CONF_BATTERY_LEVEL,
CONF_CURRENT,
CONF_ENERGY,
CONF_HUMIDITY,
CONF_ILLUMINANCE,
CONF_POWER,
CONF_POWER_FACTOR,
CONF_PRESSURE,
CONF_SIGNAL_STRENGTH,
CONF_TEMPERATURE,
CONF_TIMESTAMP,
CONF_VOLTAGE,
CONF_VALUE,
]
),
vol.Optional(CONF_BELOW): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_ABOVE): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
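# Illustrative trigger config (added comment; the field values are
# hypothetical): a dict that should validate against TRIGGER_SCHEMA could
# look like
#     {
#         "platform": "device",
#         "domain": DOMAIN,
#         "device_id": "1234abcd",
#         "entity_id": "sensor.living_room_temperature",
#         "type": CONF_TEMPERATURE,
#         "above": 25,
#         "for": {"minutes": 5},
#     }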
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
numeric_state_config = {
numeric_state_trigger.CONF_PLATFORM: "numeric_state",
numeric_state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
}
if CONF_ABOVE in config:
numeric_state_config[numeric_state_trigger.CONF_ABOVE] = config[CONF_ABOVE]
if CONF_BELOW in config:
numeric_state_config[numeric_state_trigger.CONF_BELOW] = config[CONF_BELOW]
if CONF_FOR in config:
numeric_state_config[CONF_FOR] = config[CONF_FOR]
numeric_state_config = numeric_state_trigger.TRIGGER_SCHEMA(numeric_state_config)
return await numeric_state_trigger.async_attach_trigger(
hass, numeric_state_config, action, automation_info, platform_type="device"
)
async def async_get_triggers(hass, device_id):
"""List device triggers."""
triggers = []
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == DOMAIN
]
for entry in entries:
device_class = DEVICE_CLASS_NONE
state = hass.states.get(entry.entity_id)
unit_of_measurement = (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if state else None
)
if not state or not unit_of_measurement:
continue
if ATTR_DEVICE_CLASS in state.attributes:
device_class = state.attributes[ATTR_DEVICE_CLASS]
templates = ENTITY_TRIGGERS.get(
device_class, ENTITY_TRIGGERS[DEVICE_CLASS_NONE]
)
triggers.extend(
{
**automation,
"platform": "device",
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
}
for automation in templates
)
return triggers
async def async_get_trigger_capabilities(hass, config):
"""List trigger capabilities."""
state = hass.states.get(config[CONF_ENTITY_ID])
unit_of_measurement = (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if state else None
)
if not state or not unit_of_measurement:
raise InvalidDeviceAutomationConfig
return {
"extra_fields": vol.Schema(
{
vol.Optional(
CONF_ABOVE, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(
CONF_BELOW, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
}
| apache-2.0 |
NEricN/RobotCSimulator | Python/App/Lib/csv.py | 63 | 16708 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from functools import reduce
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe an Excel dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError, e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = self.reader.next()
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
# Issue 20004: Because DictReader is a classic class, this setter is
# ignored. At this point in 2.7's lifecycle, it is too late to change the
# base class for fear of breaking working code. If you want to change
# fieldnames without overwriting the getter, set _fieldnames directly.
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def next(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = self.reader.next()
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = self.reader.next()
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
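    # Illustrative row handling (added comment, not part of this module):
    # with fieldnames ['a', 'b'] and restkey 'rest', the long row
    # ['1', '2', '3'] yields {'a': '1', 'b': '2', 'rest': ['3']}, while the
    # short row ['1'] yields {'a': '1', 'b': None} under the default restval.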
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError, \
("extrasaction (%s) must be 'raise' or 'ignore'" %
extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join([repr(x) for x in wrong_fields]))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
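# Illustrative DictWriter usage (added comment, not part of this module):
# given a file object f opened for writing,
#     w = DictWriter(f, fieldnames=['name', 'age'])
#     w.writeheader()
#     w.writerow({'name': 'Ada', 'age': 36})
# emits a "name,age" header row followed by "Ada,36" in the default excel
# dialect.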
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error, "Could not determine delimiter"
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
                      '(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',    # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = reduce(lambda a, b, quotes = quotes:
(quotes[a] > quotes[b]) and a or b, quotes.keys())
if delims:
delim = reduce(lambda a, b, delims = delims:
(delims[a] > delims[b]) and a or b, delims.keys())
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(
r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
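
        Illustrative walk-through (added note): for the two rows "a,b,c" and
        "d,e,f", ',' occurs exactly twice on each line, so its meta-frequency
        is {2: 2}; that mode meets the 90% consistency threshold on every
        row and ',' is chosen as the delimiter.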
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = rdr.next() # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in columnTypes.keys():
for thisType in [int, long, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
# treat longs as ints
if thisType == long:
thisType = int
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
| apache-2.0 |
whuthj/zpublic | 3rdparty/google/gtest/test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
      args : Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| unlicense |
Prasad9/incubator-mxnet | python/mxnet/gluon/contrib/rnn/rnn_cell.py | 21 | 8034 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Definition of various recurrent neural network cells."""
__all__ = ['VariationalDropoutCell']
from ...rnn import BidirectionalCell, SequentialRNNCell, ModifierCell
from ...rnn.rnn_cell import _format_sequence, _get_begin_state
class VariationalDropoutCell(ModifierCell):
"""
Applies Variational Dropout on base cell.
(https://arxiv.org/pdf/1512.05287.pdf,
https://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf).
Variational dropout uses the same dropout mask across time-steps. It can be applied to RNN
inputs, outputs, and states. The masks for them are not shared.
The dropout mask is initialized when stepping forward for the first time and will remain
    the same until .reset() is called. Thus, when using the cell and stepping manually without
    calling .unroll(), .reset() should be called after each sequence.
Parameters
----------
base_cell : RecurrentCell
The cell on which to perform variational dropout.
drop_inputs : float, default 0.
The dropout rate for inputs. Won't apply dropout if it equals 0.
drop_states : float, default 0.
The dropout rate for state inputs on the first state channel.
Won't apply dropout if it equals 0.
drop_outputs : float, default 0.
The dropout rate for outputs. Won't apply dropout if it equals 0.
"""
def __init__(self, base_cell, drop_inputs=0., drop_states=0., drop_outputs=0.):
assert not drop_states or not isinstance(base_cell, BidirectionalCell), \
"BidirectionalCell doesn't support variational state dropout. " \
"Please add VariationalDropoutCell to the cells underneath instead."
assert not drop_states \
or not isinstance(base_cell, SequentialRNNCell) or not base_cell._bidirectional, \
"Bidirectional SequentialRNNCell doesn't support variational state dropout. " \
"Please add VariationalDropoutCell to the cells underneath instead."
super(VariationalDropoutCell, self).__init__(base_cell)
self.drop_inputs = drop_inputs
self.drop_states = drop_states
self.drop_outputs = drop_outputs
self.drop_inputs_mask = None
self.drop_states_mask = None
self.drop_outputs_mask = None
def _alias(self):
return 'vardrop'
def reset(self):
super(VariationalDropoutCell, self).reset()
self.drop_inputs_mask = None
self.drop_states_mask = None
self.drop_outputs_mask = None
def _initialize_input_masks(self, F, inputs, states):
if self.drop_states and self.drop_states_mask is None:
self.drop_states_mask = F.Dropout(F.ones_like(states[0]),
p=self.drop_states)
if self.drop_inputs and self.drop_inputs_mask is None:
self.drop_inputs_mask = F.Dropout(F.ones_like(inputs),
p=self.drop_inputs)
def _initialize_output_mask(self, F, output):
if self.drop_outputs and self.drop_outputs_mask is None:
self.drop_outputs_mask = F.Dropout(F.ones_like(output),
p=self.drop_outputs)
def hybrid_forward(self, F, inputs, states):
cell = self.base_cell
self._initialize_input_masks(F, inputs, states)
if self.drop_states:
states = list(states)
# state dropout only needs to be applied on h, which is always the first state.
states[0] = states[0] * self.drop_states_mask
if self.drop_inputs:
inputs = inputs * self.drop_inputs_mask
next_output, next_states = cell(inputs, states)
self._initialize_output_mask(F, next_output)
if self.drop_outputs:
next_output = next_output * self.drop_outputs_mask
return next_output, next_states
def __repr__(self):
s = '{name}(p_out = {drop_outputs}, p_state = {drop_states})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
"""Unrolls an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if `layout` is 'NTC',
or (length, batch_size, ...) if `layout` is 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_state : nested list of Symbol, optional
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if `None`.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If `False`, returns outputs as a list of Symbols.
If `True`, concatenates output across time steps
and returns a single symbol with shape
(batch_size, length, ...) if layout is 'NTC',
or (length, batch_size, ...) if layout is 'TNC'.
If `None`, output whatever is faster.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
"""
# Dropout on inputs and outputs can be performed on the whole sequence
# only when state dropout is not present.
if self.drop_states:
return super(VariationalDropoutCell, self).unroll(length, inputs, begin_state,
layout, merge_outputs)
self.reset()
inputs, axis, F, batch_size = _format_sequence(length, inputs, layout, True)
states = _get_begin_state(self, F, begin_state, inputs, batch_size)
if self.drop_inputs:
first_input = inputs.slice_axis(axis, 0, 1).split(1, axis=axis, squeeze_axis=True)
self._initialize_input_masks(F, first_input, states)
inputs = F.broadcast_mul(inputs, self.drop_inputs_mask.expand_dims(axis=axis))
outputs, states = self.base_cell.unroll(length, inputs, states, layout, merge_outputs=True)
if self.drop_outputs:
first_output = outputs.slice_axis(axis, 0, 1).split(1, axis=axis, squeeze_axis=True)
self._initialize_output_mask(F, first_output)
outputs = F.broadcast_mul(outputs, self.drop_outputs_mask.expand_dims(axis=axis))
outputs, _, _, _ = _format_sequence(length, outputs, layout, merge_outputs)
return outputs, states
| apache-2.0 |
achow101/bitcoin | test/functional/feature_reindex.py | 28 | 1445 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running bitcoind with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class ReindexTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def reindex(self, justchainstate=False):
self.nodes[0].generatetoaddress(3, self.nodes[0].get_deterministic_priv_key().address)
blockcount = self.nodes[0].getblockcount()
self.stop_nodes()
extra_args = [["-reindex-chainstate" if justchainstate else "-reindex"]]
self.start_nodes(extra_args)
assert_equal(self.nodes[0].getblockcount(), blockcount) # start_node is blocking on reindex
self.log.info("Success")
def run_test(self):
self.reindex(False)
self.reindex(True)
self.reindex(False)
self.reindex(True)
if __name__ == '__main__':
ReindexTest().main()
| mit |
jmcarp/django | tests/auth_tests/test_basic.py | 328 | 4643 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.test import TestCase, override_settings
from django.test.signals import setting_changed
from django.utils import translation
@receiver(setting_changed)
def user_model_swapped(**kwargs):
if kwargs['setting'] == 'AUTH_USER_MODEL':
from django.db.models.manager import ensure_default_manager
# Reset User manager
setattr(User, 'objects', User._default_manager)
ensure_default_manager(User)
apps.clear_cache()
class BasicTestCase(TestCase):
def test_user(self):
"Check that users can be created and can set their password"
u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), 'testuser')
# Check authentication/permissions
self.assertTrue(u.is_authenticated())
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', 'test2@example.com')
self.assertFalse(u2.has_usable_password())
def test_user_no_email(self):
"Check that users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertEqual(a.pk, None)
self.assertEqual(a.username, '')
self.assertEqual(a.get_username(), '')
self.assertFalse(a.is_authenticated())
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', 'super@example.com', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
| bsd-3-clause |
jordiclariana/ansible | lib/ansible/modules/univention/udm_dns_zone.py | 27 | 7625 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: udm_dns_zone
version_added: "2.2"
author: "Tobias Rueetschi (@2-B)"
short_description: Manage dns zones on a univention corporate server
description:
- "This module allows to manage dns zones on a univention corporate server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the dns zone is present or not.
type:
required: true
choices: [ forward_zone, reverse_zone ]
description:
- Define if the zone is a forward or reverse DNS zone.
zone:
required: true
description:
- DNS zone name, e.g. C(example.com).
nameserver:
required: false
description:
- List of appropriate name servers. Required if C(state=present).
interfaces:
required: false
description:
- List of interface IP addresses, on which the server should
response this zone. Required if C(state=present).
refresh:
required: false
default: 3600
description:
- Interval before the zone should be refreshed.
retry:
required: false
default: 1800
description:
- Interval that should elapse before a failed refresh should be retried.
expire:
required: false
default: 604800
description:
- Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
ttl:
required: false
default: 600
description:
- Minimum TTL field that should be exported with any RR from this zone.
contact:
required: false
default: ''
description:
- Contact person in the SOA record.
mx:
required: false
default: []
description:
- List of MX servers. (Must declared as A or AAAA records).
'''
EXAMPLES = '''
# Create a DNS zone on a UCS
- udm_dns_zone:
zone: example.com
type: forward_zone
nameserver:
- ucs.example.com
interfaces:
- 192.0.2.1
'''
RETURN = '''# '''
def convert_time(time):
"""Convert a time in seconds into the biggest unit"""
units = [
(24 * 60 * 60 , 'days'),
(60 * 60 , 'hours'),
(60 , 'minutes'),
(1 , 'seconds'),
]
if time == 0:
return ('0', 'seconds')
for unit in units:
if time >= unit[0]:
return ('{}'.format(time // unit[0]), unit[1])
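# Illustrative behaviour of convert_time():
#   convert_time(3600)   -> ('1', 'hours')
#   convert_time(90)     -> ('1', 'minutes')
#   convert_time(604800) -> ('7', 'days')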
def main():
module = AnsibleModule(
argument_spec = dict(
type = dict(required=True,
type='str'),
zone = dict(required=True,
aliases=['name'],
type='str'),
nameserver = dict(default=[],
type='list'),
interfaces = dict(default=[],
type='list'),
refresh = dict(default=3600,
type='int'),
retry = dict(default=1800,
type='int'),
expire = dict(default=604800,
type='int'),
ttl = dict(default=600,
type='int'),
contact = dict(default='',
type='str'),
mx = dict(default=[],
type='list'),
state = dict(default='present',
choices=['present', 'absent'],
type='str')
),
supports_check_mode=True,
required_if = ([
('state', 'present', ['nameserver', 'interfaces'])
])
)
type = module.params['type']
zone = module.params['zone']
nameserver = module.params['nameserver']
interfaces = module.params['interfaces']
refresh = module.params['refresh']
retry = module.params['retry']
expire = module.params['expire']
ttl = module.params['ttl']
contact = module.params['contact']
mx = module.params['mx']
state = module.params['state']
    changed = False
    # Initialize diff so the exit_json() call below cannot hit a NameError
    # when no create/edit took place (e.g. state == 'absent').
    diff = None
obj = list(ldap_search(
'(&(objectClass=dNSZone)(zoneName={}))'.format(zone),
attr=['dNSZone']
))
exists = bool(len(obj))
container = 'cn=dns,{}'.format(base_dn())
dn = 'zoneName={},{}'.format(zone, container)
if contact == '':
contact = 'root@{}.'.format(zone)
if state == 'present':
try:
if not exists:
obj = umc_module_for_add('dns/{}'.format(type), container)
else:
obj = umc_module_for_edit('dns/{}'.format(type), dn)
obj['zone'] = zone
obj['nameserver'] = nameserver
obj['a'] = interfaces
obj['refresh'] = convert_time(refresh)
obj['retry'] = convert_time(retry)
obj['expire'] = convert_time(expire)
obj['ttl'] = convert_time(ttl)
obj['contact'] = contact
obj['mx'] = mx
diff = obj.diff()
if exists:
for k in obj.keys():
if obj.hasChanged(k):
changed = True
else:
changed = True
if not module.check_mode:
if not exists:
obj.create()
elif changed:
obj.modify()
except Exception as e:
module.fail_json(
msg='Creating/editing dns zone {} failed: {}'.format(zone, e)
)
if state == 'absent' and exists:
try:
obj = umc_module_for_edit('dns/{}'.format(type), dn)
if not module.check_mode:
obj.remove()
changed = True
except Exception as e:
module.fail_json(
msg='Removing dns zone {} failed: {}'.format(zone, e)
)
module.exit_json(
changed=changed,
diff=diff,
zone=zone
)
if __name__ == '__main__':
main()
| gpl-3.0 |
asdofindia/kitsune | kitsune/dashboards/urls.py | 21 | 1429 | from django.conf.urls import patterns, url
from kitsune.dashboards import api
urlpatterns = patterns(
'kitsune.dashboards.views',
url(r'^localization$', 'localization', name='dashboards.localization'),
url(r'^contributors$', 'contributors', name='dashboards.contributors'),
url(r'^contributors-old$', 'contributors_old',
name='dashboards.contributors_old'),
url(r'^contributors/overview-rows$', 'contributors_overview_rows',
name='dashboards.contributors_overview_rows'),
url(r'^wiki-rows/(?P<readout_slug>[^/]+)', 'wiki_rows',
name='dashboards.wiki_rows'),
url(r'^localization/(?P<readout_slug>[^/]+)', 'localization_detail',
name='dashboards.localization_detail'),
url(r'^contributors/kb-overview$', 'contributors_overview',
name='dashboards.contributors_overview'),
url(r'^contributors/(?P<readout_slug>[^/]+)', 'contributors_detail',
name='dashboards.contributors_detail'),
# The aggregated kb metrics dashboard.
url(r'^kb/dashboard/metrics/aggregated$', 'aggregated_metrics',
name='dashboards.aggregated_metrics'),
# The per-locale kb metrics dashboard.
url(r'^kb/dashboard/metrics/(?P<locale_code>[^/]+)$', 'locale_metrics',
name='dashboards.locale_metrics'),
# API to pull wiki metrics data.
url(r'^api/v1/wikimetrics/?$', api.WikiMetricList.as_view(),
name='api.wikimetric_list'),
)
| bsd-3-clause |
xiaozhuchacha/OpenBottle | grammar_induction/earley_parser/nltk/tokenize/treebank.py | 5 | 4625 | # Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Michael Heilman <mheilman@cmu.edu> (re-port from http://www.cis.upenn.edu/~treebank/tokenizer.sed)
#
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
r"""
Penn Treebank Tokenizer
The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank.
This implementation is a port of the tokenizer sed script written by Robert McIntyre
and available at http://www.cis.upenn.edu/~treebank/tokenizer.sed.
"""
import re
from nltk.tokenize.api import TokenizerI
class TreebankWordTokenizer(TokenizerI):
"""
The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank.
This is the method that is invoked by ``word_tokenize()``. It assumes that the
text has already been segmented into sentences, e.g. using ``sent_tokenize()``.
This tokenizer performs the following steps:
- split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll``
- treat most punctuation characters as separate tokens
- split off commas and single quotes, when followed by whitespace
- separate periods that appear at the end of line
>>> from nltk.tokenize import TreebankWordTokenizer
>>> s = '''Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\nThanks.'''
>>> TreebankWordTokenizer().tokenize(s)
['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks', '.']
>>> s = "They'll save and invest more."
>>> TreebankWordTokenizer().tokenize(s)
['They', "'ll", 'save', 'and', 'invest', 'more', '.']
>>> s = "hi, my name can't hello,"
>>> TreebankWordTokenizer().tokenize(s)
['hi', ',', 'my', 'name', 'ca', "n't", 'hello', ',']
"""
#starting quotes
STARTING_QUOTES = [
(re.compile(r'^\"'), r'``'),
(re.compile(r'(``)'), r' \1 '),
(re.compile(r'([ (\[{<])"'), r'\1 `` '),
]
#punctuation
PUNCTUATION = [
(re.compile(r'([:,])([^\d])'), r' \1 \2'),
(re.compile(r'([:,])$'), r' \1 '),
(re.compile(r'\.\.\.'), r' ... '),
(re.compile(r'[;@#$%&]'), r' \g<0> '),
(re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), r'\1 \2\3 '),
(re.compile(r'[?!]'), r' \g<0> '),
(re.compile(r"([^'])' "), r"\1 ' "),
]
#parens, brackets, etc.
PARENS_BRACKETS = [
(re.compile(r'[\]\[\(\)\{\}\<\>]'), r' \g<0> '),
(re.compile(r'--'), r' -- '),
]
#ending quotes
ENDING_QUOTES = [
(re.compile(r'"'), " '' "),
(re.compile(r'(\S)(\'\')'), r'\1 \2 '),
(re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
(re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
]
# List of contractions adapted from Robert MacIntyre's tokenizer.
CONTRACTIONS2 = [re.compile(r"(?i)\b(can)(not)\b"),
re.compile(r"(?i)\b(d)('ye)\b"),
re.compile(r"(?i)\b(gim)(me)\b"),
re.compile(r"(?i)\b(gon)(na)\b"),
re.compile(r"(?i)\b(got)(ta)\b"),
re.compile(r"(?i)\b(lem)(me)\b"),
re.compile(r"(?i)\b(mor)('n)\b"),
re.compile(r"(?i)\b(wan)(na) ")]
CONTRACTIONS3 = [re.compile(r"(?i) ('t)(is)\b"),
re.compile(r"(?i) ('t)(was)\b")]
CONTRACTIONS4 = [re.compile(r"(?i)\b(whad)(dd)(ya)\b"),
re.compile(r"(?i)\b(wha)(t)(cha)\b")]
def tokenize(self, text):
for regexp, substitution in self.STARTING_QUOTES:
text = regexp.sub(substitution, text)
for regexp, substitution in self.PUNCTUATION:
text = regexp.sub(substitution, text)
for regexp, substitution in self.PARENS_BRACKETS:
text = regexp.sub(substitution, text)
#add extra space to make things easier
text = " " + text + " "
for regexp, substitution in self.ENDING_QUOTES:
text = regexp.sub(substitution, text)
for regexp in self.CONTRACTIONS2:
text = regexp.sub(r' \1 \2 ', text)
for regexp in self.CONTRACTIONS3:
text = regexp.sub(r' \1 \2 ', text)
# We are not using CONTRACTIONS4 since
# they are also commented out in the SED scripts
# for regexp in self.CONTRACTIONS4:
# text = regexp.sub(r' \1 \2 \3 ', text)
return text.split()
| mit |
Designist/audacity | lib-src/lv2/lv2/plugins/eg-metro.lv2/waflib/Tools/gxx.py | 196 | 2806 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_gxx(conf):
cxx=conf.find_program(['g++','c++'],var='CXX')
cxx=conf.cmd_to_list(cxx)
conf.get_cc_version(cxx,gcc=True)
conf.env.CXX_NAME='gcc'
conf.env.CXX=cxx
@conf
def gxx_common_flags(conf):
v=conf.env
v['CXX_SRC_F']=[]
v['CXX_TGT_F']=['-c','-o']
if not v['LINK_CXX']:v['LINK_CXX']=v['CXX']
v['CXXLNK_SRC_F']=[]
v['CXXLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['RPATH_ST']='-Wl,-rpath,%s'
v['SONAME_ST']='-Wl,-h,%s'
v['SHLIB_MARKER']='-Wl,-Bdynamic'
v['STLIB_MARKER']='-Wl,-Bstatic'
v['cxxprogram_PATTERN']='%s'
v['CXXFLAGS_cxxshlib']=['-fPIC']
v['LINKFLAGS_cxxshlib']=['-shared']
v['cxxshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cxxstlib']=['-Wl,-Bstatic']
v['cxxstlib_PATTERN']='lib%s.a'
v['LINKFLAGS_MACBUNDLE']=['-bundle','-undefined','dynamic_lookup']
v['CXXFLAGS_MACBUNDLE']=['-fPIC']
v['macbundle_PATTERN']='%s.bundle'
@conf
def gxx_modifier_win32(conf):
v=conf.env
v['cxxprogram_PATTERN']='%s.exe'
v['cxxshlib_PATTERN']='%s.dll'
v['implib_PATTERN']='lib%s.dll.a'
v['IMPLIB_ST']='-Wl,--out-implib,%s'
v['CXXFLAGS_cxxshlib']=[]
v.append_value('LINKFLAGS',['-Wl,--enable-auto-import'])
@conf
def gxx_modifier_cygwin(conf):
gxx_modifier_win32(conf)
v=conf.env
v['cxxshlib_PATTERN']='cyg%s.dll'
v.append_value('LINKFLAGS_cxxshlib',['-Wl,--enable-auto-image-base'])
v['CXXFLAGS_cxxshlib']=[]
@conf
def gxx_modifier_darwin(conf):
v=conf.env
v['CXXFLAGS_cxxshlib']=['-fPIC']
v['LINKFLAGS_cxxshlib']=['-dynamiclib','-Wl,-compatibility_version,1','-Wl,-current_version,1']
v['cxxshlib_PATTERN']='lib%s.dylib'
v['FRAMEWORKPATH_ST']='-F%s'
v['FRAMEWORK_ST']=['-framework']
v['ARCH_ST']=['-arch']
v['LINKFLAGS_cxxstlib']=[]
v['SHLIB_MARKER']=[]
v['STLIB_MARKER']=[]
v['SONAME_ST']=[]
@conf
def gxx_modifier_aix(conf):
v=conf.env
v['LINKFLAGS_cxxprogram']=['-Wl,-brtl']
v['LINKFLAGS_cxxshlib']=['-shared','-Wl,-brtl,-bexpfull']
v['SHLIB_MARKER']=[]
@conf
def gxx_modifier_hpux(conf):
v=conf.env
v['SHLIB_MARKER']=[]
v['STLIB_MARKER']='-Bstatic'
v['CFLAGS_cxxshlib']=['-fPIC','-DPIC']
v['cxxshlib_PATTERN']='lib%s.sl'
@conf
def gxx_modifier_openbsd(conf):
conf.env.SONAME_ST=[]
@conf
def gxx_modifier_platform(conf):
gxx_modifier_func=getattr(conf,'gxx_modifier_'+conf.env.DEST_OS,None)
if gxx_modifier_func:
gxx_modifier_func()
def configure(conf):
conf.find_gxx()
conf.find_ar()
conf.gxx_common_flags()
conf.gxx_modifier_platform()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
| gpl-2.0 |
alvarolopez/nova | nova/api/openstack/compute/contrib/hypervisors.py | 58 | 9452 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import servicegroup
authorize = extensions.extension_authorizer('compute', 'hypervisors')
class HypervisorsController(object):
"""The Hypervisors API controller for the OpenStack API."""
def __init__(self, ext_mgr):
self.host_api = compute.HostAPI()
self.servicegroup_api = servicegroup.API()
super(HypervisorsController, self).__init__()
self.ext_mgr = ext_mgr
def _view_hypervisor(self, hypervisor, service, detail, servers=None,
**kwargs):
hyp_dict = {
'id': hypervisor.id,
'hypervisor_hostname': hypervisor.hypervisor_hostname,
}
ext_status_loaded = self.ext_mgr.is_loaded('os-hypervisor-status')
if ext_status_loaded:
alive = self.servicegroup_api.service_is_up(service)
hyp_dict['state'] = 'up' if alive else "down"
hyp_dict['status'] = (
'disabled' if service.disabled else 'enabled')
if detail and not servers:
fields = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used',
'hypervisor_type', 'hypervisor_version',
'free_ram_mb', 'free_disk_gb', 'current_workload',
'running_vms', 'cpu_info', 'disk_available_least')
ext_loaded = self.ext_mgr.is_loaded('os-extended-hypervisors')
if ext_loaded:
fields += ('host_ip',)
for field in fields:
hyp_dict[field] = hypervisor[field]
hyp_dict['service'] = {
'id': service.id,
'host': hypervisor.host,
}
if ext_status_loaded:
hyp_dict['service'].update(
disabled_reason=service.disabled_reason)
if servers:
hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
for serv in servers]
# Add any additional info
if kwargs:
hyp_dict.update(kwargs)
return hyp_dict
def index(self, req):
context = req.environ['nova.context']
authorize(context)
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in compute_nodes])
def detail(self, req):
context = req.environ['nova.context']
authorize(context)
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
True)
for hyp in compute_nodes])
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
service = self.host_api.service_get_by_compute_host(
context, hyp.host)
return dict(hypervisor=self._view_hypervisor(hyp, service, True))
def uptime(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
# Get the uptime
try:
host = hyp.host
uptime = self.host_api.get_host_uptime(context, host)
except NotImplementedError:
msg = _("Virt driver does not implement uptime function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
service = self.host_api.service_get_by_compute_host(context, host)
return dict(hypervisor=self._view_hypervisor(hyp, service, False,
uptime=uptime))
def search(self, req, id):
context = req.environ['nova.context']
authorize(context)
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
hypervisors = self.host_api.compute_node_search_by_hypervisor(
context, id)
if hypervisors:
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in hypervisors])
else:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
def servers(self, req, id):
context = req.environ['nova.context']
authorize(context)
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
compute_nodes = self.host_api.compute_node_search_by_hypervisor(
context, id)
if not compute_nodes:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
hypervisors = []
for compute_node in compute_nodes:
instances = self.host_api.instance_get_all_by_host(context,
compute_node.host)
service = self.host_api.service_get_by_compute_host(
context, compute_node.host)
hyp = self._view_hypervisor(compute_node, service, False,
instances)
hypervisors.append(hyp)
return dict(hypervisors=hypervisors)
def statistics(self, req):
context = req.environ['nova.context']
authorize(context)
stats = self.host_api.compute_node_statistics(context)
return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.ExtensionDescriptor):
"""Admin-only hypervisor administration."""
name = "Hypervisors"
alias = "os-hypervisors"
namespace = "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1"
updated = "2012-06-21T00:00:00Z"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hypervisors',
HypervisorsController(self.ext_mgr),
collection_actions={'detail': 'GET',
'statistics': 'GET'},
member_actions={'uptime': 'GET',
'search': 'GET',
'servers': 'GET'})]
return resources
| apache-2.0 |
pombredanne/MOG | nova/virt/docker/hostinfo.py | 13 | 2051 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 dotCloud, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def statvfs():
docker_path = '/var/lib/docker'
if not os.path.exists(docker_path):
docker_path = '/'
return os.statvfs(docker_path)
def get_meminfo():
with open('/proc/meminfo') as f:
return f.readlines()
def get_disk_usage():
# This is the location where Docker stores its containers. It's currently
# hardcoded in Docker so it's not configurable yet.
st = statvfs()
return {
'total': st.f_blocks * st.f_frsize,
'available': st.f_bavail * st.f_frsize,
'used': (st.f_blocks - st.f_bfree) * st.f_frsize
}
def parse_meminfo():
meminfo = {}
for ln in get_meminfo():
parts = ln.split(':')
if len(parts) < 2:
continue
key = parts[0].lower()
value = parts[1].strip()
parts = value.split(' ')
value = parts[0]
if not value.isdigit():
continue
value = int(parts[0])
if len(parts) > 1 and parts[1] == 'kB':
value *= 1024
meminfo[key] = value
return meminfo
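# Illustrative parse: a /proc/meminfo line such as "MemTotal: 2046848 kB"
# becomes meminfo['memtotal'] = 2046848 * 1024 -- values reported in kB are
# converted to bytes, unitless values are kept as-is.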
def get_memory_usage():
meminfo = parse_meminfo()
total = meminfo.get('memtotal', 0)
free = meminfo.get('memfree', 0)
free += meminfo.get('cached', 0)
free += meminfo.get('buffers', 0)
return {
'total': total,
'free': free,
'used': total - free
}
| apache-2.0 |
GunoH/intellij-community | python/testData/inspections/PyTypeCheckerInspection/PathLikePassedToStdlibFunctions.py | 2 | 1192 | import os.path
from pathlib import Path, PurePath
# os.PathLike
class A:
def __fspath__(self) -> str:
pass
a = A()
open(a)
os.fspath(a)
os.fsencode(a)
os.fsdecode(a)
Path(a)
PurePath(a)
os.path.abspath(a)
# not os.PathLike
class B:
pass
b = B()
open(<warning descr="Expected type 'Union[str, bytes, PathLike[str], PathLike[bytes], int]', got 'B' instead">b</warning>)
os.fspath(<warning descr="Unexpected type(s):(B)Possible type(s):(PathLike[Union[Union[str, bytes], Any]])(bytes)(str)">b</warning>)
os.fsencode(<warning descr="Expected type 'Union[str, bytes, PathLike]', got 'B' instead">b</warning>)
os.fsdecode(<warning descr="Expected type 'Union[str, bytes, PathLike]', got 'B' instead">b</warning>)
Path(<warning descr="Expected type 'Union[str, PathLike[str]]', got 'B' instead">b</warning>)
PurePath(<warning descr="Expected type 'Union[str, PathLike[str]]', got 'B' instead">b</warning>)
os.path.abspath(<warning descr="Unexpected type(s):(B)Possible type(s):(AnyStr)(PathLike[Union[Union[str, bytes], Any]])">b</warning>)
# pathlib.PurePath
p = Path(".")
open(p)
os.fspath(p)
os.fsencode(p)
os.fsdecode(p)
Path(p)
PurePath(p)
os.path.abspath(p) | apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/django_countries/tests/test_widgets.py | 7 | 2159 | from __future__ import unicode_literals
try:
from urllib import parse as urlparse
except ImportError:
import urlparse # Python 2
from django.forms.models import modelform_factory
from django.test import TestCase
from django.utils import safestring
from django.utils.html import escape
from django_countries import widgets, countries, fields
from django_countries.conf import settings
from django_countries.tests.models import Person
class TestCountrySelectWidget(TestCase):
def setUp(self):
del countries.countries
self.Form = modelform_factory(
Person, fields=['country'],
widgets={'country': widgets.CountrySelectWidget})
def tearDown(self):
del countries.countries
def test_not_default_widget(self):
Form = modelform_factory(Person, fields=['country'])
widget = Form().fields['country'].widget
self.assertFalse(isinstance(widget, widgets.CountrySelectWidget))
def test_render_contains_flag_url(self):
with self.settings(COUNTRIES_ONLY={'AU': 'Desert'}):
html = self.Form().as_p()
self.assertIn(escape(urlparse.urljoin(
settings.STATIC_URL, settings.COUNTRIES_FLAG_URL)), html)
def test_render(self):
with self.settings(COUNTRIES_ONLY={'AU': 'Desert'}):
html = self.Form().as_p()
self.assertIn(fields.Country('__').flag, html)
self.assertNotIn(fields.Country('AU').flag, html)
def test_render_initial(self):
with self.settings(COUNTRIES_ONLY={'AU': 'Desert'}):
html = self.Form(initial={'country': 'AU'}).as_p()
self.assertIn(fields.Country('AU').flag, html)
self.assertNotIn(fields.Country('__').flag, html)
def test_render_escaping(self):
output = widgets.CountrySelectWidget().render('test', '<script>')
self.assertIn('<script>', output)
self.assertNotIn('<script>', output)
self.assertTrue(isinstance(output, safestring.SafeData))
def test_render_modelform_instance(self):
person = Person(country='NZ')
self.Form(instance=person).as_p()
| agpl-3.0 |
MaterialsDiscovery/PyChemia | pychemia/utils/serializer.py | 1 | 2457 |
import json
from abc import ABCMeta, abstractmethod
import numpy as np
from pychemia.utils.computing import deep_unicode
from numbers import Integral, Real
class PyChemiaJsonable(object):
"""
Abstract base class specifying how to convert objects from/to dictionaries.
PyChemiaJsonable objects must implement a to_dict property and a from_dict static method.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def to_dict(self):
"""
A JSON representation of an object.
"""
pass
@classmethod
def from_dict(cls, json_dict):
"""
This implements a default from_dict method which supports all
classes that simply try to recreate an object using the keys
as arguments for the creation of the new object.
:param json_dict: Recreate an object from its serialize form
:return:
"""
argstring = ''
for key in json_dict:
argstring += key + '=' + str(json_dict[key]) + ', '
argstring = argstring[:-2]
print(str(cls) + '(' + argstring + ')')
return eval(str(cls) + '(' + argstring + ')')
@property
def to_json(self):
"""
Returns a json string representation of the object.
"""
return json.dumps(self)
def save_to_file(self, filename):
"""
Writes the json representation to a file.
:param filename: (str) Filename for the json that will be created
"""
with open(filename, "w") as f:
json.dump(self, f)
def generic_serializer(value):
"""
A generic serializer for very common values
:param value:
:return:
"""
value = deep_unicode(value)
if value is None:
return None
elif isinstance(value, dict):
new_value = {}
for i in value:
new_value[i] = generic_serializer(value[i])
return new_value
    # Strings must be handled before the generic __iter__ branch: in Python 3
    # they are iterable and would otherwise be split into single characters.
    elif isinstance(value, str):
        return value
    elif hasattr(value, '__iter__'):
        return [generic_serializer(element) for element in value]
elif isinstance(value, Integral):
return int(value)
elif isinstance(value, Real):
return float(value)
elif isinstance(value, np.integer):
return int(value)
    elif isinstance(value, np.floating):
return float(value)
else:
raise ValueError("Could not serialize this: %s of type: %s" % (value, type(value)))
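# Illustrative round-trips through generic_serializer() (assuming
# deep_unicode passes these values through unchanged):
#   generic_serializer(np.array([1.0, 2.0]))    -> [1.0, 2.0]
#   generic_serializer({'a': np.int64(3)})      -> {'a': 3}
#   generic_serializer((np.float64(0.5), None)) -> [0.5, None]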
| mit |
JavML/django | django/db/models/sql/constants.py | 633 | 1039 | """
Constants specific to the SQL storage portion of the ORM.
"""
import re
# Valid query types (a set is used for speedy lookups). These are (currently)
# considered SQL-specific; other storage systems may choose to use different
# lookup types.
QUERY_TERMS = {
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
'regex', 'iregex',
}
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Namedtuples for sql.* internal use.
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
CURSOR = 'cursor'
NO_RESULTS = 'no results'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
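# e.g. ORDER_PATTERN accepts '?', 'name', '-created', '+pk' and 'field.attr',
# but rejects values with trailing junk such as 'name;' (illustrative).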
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC'),
}
# SQL join types.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
| bsd-3-clause |
350dotorg/Django | tests/regressiontests/utils/html.py | 4 | 4041 | import unittest
from django.utils import html
class TestUtilsHtml(unittest.TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
            ('&', '&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#39;'),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
        self.check_output(f, '<&', '&lt;&amp;')
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('<f', '<f'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_fix_ampersands(self):
f = html.fix_ampersands
# Strings without ampersands or with ampersands already encoded.
values = ("a", "b", "&a;", "& &x; ", "asdf")
patterns = (
("%s", "%s"),
("&%s", "&%s"),
("&%s&", "&%s&"),
)
for value in values:
for in_pattern, out_pattern in patterns:
self.check_output(f, in_pattern % value, out_pattern % value)
# Strings with ampersands that need encoding.
items = (
("&#;", "&#;"),
("ͫ ;", "&#875 ;"),
("abc;", "&#4abc;"),
)
for value, output in items:
self.check_output(f, value, output)
| bsd-3-clause |
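The last assertion in `test_escape` is the interesting one: it pins down replacement order. Escaping `&` first means the ampersands introduced by later substitutions are never re-escaped. A simplified re-implementation (not Django's actual code) that satisfies the test:

```python
def escape(text):
    # '&' must be handled first; later substitutions introduce new '&'s
    # that must not be escaped again.
    for old, new in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                     ('"', '&quot;'), ("'", '&#39;')):
        text = text.replace(old, new)
    return text

assert escape('<&') == '&lt;&amp;'   # not '&amp;lt;&amp;amp;'
```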
supergis/QGIS | python/ext-libs/pygments/formatters/bbcode.py | 362 | 3314 | # -*- coding: utf-8 -*-
"""
pygments.formatters.bbcode
~~~~~~~~~~~~~~~~~~~~~~~~~~
BBcode formatter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
__all__ = ['BBCodeFormatter']
class BBCodeFormatter(Formatter):
"""
Format tokens with BBcodes. These formatting codes are used by many
bulletin boards, so you can highlight your sourcecode with pygments before
posting it there.
This formatter has no support for background colors and borders, as there
are no common BBcode tags for that.
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
so you can't use the highlighting together with that tag.
Text in a [code] tag usually is shown with a monospace font (which this
formatter can do with the ``monofont`` option) and no spaces (which you
need for indentation) are removed.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`codetag`
If set to true, put the output into ``[code]`` tags (default:
``false``)
`monofont`
If set to true, add a tag to show the code with a monospace font
(default: ``false``).
"""
name = 'BBCode'
aliases = ['bbcode', 'bb']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self._code = get_bool_opt(options, 'codetag', False)
self._mono = get_bool_opt(options, 'monofont', False)
self.styles = {}
self._make_styles()
def _make_styles(self):
for ttype, ndef in self.style:
start = end = ''
if ndef['color']:
start += '[color=#%s]' % ndef['color']
end = '[/color]' + end
if ndef['bold']:
start += '[b]'
end = '[/b]' + end
if ndef['italic']:
start += '[i]'
end = '[/i]' + end
if ndef['underline']:
start += '[u]'
end = '[/u]' + end
# there are no common BBcodes for background-color and border
self.styles[ttype] = start, end
def format_unencoded(self, tokensource, outfile):
if self._code:
outfile.write('[code]')
if self._mono:
outfile.write('[font=monospace]')
lastval = ''
lasttype = None
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
if ttype == lasttype:
lastval += value
else:
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
lastval = value
lasttype = ttype
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
if self._mono:
outfile.write('[/font]')
if self._code:
outfile.write('[/code]')
if self._code or self._mono:
outfile.write('\n')
| gpl-2.0 |
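For context, a typical way to drive the formatter above is through pygments' high-level `highlight()` API (the snippet being highlighted is arbitrary; requires pygments installed):

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import BBCodeFormatter

code = "def add(a, b):\n    return a + b\n"
# codetag/monofont are the two boolean options documented in the class docstring.
print(highlight(code, PythonLexer(), BBCodeFormatter(codetag=True, monofont=True)))
```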
twitter/pants | contrib/node/src/python/pants/contrib/node/subsystems/package_managers.py | 2 | 9205 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from builtins import object
from pants.contrib.node.subsystems.command import command_gen
LOG = logging.getLogger(__name__)
PACKAGE_MANAGER_NPM = 'npm'
PACKAGE_MANAGER_YARNPKG = 'yarnpkg'
PACKAGE_MANAGER_YARNPKG_ALIAS = 'yarn'
VALID_PACKAGE_MANAGERS = [PACKAGE_MANAGER_NPM, PACKAGE_MANAGER_YARNPKG, PACKAGE_MANAGER_YARNPKG_ALIAS]
# TODO: Change to enum type when migrated to Python 3.4+
class PackageInstallationTypeOption(object):
PROD = 'prod'
DEV = 'dev'
PEER = 'peer'
BUNDLE = 'bundle'
OPTIONAL = 'optional'
NO_SAVE = 'not saved'
class PackageInstallationVersionOption(object):
EXACT = 'exact'
TILDE = 'tilde'
class PackageManager(object):
"""Defines node package manager functionalities."""
def __init__(self, name, tool_installations):
self.name = name
self.tool_installations = tool_installations
def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
"""Returns command line args for installing package.
:param install_optional: True to request install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
:param frozen_lockfile: True to disallow automatic update of lock files.
:rtype: list of strings
"""
raise NotImplementedError
def _get_run_script_args(self):
"""Returns command line args to run a package.json script.
:rtype: list of strings
"""
raise NotImplementedError
def _get_add_package_args(self, package, type_option, version_option):
"""Returns command line args to add a node pacakge.
:rtype: list of strings
"""
raise NotImplementedError()
def run_command(self, args=None, node_paths=None):
"""Returns a command that when executed will run an arbitury command via package manager."""
return command_gen(
self.tool_installations,
self.name,
args=args,
node_paths=node_paths
)
def install_module(
self,
install_optional=False,
production_only=False,
force=False,
frozen_lockfile=True,
node_paths=None):
"""Returns a command that when executed will install node package.
:param install_optional: True to install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
:param frozen_lockfile: True to disallow automatic update of lock files.
:param node_paths: A list of paths that should be included in $PATH when
running the installation.
"""
args=self._get_installation_args(
install_optional=install_optional,
production_only=production_only,
force=force,
frozen_lockfile=frozen_lockfile)
return self.run_command(args=args, node_paths=node_paths)
def run_script(self, script_name, script_args=None, node_paths=None):
"""Returns a command to execute a package.json script.
:param script_name: Name of the script to run. Note that script name 'test'
can be used to run node tests.
:param script_args: Args to be passed to package.json script.
:param node_paths: A list of paths that should be included in $PATH when
running the script.
"""
# TODO: consider add a pants.util function to manipulate command line.
package_manager_args = self._get_run_script_args()
package_manager_args.append(script_name)
if script_args:
package_manager_args.append('--')
package_manager_args.extend(script_args)
return self.run_command(args=package_manager_args, node_paths=node_paths)
def add_package(
self,
package,
node_paths=None,
type_option=PackageInstallationTypeOption.PROD,
version_option=None):
"""Returns a command that when executed will add a node package to current node module.
:param package: string. A valid npm/yarn package description. The accepted forms are
package-name, package-name@version, package-name@tag, file:/folder,
file:/path/to.tgz, or https://url/to.tgz
:param node_paths: A list of paths that should be included in $PATH when
running the script.
:param type_option: A value from PackageInstallationTypeOption that indicates the type
of package to be installed. Default to 'prod', which is a production dependency.
:param version_option: A value from PackageInstallationVersionOption that indicates how
to match version. Default to None, which uses package manager default.
"""
args=self._get_add_package_args(
package,
type_option=type_option,
version_option=version_option)
return self.run_command(args=args, node_paths=node_paths)
def run_cli(self, cli, args=None, node_paths=None):
"""Returns a command that when executed will run an installed cli via package manager."""
cli_args = [cli]
if args:
cli_args.append('--')
cli_args.extend(args)
return self.run_command(args=cli_args, node_paths=node_paths)
class PackageManagerYarnpkg(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerYarnpkg, self).__init__(PACKAGE_MANAGER_YARNPKG, tool_installation)
def _get_run_script_args(self):
return ['run']
def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
return_args = ['--non-interactive']
if not install_optional:
return_args.append('--ignore-optional')
if production_only:
return_args.append('--production=true')
if force:
return_args.append('--force')
if frozen_lockfile:
return_args.append('--frozen-lockfile')
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['add', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '', # Yarn save production is the default.
PackageInstallationTypeOption.DEV: '--dev',
PackageInstallationTypeOption.PEER: '--peer',
PackageInstallationTypeOption.OPTIONAL: '--optional',
PackageInstallationTypeOption.BUNDLE: None,
PackageInstallationTypeOption.NO_SAVE: None,
}.get(type_option)
if package_type_option is None:
LOG.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--exact',
PackageInstallationVersionOption.TILDE: '--tilde',
}.get(version_option)
if package_version_option is None:
LOG.warning(
'{} does not support install with {} version, ignored'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
class PackageManagerNpm(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerNpm, self).__init__(PACKAGE_MANAGER_NPM, tool_installation)
def _get_run_script_args(self):
return ['run-script']
def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
return_args = ['install']
if not install_optional:
return_args.append('--no-optional')
if production_only:
return_args.append('--production')
if force:
return_args.append('--force')
if frozen_lockfile:
LOG.warning('{} does not support frozen lockfile option. Ignored.'.format(self.name))
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['install', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '--save-prod',
PackageInstallationTypeOption.DEV: '--save-dev',
PackageInstallationTypeOption.PEER: None,
PackageInstallationTypeOption.OPTIONAL: '--save-optional',
PackageInstallationTypeOption.BUNDLE: '--save-bundle',
PackageInstallationTypeOption.NO_SAVE: '--no-save',
}.get(type_option)
if package_type_option is None:
LOG.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--save-exact',
PackageInstallationVersionOption.TILDE: None,
}.get(version_option)
if package_version_option is None:
LOG.warning(
'{} does not support install with {} version, ignored.'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
def run_cli(self, cli, args=None, node_paths=None):
raise RuntimeError('npm does not support run cli directly. Please use Yarn instead.')
| apache-2.0 |
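Because `_get_installation_args` is pure argument assembly, its contract is easy to check without a real tool installation. Assuming the module above imports cleanly (it pulls in pants internals), the empty `tool_installation` below is just a stand-in — real values come from pants' Node subsystem:

```python
yarn = PackageManagerYarnpkg(tool_installation=[])
args = yarn._get_installation_args(install_optional=False, production_only=True,
                                   force=False, frozen_lockfile=True)
assert args == ['--non-interactive', '--ignore-optional',
                '--production=true', '--frozen-lockfile']
```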
tuxos/Django-facebook | django_facebook/model_managers.py | 24 | 4038 | from django.db.models.query_utils import Q
from django.core.cache import cache
from django.db import models
import operator
import random
from datetime import timedelta
from django_facebook.utils import compatible_datetime as datetime
from django.contrib.contenttypes.models import ContentType
import logging
from open_facebook.exceptions import OAuthException, UnsupportedDeleteRequest
logger = logging.getLogger(__name__)
class FacebookUserManager(models.Manager):
def find_users(self, queries, base_queryset=None):
'''
Queries, a list of search queries
Base Queryset, the base queryset in which we are searching
'''
if base_queryset is None:
base_queryset = self.all()
filters = []
for query in queries:
match = Q(
name__istartswith=query) | Q(name__icontains=' %s' % query)
filters.append(match)
users = base_queryset.filter(reduce(operator.and_, filters))
return users
def random_facebook_friends(self, user, gender=None, limit=3):
'''
Returns a random sample of your FB friends
Limit = Number of friends
Gender = None, M or F
'''
assert gender in (
None, 'M', 'F'), "Gender %s wasn't recognized" % gender
from django_facebook.utils import get_profile_model
facebook_cache_key = 'facebook_users_%s' % user.id
non_members = cache.get(facebook_cache_key)
profile_class = get_profile_model()
if not non_members:
facebook_users = list(
self.filter(user_id=user.id, gender=gender)[:50])
facebook_ids = [u.facebook_id for u in facebook_users]
members = list(profile_class.objects.filter(
facebook_id__in=facebook_ids).select_related('user'))
member_ids = [p.facebook_id for p in members]
non_members = [
u for u in facebook_users if u.facebook_id not in member_ids]
cache.set(facebook_cache_key, non_members, 60 * 60)
random_limit = min(len(non_members), limit)
random_facebook_users = []
if random_limit:
random_facebook_users = random.sample(non_members, random_limit)
return random_facebook_users
class OpenGraphShareManager(models.Manager):
def failed(self):
qs = self.filter(completed_at__isnull=True)
return qs
def recently_failed(self):
from django_facebook import settings as facebook_settings
now = datetime.now()
recent_delta = timedelta(
days=facebook_settings.FACEBOOK_OG_SHARE_RETRY_DAYS)
recent = now - recent_delta
failed = self.failed()
recently_failed = failed.filter(created_at__gte=recent)
return recently_failed
def shares_for_instance(self, instance, user):
content_type = ContentType.objects.get_for_model(instance)
shares = self.filter(
user=user,
object_id=instance.id,
content_type=content_type,
completed_at__isnull=False,
removed_at__isnull=True,
)
return shares
def remove_shares_for_instance(self, content_object, user):
'''
Removes all shares for this content_object and user combination
'''
shares = self.shares_for_instance(content_object, user)
shares = shares.filter(
completed_at__isnull=False, removed_at__isnull=True)
shares = list(shares[:1000])
logger.info('found %s shares to remove', len(shares))
for share in shares:
logger.info('removed share %s', share)
try:
share.remove()
except (OAuthException, UnsupportedDeleteRequest) as e:
# oauth exceptions happen when tokens are removed
# unsupported delete requests when the resource is already
# removed
logger.info('removing share failed, got error %s', e)
| bsd-3-clause |
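The `reduce(operator.and_, filters)` idiom in `find_users` is worth isolating: it builds one OR-pair of `Q` objects per search term, then ANDs the pairs together. A sketch of just that composition (requires Django installed; the original file relies on Python 2's builtin `reduce`, imported from `functools` here; the final queryset call is illustrative and left commented out):

```python
import operator
from functools import reduce

from django.db.models import Q

queries = ['alice', 'bob']
filters = [Q(name__istartswith=q) | Q(name__icontains=' %s' % q)
           for q in queries]
combined = reduce(operator.and_, filters)   # every term must match somewhere
# profile_class.objects.filter(combined)    # as used in find_users above
print(combined)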
3dfxsoftware/cbss-addons | mrp_account_variation/mrp_account_variation_price.py | 1 | 6744 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# info@vauxoo.com
############################################################################
# Coded by: julio (julio@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.netsvc as netsvc
import time
class mrp_production(osv.Model):
_inherit = 'mrp.production'
def action_finish(self, cr, uid, ids, context={}):
res = super(mrp_production, self).action_finish(
cr, uid, ids, context=context)
self.create_move_variation_price(cr, uid, ids, context=context)
return res
def create_move_variation_price(self, cr, uid, ids, context={}):
move_obj = self.pool.get('account.move')
product_uom_pool = self.pool.get('product.uom')
for production in self.browse(cr, uid, ids, context=context):
if production.product_id.valuation == 'real_time':
account_moves = []
total_product_consumed = 0.0
total_product_finished = 0.0
for prod_consumed in production.product_lines:
product_consumed = product_uom_pool._compute_qty(
cr, uid, prod_consumed.product_uom.id,
prod_consumed.product_qty,
to_uom_id=prod_consumed.product_id.uom_id.id)
total_product_consumed += product_consumed * \
prod_consumed.product_id.standard_price
for prod_finished in production.pt_planified_ids:
product_finished = product_uom_pool._compute_qty(
cr, uid, prod_finished.product_uom.id,
prod_finished.quantity,
to_uom_id=prod_finished.product_id.uom_id.id)
total_product_finished += product_finished * \
prod_finished.product_id.standard_price
if production.product_id.property_stock_production:
if total_product_consumed > total_product_finished:
if production.product_id.property_stock_production.property_account_in_production_price_difference:
src_account_id = production.product_id.property_stock_production.valuation_out_account_id.id
dest_account_id = production.product_id.property_stock_production.property_account_in_production_price_difference.id
reference_amount = (
total_product_consumed -\
total_product_finished)
journal_id = production.product_id.categ_id.property_stock_journal.id
account_moves = [(journal_id,
self.create_account_variation_price_move_line(
cr, uid, production, src_account_id,
dest_account_id, reference_amount,
context=None))]
if total_product_consumed < total_product_finished:
if production.product_id.property_stock_production.property_account_out_production_price_difference:
src_account_id = production.product_id.property_stock_production.property_account_out_production_price_difference.id
dest_account_id = production.product_id.property_stock_production.valuation_in_account_id.id
reference_amount = (
total_product_consumed -\
total_product_finished)*-1
journal_id = production.product_id.categ_id.property_stock_journal.id
account_moves = [(journal_id,
self.create_account_variation_price_move_line(
cr, uid, production, src_account_id,
dest_account_id, reference_amount,
context=None))]
if account_moves:
for j_id, move_lines in account_moves:
move_obj.create(cr, uid,
{
'journal_id': j_id,
'line_id': move_lines,
'ref': 'PROD: ' + production.name +
' - ' + _('Deflection by '
'difference on consume RM vs FP')})
return True
def create_account_variation_price_move_line(self, cr, uid, production,
src_account_id,
dest_account_id,
reference_amount,
context=None):
debit_line_vals = {
'name': 'PROD: ' + production.name or '',
'date': time.strftime('%Y-%m-%d'),
'debit': reference_amount,
'account_id': dest_account_id,
'production_id': production.id
}
credit_line_vals = {
'name': 'PROD: ' + production.name or '',
'date': time.strftime('%Y-%m-%d'),
'credit': reference_amount,
'account_id': src_account_id,
'production_id': production.id
}
return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
| gpl-2.0 |
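Stripped of the ORM plumbing, the branch logic above reduces to signed-variance bookkeeping: the consumed-vs-finished standard cost decides both the journal amount and which accounts take the debit and credit. In plain Python (the numbers are invented):

```python
consumed = 120.0   # total standard cost of raw materials consumed
finished = 100.0   # total standard cost of finished products
if consumed > finished:
    amount = consumed - finished            # unfavourable variance: 20.0
elif consumed < finished:
    amount = (consumed - finished) * -1     # favourable variance, kept positive
else:
    amount = 0.0                            # no journal entry needed
print(amount)
```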
Beauhurst/django | django/dispatch/dispatcher.py | 42 | 10848 | import threading
import weakref
from django.utils.inspect import func_accepts_kwargs
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal:
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
a Python object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = weakref.WeakMethod
receiver_object = receiver.__self__
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Return a list of tuple pairs [(receiver, response), ... ].
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
return [
(receiver, receiver(signal=self, sender=sender, **named))
for receiver in self._live_receivers(sender)
]
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ].
If any receiver raises an error (specifically any subclass of
Exception), return the error instance as the result for that receiver.
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
| bsd-3-clause |
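A minimal end-to-end use of the machinery above via the public import path — the signal and receiver names are made up, and note that `providing_args` matches the Django version shown here (newer releases removed the argument):

```python
from django.dispatch import Signal, receiver

pizza_done = Signal(providing_args=['size'])

@receiver(pizza_done)
def log_pizza(sender, size=None, **kwargs):
    print('pizza from %r, size %s' % (sender, size))

responses = pizza_done.send(sender='oven', size='large')
assert len(responses) == 1   # (receiver, return value) pairs
```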
ylatuya/Flumotion | flumotion/component/bouncers/htpasswdcrypt.py | 1 | 3023 | # -*- Mode: Python; test-case-name: flumotion.test.test_htpasswdcrypt -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""
an htpasswd-backed bouncer with crypt passwords
"""
import random
from twisted.python import components
from twisted.internet import defer
from flumotion.common import interfaces, keycards, log, errors
from flumotion.component import component
from flumotion.component.bouncers import bouncer
from flumotion.twisted import credentials, checkers
__all__ = ['HTPasswdCrypt']
__version__ = "$Rev$"
class HTPasswdCrypt(bouncer.ChallengeResponseBouncer):
logCategory = 'htpasswdcrypt'
# challenger type first, because it's more secure thus preferable
keycardClasses = (keycards.KeycardUACPCC, keycards.KeycardUACPP)
challengeResponseClasses = (keycards.KeycardUACPCC, )
def do_setup(self):
conf = self.config
# we need either a filename or data
filename = None
data = None
props = conf['properties']
if 'filename' in props:
filename = props['filename']
self.debug('using file %s for passwords', filename)
elif 'data' in props:
data = props['data']
self.debug('using in-line data for passwords')
else:
return defer.fail(errors.ConfigError(
'HTPasswdCrypt needs either a <data> or <filename> entry'))
# FIXME: generalize to a start method, possibly linked to mood
if filename:
try:
lines = open(filename).readlines()
except IOError, e:
return defer.fail(errors.ConfigError(str(e)))
else:
lines = data.split("\n")
self.setChecker(checkers.CryptChecker())
for line in lines:
if not ':' in line:
continue
# when coming from a file, it ends in \n, so strip.
# for data, we already splitted, so no \n, but strip is fine.
name, cryptPassword = line.strip().split(':')
self.addUser(name, cryptPassword[:2], cryptPassword)
self.debug('parsed %s, %d lines' % (filename or '<memory>',
len(lines)))
return defer.succeed(None)
| gpl-2.0 |
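The checker above expects classic crypt(3) htpasswd lines; the salt is the first two characters of the stored hash, which is why `addUser` receives `cryptPassword[:2]`. A small demonstration (Unix-only: the stdlib `crypt` module is unavailable on Windows and removed in Python 3.13; the user and password are made up):

```python
import crypt

line = 'alice:' + crypt.crypt('secret', 'ab')      # 'ab' is the 2-char salt
name, crypt_password = line.strip().split(':')
# Verifying a login re-crypts the candidate with the stored salt:
assert crypt.crypt('secret', crypt_password[:2]) == crypt_password
```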
thepiper/standoff | vpy/lib/python2.7/site-packages/pip/_vendor/requests/auth.py | 413 | 6794 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
self.num_401_calls = 1
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# Default hash_utf8 to None so an unrecognized algorithm hits the early
# return below instead of raising NameError.
hash_utf8 = None
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
self.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self.num_401_calls = 1
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r
| gpl-3.0 |
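Typical use of the classes above goes through the public requests API; the httpbin URLs are purely demo endpoints:

```python
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth

r = requests.get('https://httpbin.org/digest-auth/auth/user/pass',
                 auth=HTTPDigestAuth('user', 'pass'))
print(r.status_code)   # 200 once the 401 challenge round-trip succeeds

r = requests.get('https://httpbin.org/basic-auth/user/pass',
                 auth=HTTPBasicAuth('user', 'pass'))
print(r.status_code)
```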
alexmogavero/home-assistant | tests/components/cover/test_rfxtrx.py | 25 | 9031 | """The tests for the Rfxtrx cover platform."""
import unittest
import pytest
from homeassistant.setup import setup_component
from homeassistant.components import rfxtrx as rfxtrx_core
from tests.common import get_test_home_assistant, mock_component
@pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'")
class TestCoverRfxtrx(unittest.TestCase):
"""Test the Rfxtrx cover platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component('rfxtrx')
def tearDown(self):
"""Stop everything that was started."""
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx_core.RFX_DEVICES = {}
if rfxtrx_core.RFXOBJECT:
rfxtrx_core.RFXOBJECT.close_connection()
self.hass.stop()
def test_valid_config(self):
"""Test configuration."""
self.assertTrue(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test',
rfxtrx_core.ATTR_FIREEVENT: True}
}}}))
def test_invalid_config_capital_letters(self):
"""Test configuration."""
self.assertFalse(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'2FF7f216': {
'name': 'Test',
'packetid': '0b1100cd0213c7f210010f51',
'signal_repetitions': 3}
}}}))
def test_invalid_config_extra_key(self):
"""Test configuration."""
self.assertFalse(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'automatic_add': True,
'invalid_key': 'afda',
'devices':
{'213c7f216': {
'name': 'Test',
'packetid': '0b1100cd0213c7f210010f51',
rfxtrx_core.ATTR_FIREEVENT: True}
}}}))
def test_invalid_config_capital_packetid(self):
"""Test configuration."""
self.assertFalse(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'213c7f216': {
'name': 'Test',
'packetid': 'AA1100cd0213c7f210010f51',
rfxtrx_core.ATTR_FIREEVENT: True}
}}}))
def test_invalid_config_missing_packetid(self):
"""Test configuration."""
self.assertFalse(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'213c7f216': {
'name': 'Test',
rfxtrx_core.ATTR_FIREEVENT: True}
}}}))
def test_default_config(self):
"""Test with 0 cover."""
self.assertTrue(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'devices': {}}}))
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
def test_one_cover(self):
"""Test with 1 cover."""
self.assertTrue(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'devices':
{'0b1400cd0213c7f210010f51': {
'name': 'Test'
}}}}))
import RFXtrx as rfxtrxmod
rfxtrx_core.RFXOBJECT =\
rfxtrxmod.Core("", transport_protocol=rfxtrxmod.DummyTransport)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
for id in rfxtrx_core.RFX_DEVICES:
entity = rfxtrx_core.RFX_DEVICES[id]
self.assertEqual(entity.signal_repetitions, 1)
self.assertFalse(entity.should_fire_event)
self.assertFalse(entity.should_poll)
entity.open_cover()
entity.close_cover()
entity.stop_cover()
def test_several_covers(self):
"""Test with 3 covers."""
self.assertTrue(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'signal_repetitions': 3,
'devices':
{'0b1100cd0213c7f230010f71': {
'name': 'Test'},
'0b1100100118cdea02010f70': {
'name': 'Bath'},
'0b1100101118cdea02010f70': {
'name': 'Living'}
}}}))
self.assertEqual(3, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
entity = rfxtrx_core.RFX_DEVICES[id]
self.assertEqual(entity.signal_repetitions, 3)
if entity.name == 'Living':
device_num = device_num + 1
elif entity.name == 'Bath':
device_num = device_num + 1
elif entity.name == 'Test':
device_num = device_num + 1
self.assertEqual(3, device_num)
def test_discover_covers(self):
"""Test with discovery of covers."""
self.assertTrue(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0a140002f38cae010f0070')
event.data = bytearray([0x0A, 0x14, 0x00, 0x02, 0xF3, 0x8C,
0xAE, 0x01, 0x0F, 0x00, 0x70])
for evt_sub in rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS:
evt_sub(event)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a1400adf394ab020e0060')
event.data = bytearray([0x0A, 0x14, 0x00, 0xAD, 0xF3, 0x94,
0xAB, 0x02, 0x0E, 0x00, 0x60])
for evt_sub in rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS:
evt_sub(event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a sensor
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
for evt_sub in rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS:
evt_sub(event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a light
event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
event.data = bytearray([0x0b, 0x11, 0x11, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x02, 0x0f, 0x70])
for evt_sub in rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS:
evt_sub(event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
def test_discover_cover_noautoadd(self):
"""Test with discovery of cover when auto add is False."""
self.assertTrue(setup_component(self.hass, 'cover', {
'cover': {'platform': 'rfxtrx',
'automatic_add': False,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0a1400adf394ab010d0060')
event.data = bytearray([0x0A, 0x14, 0x00, 0xAD, 0xF3, 0x94,
0xAB, 0x01, 0x0D, 0x00, 0x60])
for evt_sub in rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS:
evt_sub(event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a1400adf394ab020e0060')
event.data = bytearray([0x0A, 0x14, 0x00, 0xAD, 0xF3, 0x94,
0xAB, 0x02, 0x0E, 0x00, 0x60])
for evt_sub in rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS:
evt_sub(event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a sensor
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
for evt_sub in rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS:
evt_sub(event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
# Trying to add a light
event = rfxtrx_core.get_rfx_object('0b1100100118cdea02010f70')
event.data = bytearray([0x0b, 0x11, 0x11, 0x10, 0x01,
0x18, 0xcd, 0xea, 0x01, 0x02, 0x0f, 0x70])
for evt_sub in rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS:
evt_sub(event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
| apache-2.0 |
funkshelper/woodcore | qa/rpc-tests/bip68-sequence.py | 101 | 18382 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test BIP68 implementation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.blocktools import *
import random
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "64: non-BIP68-final"
class BIP68Test(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-blockprioritysize=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-blockprioritysize=0", "-acceptnonstdtxn=0"]))
self.is_network_split = False
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
def run_test(self):
# Generate some coins
self.nodes[0].generate(110)
print("Running test disable flag")
self.test_disable_flag()
print("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
print("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
print("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
print("Verifying nVersion=2 transactions aren't standard")
self.test_version2_relay(before_activation=True)
print("Activating BIP68 (and 112/113)")
self.activateCSV()
print("Verifying nVersion=2 transactions are now standard")
self.test_version2_relay(before_activation=False)
print("Passed\n")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx2))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
try:
self.nodes[0].sendrawtransaction(rawtx)
except JSONRPCException as exp:
assert(not should_pass and using_sequence_locks)
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(should_pass or not using_sequence_locks)
# Recalculate utxos if we successfully sent the transaction
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
tx.rehash()
try:
node.sendrawtransaction(ToHex(tx))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
assert(orig_tx.hash in node.getrawmempool())
else:
# orig_tx must not be in mempool
assert(orig_tx.hash not in node.getrawmempool())
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(tx2.hash, -1e15, int(-self.relayfee*COIN))
cur_time = int(time.time())
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(tx2.hash, 1e15, int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
try:
self.nodes[0].sendrawtransaction(raw_tx5)
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx3))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].submitblock(ToHex(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
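    # Editorial sketch, not part of the original test: how BIP68 packs a
    # relative locktime into an input's nSequence. The constant below
    # mirrors the name used in the BIP text; it is an assumption here,
    # not an import from the test framework.
    SEQUENCE_LOCKTIME_TYPE_FLAG = 1 << 22  # set -> time-based lock

    @staticmethod
    def encode_bip68_sequence(value, time_based=False):
        # Low 16 bits carry the lock value: blocks, or 512-second units
        # when the type flag is set. Bit 31 would disable the lock.
        seq = value & 0x0000ffff
        if time_based:
            seq |= BIP68Test.SEQUENCE_LOCKTIME_TYPE_FLAG
        return seq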
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert(height < min_activation_height)
self.nodes[0].generate(min_activation_height - height)
assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active')
sync_blocks(self.nodes)
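    # Editorial aside, not part of the original test: min_activation_height
    # above (432) is BIP9 versionbits arithmetic on regtest -- the period
    # length and number of periods below are assumed regtest parameters.
    @staticmethod
    def bip9_min_activation_height(period=144, periods_needed=3):
        # DEFINED -> STARTED -> LOCKED_IN -> ACTIVE spans three full
        # retarget periods, so 3 * 144 = 432.
        return period * periods_needed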
# Use self.nodes[1] to test standardness relay policy
def test_version2_relay(self, before_activation):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
try:
tx_id = self.nodes[1].sendrawtransaction(tx_signed)
assert(before_activation == False)
except:
assert(before_activation)
if __name__ == '__main__':
BIP68Test().main()
| mit |
adamrvfisher/TechnicalAnalysisLibrary | PriceRelativeRemoteSignalSmartPositionMAOptimizerTwoAsset.py | 1 | 6253 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 19:07:37 2017
@author: AmatVictoriaCuramIII
"""
#Changes are pretty much immaterial, use VXV/VIX
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
Empty = []
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
Start = t.time()
Counter = 0
#Input
Ticker1 = 'UVXY'
Ticker2 = '^VIX'
#Remote Signal
Ticker3 = '^VIX'
#Here we go
Asset1 = YahooGrabber(Ticker1)
Asset2 = YahooGrabber(Ticker2)
#Remote Signal
Asset3 = YahooGrabber(Ticker3)
#Match lengths
#Trimmer
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) == len(Asset2):
pass
else:
if len(Asset1) > len(Asset2):
Asset1 = Asset1[trim:]
else:
Asset2 = Asset2[trim:]
Asset3 = Asset3[-len(Asset2):]
#Asset2 = Asset2[-600:]
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close']/Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)
#Prepare the remote controller
Asset3['LogRet'] = np.log(Asset3['Adj Close']/Asset3['Adj Close'].shift(1))
Asset3['LogRet'] = Asset3['LogRet'].fillna(0)
#Brute Force Optimization
iterations = range(0, 300000)
for i in iterations:
Counter = Counter + 1
a = rand.random()
b = 1 - a
c = rand.random()
d = rand.random()
if c + d > 1:
continue
e = rand.randint(3,30)
f = rand.randint(3,30)
g = rand.randint(0,30)
window = int(e)
window2 = int(f)
window3 = int(g)
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset3['Signal'] = np.where(Asset3['MA'].shift(1) > Asset3['Adj Close'].shift(1),
1, 0)
Asset3['CumulativeRollingSignal'] = Asset3['Signal'].rolling(window = window2).sum()
Asset1['Position'] = a
Asset1['SmartPosition'] = np.where(Asset3['CumulativeRollingSignal'] > window3, c, a)
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['SmartPosition'])
Asset2['Position'] = b
Asset2['SmartPosition'] = np.where(Asset3['CumulativeRollingSignal'] > window3, d, b)
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['SmartPosition'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) * (-1) #Pass a short position
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1)
# Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
#asone['PriceRelative'][-180:].plot(grid = True, figsize = (8,5))
Portfolio['LongShort'] = (Portfolio['Asset1Pass']) + (Portfolio['Asset2Pass'])
# Portfolio['LongShort'][-180:].cumsum().apply(np.exp).plot(grid=True,
# figsize=(8,5))
if Portfolio['LongShort'].std() == 0:
continue
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
if MaxDD > float(.3):
continue
dailyreturn = Portfolio['LongShort'].mean()
if dailyreturn < .002:
continue
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
print(Counter)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(d)
Empty.append(e)
Empty.append(f)
Empty.append(g)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
Dataset[i] = Emptyseries.values
Empty[:] = []
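#Editorial sketch, not part of the original script: the loop above is a
#plain random search. Factored out, the same pattern looks like this;
#the parameter-sampler and score-function names are illustrative
#assumptions, not part of this file.
def random_search(sample_params, score_fn, iterations=1000):
    best_params, best_score = None, float('-inf')
    for _ in range(iterations):
        params = sample_params()        #e.g. random windows and weights
        score = score_fn(params)        #e.g. sharpe, or None to reject
        if score is not None and score > best_score:
            best_params, best_score = params, score
    return best_params, best_score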
z1 = Dataset.iloc[8]
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[8]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[8]] #this is the column number
kfloat = float(k[0])
End = t.time()
print(End-Start, 'seconds later')
print(Dataset[k])
window = int(Dataset[kfloat][4])
window2 = int(Dataset[kfloat][5])
window3 = int(Dataset[kfloat][6])
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
#Recompute the signal with the winning MA window; the loop above leaves a stale column
Asset3['Signal'] = np.where(Asset3['MA'].shift(1) > Asset3['Adj Close'].shift(1),
1, 0)
Asset3['CumulativeRollingSignal'] = Asset3['Signal'].rolling(window = window2).sum()
Asset1['Position'] = Dataset[kfloat][0]
Asset1['SmartPosition'] = np.where(Asset3['CumulativeRollingSignal'] > window3,
Dataset[kfloat][2], Dataset[kfloat][0])
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['SmartPosition'])
Asset2['Position'] = Dataset[kfloat][1]
Asset2['SmartPosition'] = np.where(Asset3['CumulativeRollingSignal'] > window3,
Dataset[kfloat][3], Dataset[kfloat][1])
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['SmartPosition'])
Portfolio['Asset1Pass'] = Asset1['Pass'] * (-1)
Portfolio['Asset2Pass'] = Asset2['Pass'] #* (-1)
#Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
#asone['PriceRelative'][-180:].plot(grid = True, figsize = (8,5))
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
Portfolio['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
dailyreturn = Portfolio['LongShort'].mean()
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown2 = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
#conversionfactor = Portfolio['PriceRelative'][-1]
print(max(drawdown2))
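#Editorial aside, not part of the original script: the drawdown math
#above in one standalone helper (input assumed to be a pandas Series of
#cumulative equity, as built here with cumsum().apply(np.exp)).
def max_drawdown(equity):
    peak = equity.cummax()              #running high-water mark
    return (1 - equity / peak).max()    #worst peak-to-trough fraction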
#pd.to_pickle(Portfolio, 'VXX:UVXY')
| apache-2.0 |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_sys_setprofile.py | 123 | 11378 | import gc
import pprint
import sys
import unittest
from test import test_support
class TestGetProfile(unittest.TestCase):
def setUp(self):
sys.setprofile(None)
def tearDown(self):
sys.setprofile(None)
def test_empty(self):
self.assertIsNone(sys.getprofile())
def test_setget(self):
def fn(*args):
pass
sys.setprofile(fn)
self.assertIs(sys.getprofile(), fn)
class HookWatcher:
def __init__(self):
self.frames = []
self.events = []
def callback(self, frame, event, arg):
if (event == "call"
or event == "return"
or event == "exception"):
self.add_event(event, frame)
def add_event(self, event, frame=None):
"""Add an event to the log."""
if frame is None:
frame = sys._getframe(1)
try:
frameno = self.frames.index(frame)
except ValueError:
frameno = len(self.frames)
self.frames.append(frame)
self.events.append((frameno, event, ident(frame)))
def get_events(self):
"""Remove calls to add_event()."""
disallowed = [ident(self.add_event.im_func), ident(ident)]
self.frames = None
return [item for item in self.events if item[2] not in disallowed]
class ProfileSimulator(HookWatcher):
def __init__(self, testcase):
self.testcase = testcase
self.stack = []
HookWatcher.__init__(self)
def callback(self, frame, event, arg):
# Callback registered with sys.setprofile()/sys.settrace()
self.dispatch[event](self, frame)
def trace_call(self, frame):
self.add_event('call', frame)
self.stack.append(frame)
def trace_return(self, frame):
self.add_event('return', frame)
self.stack.pop()
def trace_exception(self, frame):
self.testcase.fail(
"the profiler should never receive exception events")
def trace_pass(self, frame):
pass
dispatch = {
'call': trace_call,
'exception': trace_exception,
'return': trace_return,
'c_call': trace_pass,
'c_return': trace_pass,
'c_exception': trace_pass,
}
class TestCaseBase(unittest.TestCase):
def check_events(self, callable, expected):
events = capture_events(callable, self.new_watcher())
if events != expected:
self.fail("Expected events:\n%s\nReceived events:\n%s"
% (pprint.pformat(expected), pprint.pformat(events)))
class ProfileHookTestCase(TestCaseBase):
def new_watcher(self):
return HookWatcher()
def test_simple(self):
def f(p):
pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_exception(self):
def f(p):
1./0
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_caught_exception(self):
def f(p):
try: 1./0
except: pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_caught_nested_exception(self):
def f(p):
try: 1./0
except: pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_nested_exception(self):
def f(p):
1./0
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
# This isn't what I expected:
# (0, 'exception', protect_ident),
# I expected this again:
(1, 'return', f_ident),
])
def test_exception_in_except_clause(self):
def f(p):
1./0
def g(p):
try:
f(p)
except:
try: f(p)
except: pass
f_ident = ident(f)
g_ident = ident(g)
self.check_events(g, [(1, 'call', g_ident),
(2, 'call', f_ident),
(2, 'return', f_ident),
(3, 'call', f_ident),
(3, 'return', f_ident),
(1, 'return', g_ident),
])
def test_exception_propagation(self):
def f(p):
1./0
def g(p):
try: f(p)
finally: p.add_event("falling through")
f_ident = ident(f)
g_ident = ident(g)
self.check_events(g, [(1, 'call', g_ident),
(2, 'call', f_ident),
(2, 'return', f_ident),
(1, 'falling through', g_ident),
(1, 'return', g_ident),
])
def test_raise_twice(self):
def f(p):
try: 1./0
except: 1./0
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_raise_reraise(self):
def f(p):
try: 1./0
except: raise
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_raise(self):
def f(p):
raise Exception()
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_distant_exception(self):
def f():
1./0
def g():
f()
def h():
g()
def i():
h()
def j(p):
i()
f_ident = ident(f)
g_ident = ident(g)
h_ident = ident(h)
i_ident = ident(i)
j_ident = ident(j)
self.check_events(j, [(1, 'call', j_ident),
(2, 'call', i_ident),
(3, 'call', h_ident),
(4, 'call', g_ident),
(5, 'call', f_ident),
(5, 'return', f_ident),
(4, 'return', g_ident),
(3, 'return', h_ident),
(2, 'return', i_ident),
(1, 'return', j_ident),
])
def test_generator(self):
def f():
for i in range(2):
yield i
def g(p):
for i in f():
pass
f_ident = ident(f)
g_ident = ident(g)
self.check_events(g, [(1, 'call', g_ident),
# call the iterator twice to generate values
(2, 'call', f_ident),
(2, 'return', f_ident),
(2, 'call', f_ident),
(2, 'return', f_ident),
# once more; returns end-of-iteration without
# actually raising an exception
(2, 'call', f_ident),
(2, 'return', f_ident),
(1, 'return', g_ident),
])
def test_stop_iteration(self):
def f():
for i in range(2):
yield i
raise StopIteration
def g(p):
for i in f():
pass
f_ident = ident(f)
g_ident = ident(g)
self.check_events(g, [(1, 'call', g_ident),
# call the iterator twice to generate values
(2, 'call', f_ident),
(2, 'return', f_ident),
(2, 'call', f_ident),
(2, 'return', f_ident),
# once more to hit the raise:
(2, 'call', f_ident),
(2, 'return', f_ident),
(1, 'return', g_ident),
])
class ProfileSimulatorTestCase(TestCaseBase):
def new_watcher(self):
return ProfileSimulator(self)
def test_simple(self):
def f(p):
pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_basic_exception(self):
def f(p):
1./0
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_caught_exception(self):
def f(p):
try: 1./0
except: pass
f_ident = ident(f)
self.check_events(f, [(1, 'call', f_ident),
(1, 'return', f_ident),
])
def test_distant_exception(self):
def f():
1./0
def g():
f()
def h():
g()
def i():
h()
def j(p):
i()
f_ident = ident(f)
g_ident = ident(g)
h_ident = ident(h)
i_ident = ident(i)
j_ident = ident(j)
self.check_events(j, [(1, 'call', j_ident),
(2, 'call', i_ident),
(3, 'call', h_ident),
(4, 'call', g_ident),
(5, 'call', f_ident),
(5, 'return', f_ident),
(4, 'return', g_ident),
(3, 'return', h_ident),
(2, 'return', i_ident),
(1, 'return', j_ident),
])
def ident(function):
if hasattr(function, "f_code"):
code = function.f_code
else:
code = function.func_code
return code.co_firstlineno, code.co_name
def protect(f, p):
try: f(p)
except: pass
protect_ident = ident(protect)
def capture_events(callable, p=None):
if p is None:
p = HookWatcher()
# Disable the garbage collector. This prevents __del__s from showing up in
# traces.
old_gc = gc.isenabled()
gc.disable()
try:
sys.setprofile(p.callback)
protect(callable, p)
sys.setprofile(None)
finally:
if old_gc:
gc.enable()
return p.get_events()[1:-1]
def show_events(callable):
import pprint
pprint.pprint(capture_events(callable))
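def demo_capture():
    # Editorial sketch, not part of the original suite: minimal use of
    # capture_events() on a trivial callable; expect one call/return
    # pair attributed to noop's code object.
    def noop(p):
        pass
    return capture_events(noop)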
def test_main():
test_support.run_unittest(
TestGetProfile,
ProfileHookTestCase,
ProfileSimulatorTestCase
)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
lukeiwanski/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py | 90 | 17822 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
distributions = distributions_lib
rng = np.random.RandomState(123)
class QuantizedDistributionTest(test.TestCase):
def _assert_all_finite(self, array):
self.assertTrue(np.isfinite(array).all())
def testQuantizationOfUniformWithCutoffsHavingNoEffect(self):
with self.test_session() as sess:
# The Quantized uniform with cutoffs == None divides the real line into:
# R = ...(-1, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# Since this uniform (below) is supported on [0, 3],
# it places 1/3 of its mass in the intervals j = 1, 2, 3.
# Adding a cutoff at y = 0 changes the picture to
# R = ...(-inf, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# So the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
# Adding a cutoff at y = 3 changes the picture to
# R = ...(-1, 0](0, 1](1, 2](2, inf)
# j = ... 0 1 2 3
# and the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
for lcut, ucut in [(None, None), (0.0, None), (None, 3.0), (0.0, 3.0),
(-10., 10.)]:
qdist = distributions.QuantizedDistribution(
distribution=distributions.Uniform(low=0.0, high=3.0),
low=lcut,
high=ucut)
# pmf
pmf_n1, pmf_0, pmf_1, pmf_2, pmf_3, pmf_4, pmf_5 = sess.run(
qdist.prob([-1., 0., 1., 2., 3., 4., 5.]))
# uniform had no mass below -1.
self.assertAllClose(0., pmf_n1)
# uniform had no mass below 0.
self.assertAllClose(0., pmf_0)
# uniform put 1/3 of its mass in each of (0, 1], (1, 2], (2, 3],
# which are the intervals j = 1, 2, 3.
self.assertAllClose(1 / 3, pmf_1)
self.assertAllClose(1 / 3, pmf_2)
self.assertAllClose(1 / 3, pmf_3)
# uniform had no mass in (3, 4] or (4, 5], which are j = 4, 5.
self.assertAllClose(0 / 3, pmf_4)
self.assertAllClose(0 / 3, pmf_5)
# cdf
cdf_n1, cdf_0, cdf_1, cdf_2, cdf_2p5, cdf_3, cdf_4, cdf_5 = sess.run(
qdist.cdf([-1., 0., 1., 2., 2.5, 3., 4., 5.]))
self.assertAllClose(0., cdf_n1)
self.assertAllClose(0., cdf_0)
self.assertAllClose(1 / 3, cdf_1)
self.assertAllClose(2 / 3, cdf_2)
# Note fractional values allowed for cdfs of discrete distributions.
# And adding 0.5 makes no difference because the quantized dist has
# mass only on the integers, never in between.
self.assertAllClose(2 / 3, cdf_2p5)
self.assertAllClose(3 / 3, cdf_3)
self.assertAllClose(3 / 3, cdf_4)
self.assertAllClose(3 / 3, cdf_5)
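  def _demo_quantize_uniform(self):
    # Editorial sketch, not part of the original suite: the interval
    # bookkeeping above reproduced with plain numpy. Quantization by
    # ceiling maps y in (j-1, j] to j, so a Uniform[0, 3] sample puts
    # roughly 1/3 of its mass on each of j = 1, 2, 3.
    samples = rng.uniform(0., 3., size=100000)
    quantized = np.ceil(samples)
    return [(quantized == j).mean() for j in (1., 2., 3.)]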
def testQuantizationOfUniformWithCutoffsInTheMiddle(self):
with self.test_session() as sess:
# The uniform is supported on [-3, 3]
# Consider partitions the real line in intervals
# ...(-3, -2](-2, -1](-1, 0](0, 1](1, 2](2, 3] ...
# Before cutoffs, the uniform puts a mass of 1/6 in each interval written
# above. Because of cutoffs, the qdist considers intervals and indices
# ...(-infty, -1](-1, 0](0, infty) ...
# -1 0 1
qdist = distributions.QuantizedDistribution(
distribution=distributions.Uniform(low=-3., high=3.),
low=-1.0,
high=1.0)
# pmf
cdf_n3, cdf_n2, cdf_n1, cdf_0, cdf_0p5, cdf_1, cdf_10 = sess.run(
qdist.cdf([-3., -2., -1., 0., 0.5, 1.0, 10.0]))
# Uniform had no mass on (-4, -3] or (-3, -2]
self.assertAllClose(0., cdf_n3)
self.assertAllClose(0., cdf_n2)
# Uniform had 1/6 of its mass in each of (-3, -2], and (-2, -1], which
# were collapsed into (-infty, -1], which is now the "-1" interval.
self.assertAllClose(1 / 3, cdf_n1)
# The j=0 interval contained mass from (-3, 0], which is 1/2 of the
# uniform's mass.
self.assertAllClose(1 / 2, cdf_0)
# Adding 0.5 makes no difference because the quantized dist has mass on
# the integers, not in between them.
self.assertAllClose(1 / 2, cdf_0p5)
# After applying the cutoff, all mass was either in the interval
# (0, infty), or below. (0, infty) is the interval indexed by j=1,
# so pmf(1) should equal 1.
self.assertAllClose(1., cdf_1)
# Since no mass of qdist is above 1,
# pmf(10) = P[Y <= 10] = P[Y <= 1] = pmf(1).
self.assertAllClose(1., cdf_10)
def testQuantizationOfBatchOfUniforms(self):
batch_shape = (5, 5)
with self.test_session():
# The uniforms are supported on [0, 10]. The qdist considers the
# intervals
# ... (0, 1](1, 2]...(9, 10]...
# with the intervals displayed above each holding 1 / 10 of the mass.
# The qdist will be defined with no cutoffs,
uniform = distributions.Uniform(
low=array_ops.zeros(batch_shape, dtype=dtypes.float32),
high=10 * array_ops.ones(batch_shape, dtype=dtypes.float32))
qdist = distributions.QuantizedDistribution(
distribution=uniform, low=None, high=None)
# x is random integers in {-3,...,12}.
x = rng.randint(-3, 13, size=batch_shape).astype(np.float32)
# pmf
# qdist.prob(j) = 1 / 10 for j in {1,...,10}, and 0 otherwise,
expected_pmf = (1 / 10) * np.ones(batch_shape)
expected_pmf[x < 1] = 0.
expected_pmf[x > 10] = 0.
self.assertAllClose(expected_pmf, qdist.prob(x).eval())
# cdf
# qdist.cdf(j)
# = 0 for j < 1
# = j / 10, for j in {1,...,10},
# = 1, for j > 10.
expected_cdf = x.copy() / 10
expected_cdf[x < 1] = 0.
expected_cdf[x > 10] = 1.
self.assertAllClose(expected_cdf, qdist.cdf(x).eval())
def testSamplingFromBatchOfNormals(self):
batch_shape = (2,)
with self.test_session():
normal = distributions.Normal(
loc=array_ops.zeros(
batch_shape, dtype=dtypes.float32),
scale=array_ops.ones(
batch_shape, dtype=dtypes.float32))
qdist = distributions.QuantizedDistribution(
distribution=normal, low=0., high=None)
samps = qdist.sample(5000, seed=42)
samps_v = samps.eval()
# With low = 0, the interval j=0 is (-infty, 0], which holds 1/2
# of the mass of the normals.
# rtol chosen to be 2x as large as necessary to pass.
self.assertAllClose([0.5, 0.5], (samps_v == 0).mean(axis=0), rtol=0.03)
# The interval j=1 is (0, 1], which is from the mean to one standard
# deviation out. This should contain 0.6827 / 2 of the mass.
self.assertAllClose(
[0.6827 / 2, 0.6827 / 2], (samps_v == 1).mean(axis=0), rtol=0.03)
def testSamplesAgreeWithCdfForSamplesOverLargeRange(self):
# Consider the cdf for distribution X, F(x).
# If U ~ Uniform[0, 1], then Y := F^{-1}(U) is distributed like X since
# P[Y <= y] = P[F^{-1}(U) <= y] = P[U <= F(y)] = F(y).
# If F is a bijection, we also have Z = F(X) is Uniform.
#
# Make an exponential with large mean (= 100). This ensures we will get
# quantized values over a large range. This large range allows us to
# pretend that the cdf F is a bijection, and hence F(X) is uniform.
# Note that F cannot be a bijection since it is constant between the
# integers. Hence, F(X) (see below) will not be uniform exactly.
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Exponential(rate=0.01))
# X ~ QuantizedExponential
x = qdist.sample(10000, seed=42)
# Z = F(X), should be Uniform.
z = qdist.cdf(x)
# Compare the CDF of Z to that of a Uniform.
# dist = maximum distance between P[Z <= a] and P[U <= a].
# We ignore pvalue, since of course this distribution is not exactly uniform, and
# with so many sample points we would get a false fail.
dist, _ = stats.kstest(z.eval(), "uniform")
# Since the distribution take values (approximately) in [0, 100], the
# cdf should have jumps (approximately) every 1/100 of the way up.
# Assert that the jumps are not more than 2/100.
self.assertLess(dist, 0.02)
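  def _demo_inverse_transform(self, rate=0.01, n=10000):
    # Editorial sketch, not part of the original suite: the F^{-1}(U)
    # argument above is inverse-transform sampling. For Exponential(rate)
    # the inverse cdf has the closed form below.
    u = rng.rand(n)
    return -np.log1p(-u) / rate  # F^{-1}(u) = -log(1 - u) / rate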
def testSamplesAgreeWithPdfForSamplesOverSmallRange(self):
# Testing that samples and pdf agree for a small range is important because
# it makes sure the bin edges are consistent.
# Make an exponential with mean 5.
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Exponential(rate=0.2))
# Standard error should be less than 1 / (2 * sqrt(n_samples))
n_samples = 10000
stddev_err_bound = 1 / (2 * np.sqrt(n_samples))
samps = qdist.sample((n_samples,), seed=42).eval()
# The smallest value the samples can take on is 1, which corresponds to
# the interval (0, 1]. Recall we use ceiling in the sampling definition.
self.assertLess(0.5, samps.min())
x_vals = np.arange(1, 11).astype(np.float32)
pmf_vals = qdist.prob(x_vals).eval()
for ii in range(10):
self.assertAllClose(
pmf_vals[ii], (samps == x_vals[ii]).mean(), atol=stddev_err_bound)
def testNormalCdfAndSurvivalFunction(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = rng.randn(*batch_shape)
sigma = rng.rand(*batch_shape) + 1.0
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
sp_normal = stats.norm(mu, sigma)
x = rng.randint(-5, 5, size=batch_shape).astype(np.float64)
self.assertAllClose(sp_normal.cdf(x), qdist.cdf(x).eval())
self.assertAllClose(sp_normal.sf(x), qdist.survival_function(x).eval())
def testNormalLogCdfAndLogSurvivalFunction(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = rng.randn(*batch_shape)
sigma = rng.rand(*batch_shape) + 1.0
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
sp_normal = stats.norm(mu, sigma)
x = rng.randint(-10, 10, size=batch_shape).astype(np.float64)
self.assertAllClose(sp_normal.logcdf(x), qdist.log_cdf(x).eval())
self.assertAllClose(
sp_normal.logsf(x), qdist.log_survival_function(x).eval())
def testNormalProbWithCutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=-2.,
high=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(sm_normal.cdf(-2), qdist.prob(-2.).eval(), atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
sm_normal.cdf(-1) - sm_normal.cdf(-2), qdist.prob(-1.).eval(), atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
sm_normal.cdf(0) - sm_normal.cdf(-1), qdist.prob(0.).eval(), atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(1. - sm_normal.cdf(1), qdist.prob(2.).eval(), atol=0)
def testNormalLogProbWithCutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=-2.,
high=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(
np.log(sm_normal.cdf(-2)), qdist.log_prob(-2.).eval(), atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
np.log(sm_normal.cdf(-1) - sm_normal.cdf(-2)),
qdist.log_prob(-1.).eval(),
atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
np.log(sm_normal.cdf(0) - sm_normal.cdf(-1)),
qdist.log_prob(0.).eval(),
atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(
np.log(1. - sm_normal.cdf(1)), qdist.log_prob(2.).eval(), atol=0)
def testLogProbAndGradGivesFiniteResults(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
mu = variables.Variable(0., name="mu", dtype=dtype)
sigma = variables.Variable(1., name="sigma", dtype=dtype)
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
x = np.arange(-100, 100, 2).astype(dtype)
proba = qdist.log_prob(x)
grads = gradients_impl.gradients(proba, [mu, sigma])
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self._assert_all_finite(proba.eval())
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def testProbAndGradGivesFiniteResultsForCommonEvents(self):
with self.test_session():
mu = variables.Variable(0.0, name="mu")
sigma = variables.Variable(1.0, name="sigma")
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
x = math_ops.ceil(4 * rng.rand(100).astype(np.float32) - 2)
variables.global_variables_initializer().run()
proba = qdist.prob(x)
self._assert_all_finite(proba.eval())
grads = gradients_impl.gradients(proba, [mu, sigma])
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def testLowerCutoffMustBeBelowUpperCutoffOrWeRaise(self):
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=1., # not strictly less than high.
high=1.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Default is True.
with self.assertRaisesOpError("must be strictly less"):
qdist.sample().eval()
def testCutoffsMustBeIntegerValuedIfValidateArgsTrue(self):
with self.test_session():
low = array_ops.placeholder(dtypes.float32)
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=low,
high=10.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Default is True.
with self.assertRaisesOpError("has non-integer components"):
qdist.sample().eval(feed_dict={low: 1.5})
def testCutoffsCanBeFloatValuedIfValidateArgsFalse(self):
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=0., scale=1., validate_args=False),
low=1.5,
high=10.11)
self.assertFalse(qdist.validate_args) # Default is True.
# Should not raise
qdist.sample().eval()
def testDtypeAndShapeInheritedFromBaseDist(self):
batch_shape = (2, 3)
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=array_ops.zeros(batch_shape),
scale=array_ops.zeros(batch_shape)),
low=1.0,
high=10.0)
self.assertEqual(batch_shape, qdist.batch_shape)
self.assertAllEqual(batch_shape, qdist.batch_shape_tensor().eval())
self.assertEqual((), qdist.event_shape)
self.assertAllEqual((), qdist.event_shape_tensor().eval())
samps = qdist.sample(10, seed=42)
self.assertEqual((10,) + batch_shape, samps.get_shape())
self.assertAllEqual((10,) + batch_shape, samps.eval().shape)
y = rng.randint(0, 5, size=batch_shape).astype(np.float32)
self.assertEqual(batch_shape, qdist.prob(y).get_shape())
if __name__ == "__main__":
test.main()
| apache-2.0 |
llange/pynag | tests/test_plugins.py | 2 | 19560 |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import sys
# Make sure we import from working tree
pynagbase = os.path.dirname(os.path.realpath(__file__ + "/.."))
sys.path.insert(0, pynagbase)
import unittest2 as unittest
import time
import pynag.Utils
import pynag.Plugins
import signal
# Some of the methods here print directly to stdout but we
# don't want to spam the output of the unittests. Let's do a temp
# blocking of stdout and stderr
from cStringIO import StringIO
original_stdout = sys.stdout
original_stderr = sys.stderr
class PluginParams(unittest.TestCase):
def setUp(self):
self.argv_store = sys.argv
from pynag.Plugins import simple as Plugin
self.np = Plugin(must_threshold=False)
sys.stdout = StringIO()
def tearDown(self):
sys.argv = [sys.argv[0]]
sys.stdout = original_stdout
def create_params(self, *args):
sys.argv.extend(args)
def test_default_verbose(self):
#sys.argv = [sys.argv[0]] + ['-v', '10']
self.create_params('-v', '10')
self.np.activate()
self.assertEquals(self.np.data['verbosity'], 0)
def test_verbose(self):
self.create_params('-v', '3')
self.np.activate()
self.assertEquals(self.np.data['verbosity'], 3)
def test_set_hostname(self):
self.create_params('-H', 'testhost.example.com')
self.np.activate()
self.assertEquals(self.np.data['host'], 'testhost.example.com')
def test_set_timeout(self):
self.create_params('-t', '100')
self.np.activate()
self.assertEquals(self.np.data['timeout'], '100')
def test_default_timeout(self):
self.np.activate()
self.assertEquals(self.np.data['timeout'], None)
def test_shortname(self):
from pynag.Plugins import simple as Plugin
np = Plugin(shortname='testcase')
self.assertEquals(np.data['shortname'], 'testcase')
class PluginNoThreshold(unittest.TestCase):
def setUp(self):
self.argv_store = sys.argv
from pynag.Plugins import simple as Plugin
self.np = Plugin(must_threshold=False)
sys.stdout = StringIO()
def tearDown(self):
sys.argv = self.argv_store
sys.stdout = original_stdout
def run_expect(self, case, expected_exit, value):
sys.argv = [sys.argv[0]] + case.split()
self.np.activate()
try:
self.np.check_range(value)
except SystemExit as e:
self.assertEquals(type(e), type(SystemExit()))
self.assertEquals(e.code, expected_exit)
except Exception as e:
self.fail('unexpected exception: %s' % e)
else:
self.fail('SystemExit exception expected')
# All tests return OK since thresholds are not required
def test_number_1(self):
case = ''
self.run_expect(case, 0, -23)
def test_number_2(self):
case = ''
self.run_expect(case, 0, 0)
def test_number_3(self):
case = ''
self.run_expect(case, 0, 2)
def test_number_4(self):
case = ''
self.run_expect(case, 0, 10)
def test_number_5(self):
case = ''
self.run_expect(case, 0, 15)
class PluginHelper(unittest.TestCase):
def setUp(self):
self.argv_store = sys.argv
from pynag.Plugins import PluginHelper
self.my_plugin = PluginHelper()
self.my_plugin.parser.add_option('-F',
dest='fakedata',
help='fake data to test thresholds')
sys.stdout = StringIO()
def tearDown(self):
sys.argv = self.argv_store
sys.stdout = original_stdout
def run_expect(self, case, value, expected_exit):
sys.argv = [sys.argv[0]] + case.split() + ('-F %s' % value).split()
self.my_plugin.parse_arguments()
self.my_plugin.add_status(pynag.Plugins.ok)
self.my_plugin.add_summary(self.my_plugin.options.fakedata)
self.my_plugin.add_metric('fakedata', self.my_plugin.options.fakedata)
try:
self.my_plugin.check_all_metrics()
self.my_plugin.exit()
except SystemExit as e:
self.assertEquals(type(e), type(SystemExit()))
self.assertEquals(e.code, expected_exit)
except Exception as e:
self.fail('unexpected exception: %s' % e)
else:
self.fail('SystemExit exception expected')
finally:
signal.alarm(0)
# Critical if "stuff" is over 20, else warn if over 10
# (will be critical if "stuff" is less than 0)
def test_number_1(self):
case = '--th=metric=fakedata,ok=0..10,warn=10..20'
self.run_expect(case, -23, 2)
def test_number_2(self):
case = '--th=metric=fakedata,ok=0..10,warn=10..20'
self.run_expect(case, 3, 0)
def test_number_3(self):
case = '--th=metric=fakedata,ok=0..10,warn=10..20'
self.run_expect(case, 13, 1)
def test_number_4(self):
case = '--th=metric=fakedata,ok=0..10,warn=10..20'
self.run_expect(case, 23, 2)
# Same as above. Negative "stuff" is OK
def test_number_5(self):
case = '--th=metric=fakedata,ok=inf..10,warn=10..20'
self.run_expect(case, '-23', 0)
def test_number_6(self):
case = '--th=metric=fakedata,ok=inf..10,warn=10..20'
self.run_expect(case, '3', 0)
def test_number_7(self):
case = '--th=metric=fakedata,ok=inf..10,warn=10..20'
self.run_expect(case, '13', 1)
def test_number_8(self):
case = '--th=metric=fakedata,ok=inf..10,warn=10..20'
self.run_expect(case, '23', 2)
# Critical if "stuff" is over 20, else warn if "stuff" is below 10
# (will be critical if "stuff" is less than 0)
def test_number_9(self):
case = '--th=metric=fakedata,warn=0..10,crit=20..inf'
self.run_expect(case, '-23', 0)
def test_number_10(self):
case = '--th=metric=fakedata,warn=0..10,crit=20..inf'
self.run_expect(case, '3', 1)
def test_number_11(self):
case = '--th=metric=fakedata,warn=0..10,crit=20..inf'
self.run_expect(case, '13', 0)
def test_number_12(self):
case = '--th=metric=fakedata,warn=0..10,crit=20..inf'
self.run_expect(case, '23', 2)
# Critical if "stuff" is less than 1
def test_number_13(self):
case = '--th=metric=fakedata,ok=1..inf'
self.run_expect(case, '-23', 2)
def test_number_14(self):
case = '--th=metric=fakedata,ok=1..inf'
self.run_expect(case, '0', 2)
def test_number_15(self):
case = '--th=metric=fakedata,ok=1..inf'
self.run_expect(case, '13', 0)
def test_number_16(self):
case = '--th=metric=fakedata,ok=1..inf'
self.run_expect(case, '23', 0)
# 1-9 is warning, negative or above 10 is critical
def test_number_17(self):
case = '--th=metric=fakedata,warn=1..9,crit=^0..10'
self.run_expect(case, '-23', 2)
def test_number_18(self):
case = '--th=metric=fakedata,warn=1..9,crit=^0..10'
self.run_expect(case, '0', 0)
def test_number_19(self):
case = '--th=metric=fakedata,warn=1..9,crit=^0..10'
self.run_expect(case, '7', 1)
def test_number_20(self):
case = '--th=metric=fakedata,warn=1..9,crit=^0..10'
self.run_expect(case, '23', 2)
# The only noncritical range is 5:6
def test_number_21(self):
case = '--th=metric=fakedata,ok=5..6'
self.run_expect(case, '-23', 2)
def test_number_22(self):
case = '--th=metric=fakedata,ok=5..6'
self.run_expect(case, '0', 2)
def test_number_23(self):
case = '--th=metric=fakedata,ok=5..6'
self.run_expect(case, '2', 2)
def test_number_24(self):
case = '--th=metric=fakedata,ok=5..6'
self.run_expect(case, '5', 0)
def test_number_25(self):
case = '--th=metric=fakedata,ok=5..6'
self.run_expect(case, '6', 0)
def test_number_26(self):
case = '--th=metric=fakedata,ok=5..6'
self.run_expect(case, '7', 2)
# Critical if "stuff" is 10 to 20
def test_number_27(self):
case = '--th=metric=fakedata,ok=^10..20'
self.run_expect(case, '-23', 0)
def test_number_28(self):
case = '--th=metric=fakedata,ok=^10..20'
self.run_expect(case, '0', 0)
def test_number_29(self):
case = '--th=metric=fakedata,ok=^10..20'
self.run_expect(case, '2', 0)
def test_number_30(self):
case = '--th=metric=fakedata,ok=^10..20'
self.run_expect(case, '10', 2)
def test_number_31(self):
case = '--th=metric=fakedata,ok=^10..20'
self.run_expect(case, '15', 2)
def test_number_32(self):
case = '--th=metric=fakedata,ok=^10..20'
self.run_expect(case, '20', 2)
def test_number_33(self):
case = '--th=metric=fakedata,ok=^10..20'
self.run_expect(case, '23', 0)
# Cmdline thresholds pass but we insert a "hardcoded" metric with thresholds
# which will also be evaluated
def test_number_34(self):
# Extra case with hardcoded thresholds
self.my_plugin.add_metric('fakedata2', value='15',
warn='0..10', crit='10..inf')
case = '--th=metric=fakedata,ok=0..10,warn=10..20'
self.run_expect(case, 3, 2)
def test_number_35(self):
# Extra case with hardcoded thresholds
self.my_plugin.add_metric('fakedata2', value='9',
warn='0..10', crit='10..inf')
case = '--th=metric=fakedata,ok=0..10,warn=10..20'
self.run_expect(case, 3, 1)
def test_number_36(self):
# Extra case with hardcoded thresholds
self.my_plugin.add_metric('fakedata2', value='-4',
warn='0..10', crit='10..inf')
case = '--th=metric=fakedata,ok=0..10,warn=10..20'
self.run_expect(case, 3, 0)
def testTimeout(self):
try:
self.my_plugin.set_timeout(1)
time.sleep(1)
self.assertTrue(False, "Code should have timed out by now")
except SystemExit as e:
self.assertEquals(type(e), type(SystemExit()))
self.assertEquals(e.code, pynag.Plugins.unknown)
self.assertTrue(True, "Timeout occured in plugin, just like expected.")
class Plugin(unittest.TestCase):
def setUp(self):
self.argv_store = sys.argv
from pynag.Plugins import simple as Plugin
self.np = Plugin()
sys.stdout = StringIO()
sys.stderr = StringIO()
def tearDown(self):
sys.argv = self.argv_store
sys.stdout = original_stdout
sys.stderr = original_stderr
def run_expect(self, case, expected_exit, value):
sys.argv = [sys.argv[0]] + case.split()
self.np.activate()
try:
self.np.add_perfdata('fake', value, uom='fakes',
warn=10, crit=20, minimum=-100, maximum=100)
perfdata_string = self.np.perfdata_string()
print(perfdata_string)
self.assertEquals(perfdata_string, "| '%s'=%s%s;%s;%s;%s;%s" % (
'fake', value, 'fakes', 10, 20, -100, 100))
self.np.add_message('OK', 'Some message')
self.assertEquals(self.np.data['messages'][0], ['Some message'])
self.np.check_range(value)
except SystemExit as e:
self.assertEquals(type(e), type(SystemExit()))
self.assertEquals(e.code, expected_exit)
except Exception as e:
import traceback
print(traceback.format_exc())
self.fail('unexpected exception: %s' % e)
else:
self.fail('SystemExit exception expected')
# Throws SystemExit, required parameter not set when activating
def test_add_arg_req_missing(self):
self.np.add_arg('F', 'fakedata',
'fake data to test thresholds', required=True)
self.assertRaises(SystemExit, self.np.activate)
def test_add_arg_req(self):
self.np.add_arg('F', 'fakedata',
'fake data to test thresholds', required=True)
sys.argv = [sys.argv[0]] + '-F 100 -w 1 -c 2'.split()
self.np.activate()
def test_add_arg(self):
self.np.add_arg('F', 'fakedata',
'fake data to test thresholds', required=False)
sys.argv = [sys.argv[0]] + '-w 1 -c 2'.split()
self.np.activate()
def test_codestring_to_int(self):
code = self.np.code_string2int('OK')
self.assertEquals(code, 0, "OK did not map to 0")
code = self.np.code_string2int('WARNING')
self.assertEquals(code, 1, "WARNING did not map to 1")
code = self.np.code_string2int('CRITICAL')
self.assertEquals(code, 2, "CRITICAL did not map to 2")
code = self.np.code_string2int('UNKNOWN')
self.assertEquals(code, 3, "UNKNOWN did not map to 3")
# Critical if "stuff" is over 20, else warn if over 10
# (will be critical if "stuff" is less than 0)
def test_number_1(self):
case = '-w 10 -c 20'
self.run_expect(case, 2, -23)
def test_number_2(self):
case = '-w 10 -c 20'
self.run_expect(case, 0, 3)
def test_number_3(self):
case = '-w 10 -c 20'
self.run_expect(case, 1, 13)
def test_number_4(self):
case = '-w 10 -c 20'
self.run_expect(case, 2, 23)
# Same as above. Negative "stuff" is OK
def test_number_5(self):
case = '-w ~:10 -c ~:20'
self.run_expect(case, 0, -23)
def test_number_6(self):
case = '-w ~:10 -c ~:20'
self.run_expect(case, 0, 3)
def test_number_7(self):
case = '-w ~:10 -c ~:20'
self.run_expect(case, 1, 13)
def test_number_8(self):
case = '-w ~:10 -c ~:20'
self.run_expect(case, 2, 23)
# Critical if "stuff" is over 20, else warn if "stuff" is below 10
# (will be critical if "stuff" is less than 0)
def test_number_9(self):
case = '-w 10: -c 20'
self.run_expect(case, 2, -23)
def test_number_10(self):
case = '-w 10: -c 20'
self.run_expect(case, 1, 3)
def test_number_11(self):
case = '-w 10: -c 20'
self.run_expect(case, 0, 13)
def test_number_12(self):
case = '-w 10: -c 20'
self.run_expect(case, 2, 23)
# Critical if "stuff" is less than 1
def test_number_13(self):
case = '-c 1:'
self.run_expect(case, 2, -23)
def test_number_14(self):
case = '-c 1:'
self.run_expect(case, 2, 0)
def test_number_15(self):
case = '-c 1:'
self.run_expect(case, 0, 13)
def test_number_16(self):
case = '-c 1:'
self.run_expect(case, 0, 23)
# 1-9 is warning, negative or above 10 is critical
def test_number_17(self):
case = '-w ~:0 -c 10'
self.run_expect(case, 2, -23)
def test_number_18(self):
case = '-w ~:0 -c 10'
self.run_expect(case, 0, 0)
def test_number_19(self):
case = '-w ~:0 -c 10'
self.run_expect(case, 1, 7)
def test_number_20(self):
case = '-w ~:0 -c 10'
self.run_expect(case, 2, 23)
# The only noncritical range is 5:6
def test_number_21(self):
case = '-c 5:6'
self.run_expect(case, 2, -23)
def test_number_22(self):
case = '-c 5:6'
self.run_expect(case, 2, 0)
def test_number_23(self):
case = '-c 5:6'
self.run_expect(case, 2, 2)
def test_number_24(self):
case = '-c 5:6'
self.run_expect(case, 0, 5)
def test_number_25(self):
case = '-c 5:6'
self.run_expect(case, 0, 6)
# Critical if "stuff" is 10 to 20
def test_number_26(self):
case = '-c @10:20'
self.run_expect(case, 0, -23)
def test_number_27(self):
case = '-c @10:20'
self.run_expect(case, 0, 0)
def test_number_28(self):
case = '-c @10:20'
self.run_expect(case, 0, 2)
def test_number_29(self):
case = '-c @10:20'
self.run_expect(case, 2, 10)
def test_number_30(self):
case = '-c @10:20'
self.run_expect(case, 2, 15)
def test_number_31(self):
case = '-c @10:20'
self.run_expect(case, 2, 20)
def test_number_32(self):
case = '-c @10:20'
self.run_expect(case, 0, 23)
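# Editorial sketch, not part of pynag: the classic nagios-plugins range
# grammar exercised above -- "[@]start:end", where '~' means -infinity,
# a missing end means +infinity, and a leading '@' alerts *inside* the
# range instead of outside it.
def nagios_range_alert(value, spec):
    inside = spec.startswith('@')
    if inside:
        spec = spec[1:]
    if ':' in spec:
        start, end = spec.split(':', 1)
    else:
        start, end = '0', spec
    lo = float('-inf') if start == '~' else float(start)
    hi = float('inf') if end == '' else float(end)
    outside = value < lo or value > hi
    return not outside if inside else outside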
class NewPluginThresholdSyntax(unittest.TestCase):
""" Unit tests for pynag.Plugins.new_threshold_syntax """
def test_check_threshold(self):
""" Test check_threshold() with different parameters
Returns (in order of appearance):
0 - Unknown on invalid input
1 - If no levels are specified, return OK
2 - If an ok level is specified and value is within range, return OK
3 - If a critical level is specified and value is within range, return CRITICAL
4 - If a warning level is specified and value is within range, return WARNING
5 - If an ok level is specified, return CRITICAL
6 - Otherwise return OK
"""
from pynag.Plugins.new_threshold_syntax import check_threshold
from pynag.Plugins import ok, warning, critical, unknown
# 0 - return unknown on invalid input
self.assertEqual(unknown, check_threshold(1, warning='invalid input'))
# 1 - If no levels are specified, return OK
self.assertEqual(ok, check_threshold(1))
# 2 - If an ok level is specified and value is within range, return OK
self.assertEqual(ok, check_threshold(1, ok="0..10"))
self.assertEqual(ok, check_threshold(1, ok="0..10", warning="0..10"))
self.assertEqual(ok, check_threshold(1, ok="0..10", critical="0..10"))
# 3 - If a critical level is specified and value is within range, return CRITICAL
self.assertEqual(critical, check_threshold(1, critical="0..10"))
# 4 - If a warning level is specified and value is within range, return WARNING
self.assertEqual(warning, check_threshold(1, warning="0..10"))
# 5 - If an ok level is specified, return CRITICAL
self.assertEqual(critical, check_threshold(1, ok="10..20"))
# 6 - Otherwise return OK
# ... we pass only warning, then only critical, then both, but value is always outside ranges
self.assertEqual(ok, check_threshold(1, warning="10..20"))
self.assertEqual(ok, check_threshold(1, critical="10..20"))
self.assertEqual(ok, check_threshold(1, warning="10..20", critical="20..30"))
def test_invalid_range(self):
from pynag.Plugins.new_threshold_syntax import check_range
from pynag.Utils import PynagError
self.assertRaises(PynagError, check_range, 1, '')
self.assertRaises(PynagError, check_range, 1, None)
def test_invalid_threshold(self):
from pynag.Plugins.new_threshold_syntax import parse_threshold
from pynag.Utils import PynagError
self.assertRaises(PynagError, parse_threshold, '')
self.assertRaises(AttributeError, parse_threshold, None)
self.assertRaises(PynagError, parse_threshold, 'string')
if __name__ == "__main__":
unittest.main()
# vim: sts=4 expandtab autoindent
| gpl-2.0 |
jmakov/ggrc-core | src/ggrc_basic_permissions/roles/ProgramAuditEditor.py | 2 | 2313 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit Implied"
description = """
A user with the ProgramEditor role for a private program will also have this
role in the audit context for any audit created for that program.
"""
permissions = {
"read": [
"Request",
"Comment",
"Assessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Audit",
"AuditObject",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"Comment",
"Assessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"Response",
"AuditObject"
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
{
"type": "Assessment",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Issue",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
"Request",
"Comment",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Audit",
"AuditObject",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting"
],
"delete": [
"ObjectControl",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"Audit",
"AuditObject"
]
}
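# Editorial sketch, not part of GGRC: one way a "contains" condition term
# like those above could be evaluated at runtime; the function and
# attribute names are illustrative assumptions.
def contains_condition(obj, terms, current_user):
    values = getattr(obj, terms["list_property"], [])
    value = current_user if terms["value"] == "$current_user" else terms["value"]
    return value in values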
| apache-2.0 |
Juniper/contrail-dev-neutron | neutron/plugins/ml2/drivers/mech_linuxbridge.py | 26 | 2340 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_agent
LOG = log.getLogger(__name__)
class LinuxbridgeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
"""Attach to networks using linuxbridge L2 agent.
The LinuxbridgeMechanismDriver integrates the ml2 plugin with the
linuxbridge L2 agent. Port binding with this driver requires the
linuxbridge agent to be running on the port's host, and that agent
to have connectivity to at least one segment of the port's
network.
"""
def __init__(self):
super(LinuxbridgeMechanismDriver, self).__init__(
constants.AGENT_TYPE_LINUXBRIDGE,
portbindings.VIF_TYPE_BRIDGE,
{portbindings.CAP_PORT_FILTER: True})
def check_segment_for_agent(self, segment, agent):
mappings = agent['configurations'].get('interface_mappings', {})
tunnel_types = agent['configurations'].get('tunnel_types', [])
LOG.debug(_("Checking segment: %(segment)s "
"for mappings: %(mappings)s "
"with tunnel_types: %(tunnel_types)s"),
{'segment': segment, 'mappings': mappings,
'tunnel_types': tunnel_types})
network_type = segment[api.NETWORK_TYPE]
if network_type == 'local':
return True
elif network_type in tunnel_types:
return True
elif network_type in ['flat', 'vlan']:
return segment[api.PHYSICAL_NETWORK] in mappings
else:
return False
| apache-2.0 |
vicky2135/lucious | oscar/lib/python2.7/site-packages/unidecode/x0c7.py | 253 | 4564 | data = (
'wek', # 0x00
'wet', # 0x01
'wep', # 0x02
'weh', # 0x03
'wi', # 0x04
'wig', # 0x05
'wigg', # 0x06
'wigs', # 0x07
'win', # 0x08
'winj', # 0x09
'winh', # 0x0a
'wid', # 0x0b
'wil', # 0x0c
'wilg', # 0x0d
'wilm', # 0x0e
'wilb', # 0x0f
'wils', # 0x10
'wilt', # 0x11
'wilp', # 0x12
'wilh', # 0x13
'wim', # 0x14
'wib', # 0x15
'wibs', # 0x16
'wis', # 0x17
'wiss', # 0x18
'wing', # 0x19
'wij', # 0x1a
'wic', # 0x1b
'wik', # 0x1c
'wit', # 0x1d
'wip', # 0x1e
'wih', # 0x1f
'yu', # 0x20
'yug', # 0x21
'yugg', # 0x22
'yugs', # 0x23
'yun', # 0x24
'yunj', # 0x25
'yunh', # 0x26
'yud', # 0x27
'yul', # 0x28
'yulg', # 0x29
'yulm', # 0x2a
'yulb', # 0x2b
'yuls', # 0x2c
'yult', # 0x2d
'yulp', # 0x2e
'yulh', # 0x2f
'yum', # 0x30
'yub', # 0x31
'yubs', # 0x32
'yus', # 0x33
'yuss', # 0x34
'yung', # 0x35
'yuj', # 0x36
'yuc', # 0x37
'yuk', # 0x38
'yut', # 0x39
'yup', # 0x3a
'yuh', # 0x3b
'eu', # 0x3c
'eug', # 0x3d
'eugg', # 0x3e
'eugs', # 0x3f
'eun', # 0x40
'eunj', # 0x41
'eunh', # 0x42
'eud', # 0x43
'eul', # 0x44
'eulg', # 0x45
'eulm', # 0x46
'eulb', # 0x47
'euls', # 0x48
'eult', # 0x49
'eulp', # 0x4a
'eulh', # 0x4b
'eum', # 0x4c
'eub', # 0x4d
'eubs', # 0x4e
'eus', # 0x4f
'euss', # 0x50
'eung', # 0x51
'euj', # 0x52
'euc', # 0x53
'euk', # 0x54
'eut', # 0x55
'eup', # 0x56
'euh', # 0x57
'yi', # 0x58
'yig', # 0x59
'yigg', # 0x5a
'yigs', # 0x5b
'yin', # 0x5c
'yinj', # 0x5d
'yinh', # 0x5e
'yid', # 0x5f
'yil', # 0x60
'yilg', # 0x61
'yilm', # 0x62
'yilb', # 0x63
'yils', # 0x64
'yilt', # 0x65
'yilp', # 0x66
'yilh', # 0x67
'yim', # 0x68
'yib', # 0x69
'yibs', # 0x6a
'yis', # 0x6b
'yiss', # 0x6c
'ying', # 0x6d
'yij', # 0x6e
'yic', # 0x6f
'yik', # 0x70
'yit', # 0x71
'yip', # 0x72
'yih', # 0x73
'i', # 0x74
'ig', # 0x75
'igg', # 0x76
'igs', # 0x77
'in', # 0x78
'inj', # 0x79
'inh', # 0x7a
'id', # 0x7b
'il', # 0x7c
'ilg', # 0x7d
'ilm', # 0x7e
'ilb', # 0x7f
'ils', # 0x80
'ilt', # 0x81
'ilp', # 0x82
'ilh', # 0x83
'im', # 0x84
'ib', # 0x85
'ibs', # 0x86
'is', # 0x87
'iss', # 0x88
'ing', # 0x89
'ij', # 0x8a
'ic', # 0x8b
'ik', # 0x8c
'it', # 0x8d
'ip', # 0x8e
'ih', # 0x8f
'ja', # 0x90
'jag', # 0x91
'jagg', # 0x92
'jags', # 0x93
'jan', # 0x94
'janj', # 0x95
'janh', # 0x96
'jad', # 0x97
'jal', # 0x98
'jalg', # 0x99
'jalm', # 0x9a
'jalb', # 0x9b
'jals', # 0x9c
'jalt', # 0x9d
'jalp', # 0x9e
'jalh', # 0x9f
'jam', # 0xa0
'jab', # 0xa1
'jabs', # 0xa2
'jas', # 0xa3
'jass', # 0xa4
'jang', # 0xa5
'jaj', # 0xa6
'jac', # 0xa7
'jak', # 0xa8
'jat', # 0xa9
'jap', # 0xaa
'jah', # 0xab
'jae', # 0xac
'jaeg', # 0xad
'jaegg', # 0xae
'jaegs', # 0xaf
'jaen', # 0xb0
'jaenj', # 0xb1
'jaenh', # 0xb2
'jaed', # 0xb3
'jael', # 0xb4
'jaelg', # 0xb5
'jaelm', # 0xb6
'jaelb', # 0xb7
'jaels', # 0xb8
'jaelt', # 0xb9
'jaelp', # 0xba
'jaelh', # 0xbb
'jaem', # 0xbc
'jaeb', # 0xbd
'jaebs', # 0xbe
'jaes', # 0xbf
'jaess', # 0xc0
'jaeng', # 0xc1
'jaej', # 0xc2
'jaec', # 0xc3
'jaek', # 0xc4
'jaet', # 0xc5
'jaep', # 0xc6
'jaeh', # 0xc7
'jya', # 0xc8
'jyag', # 0xc9
'jyagg', # 0xca
'jyags', # 0xcb
'jyan', # 0xcc
'jyanj', # 0xcd
'jyanh', # 0xce
'jyad', # 0xcf
'jyal', # 0xd0
'jyalg', # 0xd1
'jyalm', # 0xd2
'jyalb', # 0xd3
'jyals', # 0xd4
'jyalt', # 0xd5
'jyalp', # 0xd6
'jyalh', # 0xd7
'jyam', # 0xd8
'jyab', # 0xd9
'jyabs', # 0xda
'jyas', # 0xdb
'jyass', # 0xdc
'jyang', # 0xdd
'jyaj', # 0xde
'jyac', # 0xdf
'jyak', # 0xe0
'jyat', # 0xe1
'jyap', # 0xe2
'jyah', # 0xe3
'jyae', # 0xe4
'jyaeg', # 0xe5
'jyaegg', # 0xe6
'jyaegs', # 0xe7
'jyaen', # 0xe8
'jyaenj', # 0xe9
'jyaenh', # 0xea
'jyaed', # 0xeb
'jyael', # 0xec
'jyaelg', # 0xed
'jyaelm', # 0xee
'jyaelb', # 0xef
'jyaels', # 0xf0
'jyaelt', # 0xf1
'jyaelp', # 0xf2
'jyaelh', # 0xf3
'jyaem', # 0xf4
'jyaeb', # 0xf5
'jyaebs', # 0xf6
'jyaes', # 0xf7
'jyaess', # 0xf8
'jyaeng', # 0xf9
'jyaej', # 0xfa
'jyaec', # 0xfb
'jyaek', # 0xfc
'jyaet', # 0xfd
'jyaep', # 0xfe
'jyaeh', # 0xff
)
| bsd-3-clause |
CoDEmanX/ArangoDB | 3rdParty/V8-4.3.61/tools/testrunner/server/presence_handler.py | 123 | 4443 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import SocketServer
import threading
try:
import ujson as json
except:
import json
from . import constants
from ..objects import peer
STARTUP_REQUEST = "V8 test peer starting up"
STARTUP_RESPONSE = "Let's rock some tests!"
EXIT_REQUEST = "V8 testing peer going down"
def GetOwnIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
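# Editorial aside, not part of the original: connect() on a UDP socket
# sends no packets; it just selects a route, so getsockname() reveals
# the outbound interface address. A hedged variant with cleanup:
def get_own_ip_safe(probe_addr=("8.8.8.8", 80)):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(probe_addr)
        return s.getsockname()[0]
    finally:
        s.close()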
class PresenceHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = json.loads(self.request[0].strip())
if data[0] == STARTUP_REQUEST:
jobs = data[1]
relative_perf = data[2]
pubkey_fingerprint = data[3]
trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
response = [STARTUP_RESPONSE, self.server.daemon.jobs,
self.server.daemon.relative_perf,
self.server.daemon.pubkey_fingerprint, trusted]
response = json.dumps(response)
self.server.SendTo(self.client_address[0], response)
p = peer.Peer(self.client_address[0], jobs, relative_perf,
pubkey_fingerprint)
p.trusted = trusted
self.server.daemon.AddPeer(p)
elif data[0] == STARTUP_RESPONSE:
jobs = data[1]
perf = data[2]
pubkey_fingerprint = data[3]
p = peer.Peer(self.client_address[0], jobs, perf, pubkey_fingerprint)
p.trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
p.trusting_me = data[4]
self.server.daemon.AddPeer(p)
elif data[0] == EXIT_REQUEST:
self.server.daemon.DeletePeer(self.client_address[0])
if self.client_address[0] == self.server.daemon.ip:
self.server.shutdown_lock.release()
class PresenceDaemon(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
def __init__(self, daemon):
self.daemon = daemon
address = (daemon.ip, constants.PRESENCE_PORT)
SocketServer.UDPServer.__init__(self, address, PresenceHandler)
self.shutdown_lock = threading.Lock()
def shutdown(self):
self.shutdown_lock.acquire()
self.SendToAll(json.dumps([EXIT_REQUEST]))
self.shutdown_lock.acquire()
self.shutdown_lock.release()
SocketServer.UDPServer.shutdown(self)
def SendTo(self, target, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(message, (target, constants.PRESENCE_PORT))
sock.close()
def SendToAll(self, message):
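# Brute-force "broadcast": iterate over every host address in the local
# /24 (last octet 1..253) and send the datagram to each one individually.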
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = self.daemon.ip.split(".")
for i in range(1, 254):
ip[-1] = str(i)
sock.sendto(message, (".".join(ip), constants.PRESENCE_PORT))
sock.close()
def FindPeers(self):
request = [STARTUP_REQUEST, self.daemon.jobs, self.daemon.relative_perf,
self.daemon.pubkey_fingerprint]
request = json.dumps(request)
self.SendToAll(request)
| apache-2.0 |
wbsoft/frescobaldi | frescobaldi_app/quickinsert/widget.py | 3 | 4449 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The Quick Insert panel widget.
"""
import weakref
from PyQt5.QtCore import QSettings
from PyQt5.QtWidgets import (QComboBox, QHBoxLayout, QLabel, QToolBox,
QToolButton, QVBoxLayout, QWidget)
import app
import userguide.util
import icons
import symbols
import gadgets.toolboxwheeler
from . import articulations
from . import barlines
from . import dynamics
from . import spanners
class QuickInsert(QWidget):
def __init__(self, dockwidget):
super(QuickInsert, self).__init__(dockwidget)
self._dockwidget = weakref.ref(dockwidget)
# filled in by ButtonGroup subclasses
self.actionDict = {}
layout = QVBoxLayout()
self.setLayout(layout)
layout.setContentsMargins(0, 0, 0, 0)
self.helpButton = QToolButton(
icon = icons.get("help-contents"),
autoRaise = True,
clicked = lambda: userguide.show("quickinsert"))
self.directionLabel = QLabel()
self.direction = QComboBox()
self.direction.addItems(['', '', ''])
self.direction.setItemIcon(0, icons.get("go-up"))
self.direction.setItemIcon(2, icons.get("go-down"))
self.direction.setCurrentIndex(1)
hor = QHBoxLayout()
hor.setContentsMargins(0, 0, 0, 0)
hor.addWidget(self.helpButton)
hor.addWidget(self.directionLabel)
hor.addWidget(self.direction)
layout.addLayout(hor)
self.toolbox = QToolBox(self)
gadgets.toolboxwheeler.ToolBoxWheeler(self.toolbox)
layout.addWidget(self.toolbox)
for cls in (
articulations.Articulations,
dynamics.Dynamics,
spanners.Spanners,
barlines.BarLines,
):
widget = cls(self)
self.toolbox.addItem(widget, widget.icon(), '')
app.translateUI(self)
userguide.openWhatsThis(self)
# restore remembered current page
name = QSettings().value("quickinsert/current_tool", "", str)
if name:
for i in range(self.toolbox.count()):
if name == self.toolbox.widget(i).__class__.__name__.lower():
self.toolbox.setCurrentIndex(i)
break
self.toolbox.currentChanged.connect(self.slotCurrentChanged)
def slotCurrentChanged(self, index):
name = self.toolbox.widget(index).__class__.__name__.lower()
QSettings().setValue("quickinsert/current_tool", name)
def translateUI(self):
self.setWhatsThis(_(
"<p>With the Quick Insert Panel you can add various music "
"elements to the current note or selected music.</p>\n"
"<p>See {link} for more information.</p>").format(link=
userguide.util.format_link("quickinsert")))
self.helpButton.setToolTip(_("Help"))
self.directionLabel.setText(_("Direction:"))
for item, text in enumerate((_("Up"), _("Neutral"), _("Down"))):
self.direction.setItemText(item, text)
for i in range(self.toolbox.count()):
self.toolbox.setItemText(i, self.toolbox.widget(i).title())
self.toolbox.setItemToolTip(i, self.toolbox.widget(i).tooltip())
def actionForName(self, name):
"""This is called by the ShortcutCollection of our dockwidget, e.g. if the user presses a key."""
try:
return self.actionDict[name]
except KeyError:
pass
def dockwidget(self):
return self._dockwidget()
| gpl-2.0 |
cszipper/PyAIML3 | build/lib.linux-x86_64-2.7/aiml/DefaultSubs.py | 14 | 3743 | """This file contains the default (English) substitutions for the
PyAIML kernel. These substitutions may be overridden by using the
Kernel.loadSubs(filename) method. The filename specified should refer
to a Windows-style INI file with the following format:
# lines that start with '#' are comments
# The 'gender' section contains the substitutions performed by the
# <gender> AIML tag, which swaps masculine and feminine pronouns.
[gender]
he = she
she = he
# and so on...
# The 'person' section contains the substitutions performed by the
# <person> AIML tag, which swaps 1st and 2nd person pronouns.
[person]
I = you
you = I
# and so on...
# The 'person2' section contains the substitutions performed by
# the <person2> AIML tag, which swaps 1st and 3nd person pronouns.
[person2]
I = he
he = I
# and so on...
# the 'normal' section contains substitutions run on every input
# string passed into Kernel.respond(). It's mainly used to
# correct common misspellings, and to convert contractions
# ("WHAT'S") into a format that will match an AIML pattern ("WHAT
# IS").
[normal]
what's = what is
"""
defaultGender = {
# masculine -> feminine
"he": "she",
"him": "her",
"his": "her",
"himself": "herself",
# feminine -> masculine
"she": "he",
"her": "him",
"hers": "his",
"herself": "himself",
}
defaultPerson = {
# 1st->3rd (masculine)
"I": "he",
"me": "him",
"my": "his",
"mine": "his",
"myself": "himself",
# 3rd->1st (masculine)
"he":"I",
"him":"me",
"his":"my",
"himself":"myself",
# 3rd->1st (feminine)
"she":"I",
"her":"me",
"hers":"mine",
"herself":"myself",
}
defaultPerson2 = {
# 1st -> 2nd
"I": "you",
"me": "you",
"my": "your",
"mine": "yours",
"myself": "yourself",
# 2nd -> 1st
"you": "me",
"your": "my",
"yours": "mine",
"yourself": "myself",
}
# TODO: this list is far from complete
defaultNormal = {
"wanna": "want to",
"gonna": "going to",
"I'm": "I am",
"I'd": "I would",
"I'll": "I will",
"I've": "I have",
"you'd": "you would",
"you're": "you are",
"you've": "you have",
"you'll": "you will",
"he's": "he is",
"he'd": "he would",
"he'll": "he will",
"she's": "she is",
"she'd": "she would",
"she'll": "she will",
"we're": "we are",
"we'd": "we would",
"we'll": "we will",
"we've": "we have",
"they're": "they are",
"they'd": "they would",
"they'll": "they will",
"they've": "they have",
"y'all": "you all",
"can't": "can not",
"cannot": "can not",
"couldn't": "could not",
"wouldn't": "would not",
"shouldn't": "should not",
"isn't": "is not",
"ain't": "is not",
"don't": "do not",
"aren't": "are not",
"won't": "will not",
"weren't": "were not",
"wasn't": "was not",
"didn't": "did not",
"hasn't": "has not",
"hadn't": "had not",
"haven't": "have not",
"where's": "where is",
"where'd": "where did",
"where'll": "where will",
"who's": "who is",
"who'd": "who did",
"who'll": "who will",
"what's": "what is",
"what'd": "what did",
"what'll": "what will",
"when's": "when is",
"when'd": "when did",
"when'll": "when will",
"why's": "why is",
"why'd": "why did",
"why'll": "why will",
"it's": "it is",
"it'd": "it would",
"it'll": "it will",
}
| bsd-2-clause |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/washtenaw/estimation/my_estimation_config.py | 2 | 1595 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import os
from opus_core.database_management.configurations.scenario_database_configuration import ScenarioDatabaseConfiguration
from opus_core.database_management.configurations.estimation_database_configuration import EstimationDatabaseConfiguration
from opus_core.configurations.baseyear_cache_configuration import BaseyearCacheConfiguration
from urbansim.configurations.creating_baseyear_cache_configuration import CreatingBaseyearCacheConfiguration
my_configuration = {
'scenario_database_configuration':ScenarioDatabaseConfiguration(
database_name = "semcog_baseyear", #change
),
'estimation_database_configuration':EstimationDatabaseConfiguration(
database_name = "semcog_baseyear_estimation",
),
'datasets_to_cache_after_each_model':[],
'low_memory_mode':False,
'cache_directory':'/urbansim_cache/semcog/cache_source', # change or leave out
'creating_baseyear_cache_configuration':CreatingBaseyearCacheConfiguration(
unroll_gridcells = True,
cache_from_database = False,
baseyear_cache = BaseyearCacheConfiguration(
existing_cache_to_copy = '/urbansim_cache/semcog/cache_source',
#years_to_cache = range(1996,2001)
),
tables_to_cache = [],
tables_to_cache_nchunks = {'gridcells':1},
tables_to_copy_to_previous_years = {},
),
'base_year': 2005,
'years': (2005,2005),
} | gpl-2.0 |
azumimuo/family-xbmc-addon | script.module.youtube.dl/lib/youtube_dl/extractor/sztvhu.py | 71 | 1673 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class SztvHuIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
_TEST = {
'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
'md5': 'a6df607b11fb07d0e9f2ad94613375cb',
'info_dict': {
'id': '20130909',
'ext': 'mp4',
'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren',
'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_file = self._search_regex(
r'file: "...:(.*?)",', webpage, 'video file')
title = self._html_search_regex(
r'<meta name="title" content="([^"]*?) - [^-]*? - [^-]*?"',
webpage, 'video title')
description = self._html_search_regex(
r'<meta name="description" content="([^"]*)"/>',
webpage, 'video description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
video_url = 'http://media.sztv.hu/vod/' + video_file
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
| gpl-2.0 |
Godiyos/python-for-android | python3-alpha/python3-src/Lib/concurrent/futures/_base.py | 47 | 19248 | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import collections
import functools
import logging
import threading
import time
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
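# Typical state transitions: PENDING -> RUNNING -> FINISHED, or
# PENDING -> CANCELLED -> CANCELLED_AND_NOTIFIED once a waiter has been told.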
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super().add_result(future)
self.event.set()
def add_exception(self, future):
super().add_exception(future)
self.event.set()
def add_cancelled(self, future):
super().add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
super().__init__()
def _decrement_pending_calls(self):
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super().add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super().add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super().add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
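# Illustrative usage sketch (assumes a ThreadPoolExecutor from this package;
# `fetch` and `urls` are hypothetical):
#
#   with ThreadPoolExecutor(max_workers=4) as executor:
#       futures = [executor.submit(fetch, url) for url in urls]
#       for future in as_completed(futures, timeout=60):
#           print(future.result())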
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
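# Illustrative usage sketch (hypothetical `futures` list): stop waiting as
# soon as any call raises:
#
#   done, not_done = wait(futures, timeout=30, return_when=FIRST_EXCEPTION)
#   for f in done:
#       if not f.cancelled() and f.exception() is not None:
#           handle_failure(f.exception())   # hypothetical handler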
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, timeout=None):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(fn, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
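# Illustrative usage sketch (hypothetical `load`, `urls`, and `process`):
# note that the timeout budget covers the entire iteration, not each call:
#
#   for page in executor.map(load, urls, timeout=30):
#       process(page)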
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
| apache-2.0 |
haad/ansible | lib/ansible/modules/cloud/misc/virt_net.py | 21 | 18988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: virt_net
author: "Maciej Delmanowski (@drybjed)"
version_added: "2.0"
short_description: Manage libvirt network configuration
description:
- Manage I(libvirt) networks.
options:
name:
required: true
aliases: ['network']
description:
- name of the network being managed. Note that network must be previously
defined with xml.
state:
required: false
choices: [ "active", "inactive", "present", "absent" ]
description:
- specify which state you want a network to be in.
If 'active', network will be started.
If 'present', ensure that network is present but do not change its
state; if it's missing, you need to specify xml argument.
If 'inactive', network will be stopped.
If 'undefined' or 'absent', network will be removed from I(libvirt) configuration.
command:
required: false
choices: [ "define", "create", "start", "stop", "destroy",
"undefine", "get_xml", "list_nets", "facts",
"info", "status", "modify"]
description:
- in addition to state management, various non-idempotent commands are available.
See examples.
Modify was added in version 2.1
autostart:
required: false
choices: ["yes", "no"]
description:
- Specify if a given network should be started automatically on system boot.
uri:
required: false
default: "qemu:///system"
description:
- libvirt connection uri.
xml:
required: false
description:
- XML document used with the define command.
requirements:
- "python >= 2.6"
- "python-libvirt"
- "python-lxml"
'''
EXAMPLES = '''
# Define a new network
- virt_net:
command: define
name: br_nat
xml: '{{ lookup("template", "network/bridge.xml.j2") }}'
# Start a network
- virt_net:
command: create
name: br_nat
# List available networks
- virt_net:
command: list_nets
# Get XML data of a specified network
- virt_net:
command: get_xml
name: br_nat
# Stop a network
- virt_net:
command: destroy
name: br_nat
# Undefine a network
- virt_net:
command: undefine
name: br_nat
# Gather facts about networks
# Facts will be available as 'ansible_libvirt_networks'
- virt_net:
command: facts
# Gather information about network managed by 'libvirt' remotely using uri
- virt_net:
command: info
uri: '{{ item }}'
with_items: '{{ libvirt_uris }}'
register: networks
# Ensure that a network is active (needs to be defined and built first)
- virt_net:
state: active
name: br_nat
# Ensure that a network is inactive
- virt_net:
state: inactive
name: br_nat
# Ensure that a given network will be started at boot
- virt_net:
autostart: yes
name: br_nat
# Disable autostart for a given network
- virt_net:
autostart: no
name: br_nat
'''
try:
import libvirt
except ImportError:
HAS_VIRT = False
else:
HAS_VIRT = True
try:
from lxml import etree
except ImportError:
HAS_XML = False
else:
HAS_XML = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE = 2
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
'undefine', 'destroy', 'get_xml', 'define',
'modify']
HOST_COMMANDS = ['list_nets', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
ENTRY_STATE_ACTIVE_MAP = {
0: "inactive",
1: "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
0: "no",
1: "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
0: "no",
1: "yes"
}
class EntryNotFound(Exception):
pass
class LibvirtConnection(object):
def __init__(self, uri, module):
self.module = module
conn = libvirt.open(uri)
if not conn:
raise Exception("hypervisor connection failure")
self.conn = conn
def find_entry(self, entryid):
# entryid = -1 returns a list of everything
results = []
# Get active entries
for name in self.conn.listNetworks():
entry = self.conn.networkLookupByName(name)
results.append(entry)
# Get inactive entries
for name in self.conn.listDefinedNetworks():
entry = self.conn.networkLookupByName(name)
results.append(entry)
if entryid == -1:
return results
for entry in results:
if entry.name() == entryid:
return entry
raise EntryNotFound("network %s not found" % entryid)
def create(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).create()
else:
try:
state = self.find_entry(entryid).isActive()
except:
return self.module.exit_json(changed=True)
if not state:
return self.module.exit_json(changed=True)
def modify(self, entryid, xml):
network = self.find_entry(entryid)
# identify what type of entry is given in the xml
new_data = etree.fromstring(xml)
old_data = etree.fromstring(network.XMLDesc(0))
if new_data.tag == 'host':
mac_addr = new_data.get('mac')
hosts = old_data.xpath('/network/ip/dhcp/host')
# find the one mac we're looking for
host = None
for h in hosts:
if h.get('mac') == mac_addr:
host = h
break
if host is None:
# add the host
if not self.module.check_mode:
res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
else:
# pretend there was a change
res = 0
if res == 0:
return True
else:
# change the host
if host.get('name') == new_data.get('name') and host.get('ip') == new_data.get('ip'):
return False
else:
if not self.module.check_mode:
res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
-1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
else:
# pretend there was a change
res = 0
if res == 0:
return True
# command, section, parentIndex, xml, flags=0
self.module.fail_json(msg='updating this is not supported yet %s' % to_native(xml))
def destroy(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).destroy()
else:
if self.find_entry(entryid).isActive():
return self.module.exit_json(changed=True)
def undefine(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).undefine()
else:
if not self.find_entry(entryid):
return self.module.exit_json(changed=True)
def get_status2(self, entry):
state = entry.isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
def get_status(self, entryid):
if not self.module.check_mode:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
else:
try:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
except:
return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")
def get_uuid(self, entryid):
return self.find_entry(entryid).UUIDString()
def get_xml(self, entryid):
return self.find_entry(entryid).XMLDesc(0)
def get_forward(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/network/forward')[0].get('mode')
except:
raise ValueError('Forward mode not specified')
return result
def get_domain(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/network/domain')[0].get('name')
except:
raise ValueError('Domain not specified')
return result
def get_macaddress(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/network/mac')[0].get('address')
except:
raise ValueError('MAC address not specified')
return result
def get_autostart(self, entryid):
state = self.find_entry(entryid).autostart()
return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")
def get_autostart2(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).autostart()
else:
try:
return self.find_entry(entryid).autostart()
except:
return self.module.exit_json(changed=True)
def set_autostart(self, entryid, val):
if not self.module.check_mode:
return self.find_entry(entryid).setAutostart(val)
else:
try:
state = self.find_entry(entryid).autostart()
except:
return self.module.exit_json(changed=True)
if bool(state) != val:
return self.module.exit_json(changed=True)
def get_bridge(self, entryid):
return self.find_entry(entryid).bridgeName()
def get_persistent(self, entryid):
state = self.find_entry(entryid).isPersistent()
return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")
def get_dhcp_leases(self, entryid):
network = self.find_entry(entryid)
return network.DHCPLeases()
def define_from_xml(self, entryid, xml):
if not self.module.check_mode:
return self.conn.networkDefineXML(xml)
else:
try:
self.find_entry(entryid)
except:
return self.module.exit_json(changed=True)
class VirtNetwork(object):
def __init__(self, uri, module):
self.module = module
self.uri = uri
self.conn = LibvirtConnection(self.uri, self.module)
def get_net(self, entryid):
return self.conn.find_entry(entryid)
def list_nets(self, state=None):
results = []
for entry in self.conn.find_entry(-1):
if state:
if state == self.conn.get_status2(entry):
results.append(entry.name())
else:
results.append(entry.name())
return results
def state(self):
results = []
for entry in self.list_nets():
state_blurb = self.conn.get_status(entry)
results.append("%s %s" % (entry, state_blurb))
return results
def autostart(self, entryid):
return self.conn.set_autostart(entryid, True)
def get_autostart(self, entryid):
return self.conn.get_autostart2(entryid)
def set_autostart(self, entryid, state):
return self.conn.set_autostart(entryid, state)
def create(self, entryid):
return self.conn.create(entryid)
def modify(self, entryid, xml):
return self.conn.modify(entryid, xml)
def start(self, entryid):
return self.conn.create(entryid)
def stop(self, entryid):
return self.conn.destroy(entryid)
def destroy(self, entryid):
return self.conn.destroy(entryid)
def undefine(self, entryid):
return self.conn.undefine(entryid)
def status(self, entryid):
return self.conn.get_status(entryid)
def get_xml(self, entryid):
return self.conn.get_xml(entryid)
def define(self, entryid, xml):
return self.conn.define_from_xml(entryid, xml)
def info(self):
return self.facts(facts_mode='info')
def facts(self, facts_mode='facts'):
results = dict()
for entry in self.list_nets():
results[entry] = dict()
results[entry]["autostart"] = self.conn.get_autostart(entry)
results[entry]["persistent"] = self.conn.get_persistent(entry)
results[entry]["state"] = self.conn.get_status(entry)
results[entry]["bridge"] = self.conn.get_bridge(entry)
results[entry]["uuid"] = self.conn.get_uuid(entry)
try:
results[entry]["dhcp_leases"] = self.conn.get_dhcp_leases(entry)
# not supported on RHEL 6
except AttributeError:
pass
try:
results[entry]["forward_mode"] = self.conn.get_forward(entry)
except ValueError:
pass
try:
results[entry]["domain"] = self.conn.get_domain(entry)
except ValueError:
pass
try:
results[entry]["macaddress"] = self.conn.get_macaddress(entry)
except ValueError:
pass
facts = dict()
if facts_mode == 'facts':
facts["ansible_facts"] = dict()
facts["ansible_facts"]["ansible_libvirt_networks"] = results
elif facts_mode == 'info':
facts['networks'] = results
return facts
def core(module):
state = module.params.get('state', None)
name = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
autostart = module.params.get('autostart', None)
v = VirtNetwork(uri, module)
res = {}
if state and command == 'list_nets':
res = v.list_nets(state=state)
if not isinstance(res, dict):
res = {command: res}
return VIRT_SUCCESS, res
if state:
if not name:
module.fail_json(msg="state change requires a specified name")
res['changed'] = False
if state in ['active']:
if v.status(name) != 'active':
res['changed'] = True
res['msg'] = v.start(name)
elif state in ['present']:
try:
v.get_net(name)
except EntryNotFound:
if not xml:
module.fail_json(msg="network '" + name + "' not present, but xml not specified")
v.define(name, xml)
res = {'changed': True, 'created': name}
elif state in ['inactive']:
entries = v.list_nets()
if name in entries:
if v.status(name) != 'inactive':
res['changed'] = True
res['msg'] = v.destroy(name)
elif state in ['undefined', 'absent']:
entries = v.list_nets()
if name in entries:
if v.status(name) != 'inactive':
v.destroy(name)
res['changed'] = True
res['msg'] = v.undefine(name)
else:
module.fail_json(msg="unexpected state")
return VIRT_SUCCESS, res
if command:
if command in ENTRY_COMMANDS:
if not name:
module.fail_json(msg="%s requires 1 argument: name" % command)
if command in ('define', 'modify'):
if not xml:
module.fail_json(msg=command + " requires xml argument")
try:
v.get_net(name)
except EntryNotFound:
v.define(name, xml)
res = {'changed': True, 'created': name}
else:
if command == 'modify':
mod = v.modify(name, xml)
res = {'changed': mod, 'modified': name}
return VIRT_SUCCESS, res
res = getattr(v, command)(name)
if not isinstance(res, dict):
res = {command: res}
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if not isinstance(res, dict):
res = {command: res}
return VIRT_SUCCESS, res
else:
module.fail_json(msg="Command %s not recognized" % command)
if autostart is not None:
if not name:
module.fail_json(msg="state change requires a specified name")
res['changed'] = False
if autostart:
if not v.get_autostart(name):
res['changed'] = True
res['msg'] = v.set_autostart(name, True)
else:
if v.get_autostart(name):
res['changed'] = True
res['msg'] = v.set_autostart(name, False)
return VIRT_SUCCESS, res
module.fail_json(msg="expected state or command parameter to be specified")
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['network']),
state=dict(choices=['active', 'inactive', 'present', 'absent']),
command=dict(choices=ALL_COMMANDS),
uri=dict(default='qemu:///system'),
xml=dict(),
autostart=dict(type='bool')
),
supports_check_mode=True
)
if not HAS_VIRT:
module.fail_json(
msg='The `libvirt` module is not importable. Check the requirements.'
)
if not HAS_XML:
module.fail_json(
msg='The `lxml` module is not importable. Check the requirements.'
)
rc = VIRT_SUCCESS
try:
rc, result = core(module)
except Exception as e:
module.fail_json(msg=str(e))
if rc != 0: # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
mezz64/home-assistant | homeassistant/components/eafm/sensor.py | 10 | 5296 | """Support for gauges from flood monitoring API."""
from datetime import timedelta
import logging
from aioeafm import get_station
import async_timeout
from homeassistant.const import ATTR_ATTRIBUTION, LENGTH_METERS
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
UNIT_MAPPING = {
"http://qudt.org/1.1/vocab/unit#Meter": LENGTH_METERS,
}
def get_measures(station_data):
"""Force measure key to always be a list."""
if "measures" not in station_data:
return []
if isinstance(station_data["measures"], dict):
return [station_data["measures"]]
return station_data["measures"]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up UK Flood Monitoring Sensors."""
station_key = config_entry.data["station"]
session = async_get_clientsession(hass=hass)
measurements = set()
async def async_update_data():
# DataUpdateCoordinator will handle aiohttp ClientErrors and timeouts
async with async_timeout.timeout(30):
data = await get_station(session, station_key)
measures = get_measures(data)
entities = []
# Look to see if payload contains new measures
for measure in measures:
if measure["@id"] in measurements:
continue
if "latestReading" not in measure:
# Don't create a sensor entity for a gauge that isn't available
continue
entities.append(Measurement(hass.data[DOMAIN][station_key], measure["@id"]))
measurements.add(measure["@id"])
async_add_entities(entities)
# Turn data.measures into a dict rather than a list so easier for entities to
# find themselves.
data["measures"] = {measure["@id"]: measure for measure in measures}
return data
hass.data[DOMAIN][station_key] = coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="sensor",
update_method=async_update_data,
update_interval=timedelta(seconds=15 * 60),
)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_refresh()
class Measurement(CoordinatorEntity):
"""A gauge at a flood monitoring station."""
attribution = "This uses Environment Agency flood and river level data from the real-time data API"
def __init__(self, coordinator, key):
"""Initialise the gauge with a data instance and station."""
super().__init__(coordinator)
self.key = key
@property
def station_name(self):
"""Return the station name for the measure."""
return self.coordinator.data["label"]
@property
def station_id(self):
"""Return the station id for the measure."""
return self.coordinator.data["measures"][self.key]["stationReference"]
@property
def qualifier(self):
"""Return the qualifier for the station."""
return self.coordinator.data["measures"][self.key]["qualifier"]
@property
def parameter_name(self):
"""Return the parameter name for the station."""
return self.coordinator.data["measures"][self.key]["parameterName"]
@property
def name(self):
"""Return the name of the gauge."""
return f"{self.station_name} {self.parameter_name} {self.qualifier}"
@property
def unique_id(self):
"""Return the unique id of the gauge."""
return self.key
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, "measure-id", self.station_id)},
"name": self.name,
"manufacturer": "https://environment.data.gov.uk/",
"model": self.parameter_name,
"entry_type": "service",
}
@property
def available(self) -> bool:
"""Return True if entity is available."""
if not self.coordinator.last_update_success:
return False
# If sensor goes offline it will no longer contain a reading
if "latestReading" not in self.coordinator.data["measures"][self.key]:
return False
# Sometimes the latestReading key is present but actually a URL rather than a piece of data
# This is usually because the sensor has been archived
if not isinstance(
self.coordinator.data["measures"][self.key]["latestReading"], dict
):
return False
return True
@property
def unit_of_measurement(self):
"""Return units for the sensor."""
measure = self.coordinator.data["measures"][self.key]
if "unit" not in measure:
return None
return UNIT_MAPPING.get(measure["unit"], measure["unitName"])
@property
def device_state_attributes(self):
"""Return the sensor specific state attributes."""
return {ATTR_ATTRIBUTION: self.attribution}
@property
def state(self):
"""Return the current sensor value."""
return self.coordinator.data["measures"][self.key]["latestReading"]["value"]
| apache-2.0 |
SUSE-Cloud/glance | glance/api/v2/router.py | 3 | 4823 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.api.v2 import image_data
from glance.api.v2 import image_members
from glance.api.v2 import image_tags
from glance.api.v2 import images
from glance.api.v2 import schemas
from glance.common import wsgi
class API(wsgi.Router):
"""WSGI router for Glance v2 API requests."""
def __init__(self, mapper):
custom_image_properties = images.load_custom_properties()
schemas_resource = schemas.create_resource(custom_image_properties)
mapper.connect('/schemas/image',
controller=schemas_resource,
action='image',
conditions={'method': ['GET']})
mapper.connect('/schemas/images',
controller=schemas_resource,
action='images',
conditions={'method': ['GET']})
mapper.connect('/schemas/member',
controller=schemas_resource,
action='member',
conditions={'method': ['GET']})
mapper.connect('/schemas/members',
controller=schemas_resource,
action='members',
conditions={'method': ['GET']})
images_resource = images.create_resource(custom_image_properties)
mapper.connect('/images',
controller=images_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/images',
controller=images_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/images/{image_id}',
controller=images_resource,
action='update',
conditions={'method': ['PATCH']})
mapper.connect('/images/{image_id}',
controller=images_resource,
action='show',
conditions={'method': ['GET']})
mapper.connect('/images/{image_id}',
controller=images_resource,
action='delete',
conditions={'method': ['DELETE']})
image_data_resource = image_data.create_resource()
mapper.connect('/images/{image_id}/file',
controller=image_data_resource,
action='download',
conditions={'method': ['GET']})
mapper.connect('/images/{image_id}/file',
controller=image_data_resource,
action='upload',
conditions={'method': ['PUT']})
image_tags_resource = image_tags.create_resource()
mapper.connect('/images/{image_id}/tags/{tag_value}',
controller=image_tags_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/images/{image_id}/tags/{tag_value}',
controller=image_tags_resource,
action='delete',
conditions={'method': ['DELETE']})
image_members_resource = image_members.create_resource()
mapper.connect('/images/{image_id}/members',
controller=image_members_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/images/{image_id}/members/{member_id}',
controller=image_members_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/images/{image_id}/members',
controller=image_members_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/images/{image_id}/members/{member_id}',
controller=image_members_resource,
action='delete',
conditions={'method': ['DELETE']})
super(API, self).__init__(mapper)
| apache-2.0 |
jdstregz/sky-scraper | prototypes/prototypeGoogle/prototypeGoogle/spiders/google_splider_v2.py | 1 | 2718 | import scrapy
import csv
import psycopg2
import re
from scrapy_splash import SplashRequest
class GoogleSpider(scrapy.Spider):
name = "googlev2"
def start_requests(self):
urls = [
'https://cloud.google.com/compute/pricing',
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
#yield SplashRequest(url, self.parse, args={'wait':0.5})
def parse(self, response):
# create file names for the outputted csv files (will remove after database implementation)
page = response.url.split("/")
filename = 'google-%s.csv' % page
# attempt connection to the postgresql database
try:
conn = psycopg2.connect("dbname='testdb' user='docker' host='localhost' password='docker'")
except:
print "UNABLE TO CONNECT TO DATABASE. IS YOUR DOCKER RUNNING? HMMM"
# this is a test to see if table output can happen
cur = conn.cursor()
cur.execute("""SELECT * from test""")
rows = cur.fetchall()
print "\nShow me the databases:\n"
for row in rows:
print " ", row[1]
# open the csvfile before scrape parsing
with open(filename, 'wb') as csvfile:
tablewrite = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
# search through all tables in the page
tables = response.xpath('//table')
# create an htmlxpathselector
hxs = scrapy.selector.HtmlXPathSelector(response)
# Extract all data for each table and place in list
for table in tables:
for thead in table.xpath('.//thead'):  # renamed from 'header' to avoid shadowing the loop variable below
header_array = []
i = 0
# print out headings and append to header_array
for header in table.xpath('.//th/text()').extract():
print(header, i)
header_array.append(header)
i += 1
# write heading row to csv file
tablewrite.writerow(header_array)
# go through table body
for body in table.xpath('.//tbody'):
for row in body.xpath('.//tr'):
j = 0
row_array = []
for item in row.xpath('.//td').extract():
output = re.findall(r'us-hourly=\"(.+?)\"', item)
print output
print(item, j)
row_array.append(item)
j += 1
tablewrite.writerow(row_array)
| mit |
Priyansh2/test | ltrc/extractor/classification/test.py | 1 | 7909 | # -*- coding: utf-8 -*-
#!/usr/bin/env python2
from gensim import corpora, models
import gensim
from operator import itemgetter
import numpy as np
import sys
import os
import re
import codecs
import io
import math
from scipy import sparse
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.base import TransformerMixin
from sklearn import svm
from sklearn import metrics
from sklearn.pipeline import make_pipeline , Pipeline
reload(sys)
sys.setdefaultencoding('utf8')
np.set_printoptions(threshold='nan')
suffixes = {
1: ["ो", "े", "ू", "ु", "ी", "ि", "ा"],
2: ["कर", "ाओ", "िए", "ाई", "ाए", "ने", "नी", "ना", "ते", "ीं", "ती", "ता", "ाँ", "ां", "ों", "ें"],
3: ["ाकर", "ाइए", "ाईं", "ाया", "ेगी", "ेगा", "ोगी", "ोगे", "ाने", "ाना", "ाते", "ाती", "ाता", "तीं", "ाओं", "ाएं", "ुओं", "ुएं", "ुआं"],
4: ["ाएगी", "ाएगा", "ाओगी", "ाओगे", "एंगी", "ेंगी", "एंगे", "ेंगे", "ूंगी", "ूंगा", "ातीं", "नाओं", "नाएं", "ताओं", "ताएं", "ियाँ", "ियों", "ियां"],
5: ["ाएंगी", "ाएंगे", "ाऊंगी", "ाऊंगा", "ाइयाँ", "ाइयों", "ाइयां"],
}
categories=['A','C','D','E']
mappings={}
mappings['A']=1
mappings['C']=3
mappings['D']=4
mappings['E']=5
path='/home/priyansh/Downloads/ltrc/1055/'
train_data_path='/home/priyansh/Downloads/ltrc/extractor/clustering/four_class_devanagari/'
path1=train_data_path+"A/"
path2=train_data_path+"C/"
path3=train_data_path+"D/"
path4=train_data_path+"E/"
documents=[] #contains all doc filenames along with class labels
doc_info_with_label=[] #two tuple storage of doc info along with their respective labels
def hi_stem(word):
for L in 5, 4, 3, 2, 1:
if len(word) > L + 1:
for suf in suffixes[L]:
if word.endswith(suf):
return word[:-L]
return word
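# Illustrative behavior: suffixes are tried longest-first, and a suffix of
# length L is only stripped when the word is longer than L + 1 codepoints,
# e.g. a word ending in the 2-character suffix "ने" keeps at least two
# leading characters after stemming.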
def store_data(dir_path_list):
for dir_path in dir_path_list:
class_name = dir_path.split("/")[8]
for filename in os.listdir(dir_path):
if filename not in documents:
documents.append(filename+"+"+str(mappings[class_name]))
infilename=os.path.join(dir_path,filename)
with codecs.open(infilename,'r','utf-8') as fl:
string=''
for line in fl:
for word in line.split():
if word!=" " or word!="\n":
string+=word+" "
fl.close()
temp=[]
temp.append(class_name)
temp.append(string)
doc_info_with_label.append(tuple(temp))
path_list=[]
path_list.append(path1)
path_list.append(path2)
#path_list.append(path3)
#path_list.append(path4)
store_data(path_list)
y = [d[0] for d in doc_info_with_label]  # length = number of samples
corpus = [d[1] for d in doc_info_with_label]
class feature_extractor(TransformerMixin):
def __init__(self,*featurizers):
self.featurizers = featurizers
def fit(self,X,y=None):
return self
def transform(self,X):
collection_features=[]
for f in self.featurizers:
collection_features.append(f(X))
feature_vect=np.array(collection_features[0])
if len(collection_features)>1:
for i in range(1,len(collection_features)):
feature_vect=np.concatenate((feature_vect,np.array(collection_features[i])),axis=1)
#print feature_vect.shape
return feature_vect.tolist()
def tfidf_score(word,document_no,corpus_data):
#print word
my_word=word
stopwords_path='/home/priyansh/Downloads/ltrc/extractor/'
stop_words_filename='stopwords.txt'
stopwords=[] #contain all stopwords
with codecs.open(stopwords_path+stop_words_filename,'r','utf-8') as fl:
for line in fl:
for word in line.split():
stopwords.append(word)
fl.close()
document=corpus_data[document_no]
#print document
wordcount=0
total=0
temp = document.split()
for i in temp:
#print i
if i not in stopwords:
total+=1
if i==my_word:
#print my_word
#print word
wordcount+=1
#print wordcount
#print total
tf = float(wordcount)/total
#print tf
#return tf(word,document)*idf(word,corpus_data)
total_docs = len(corpus_data)
count=0
for doc in corpus_data:
temp=[]
temp = doc.split()
for i in temp:
if i==word:
count+=1
break
total_docs_which_contains_the_words=count
idf = math.log(total_docs/(1+total_docs_which_contains_the_words))
return tf*idf
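# The score above follows the classic formulation, as implemented here:
#   tf(w, d) = count(w in d) / |non-stopword tokens of d|
#   idf(w)   = log( N / (1 + df(w)) )   with df(w) = number of docs containing w
#   tfidf    = tf * idf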
def tfidf(corpus_data):
word_id_mapping={}
cnt=0
stopwords_path='/home/priyansh/Downloads/ltrc/extractor/'
stop_words_filename='stopwords.txt'
stopwords=[] #contain all stopwords
with codecs.open(stopwords_path+stop_words_filename,'r','utf-8') as fl:
for line in fl:
for word in line.split():
stopwords.append(word)
fl.close()
unique_words_in_corpus={}
count=0
for data in corpus_data:
corpus_id=count
temp=[]
temp=data.split()
for word in temp:
if word not in unique_words_in_corpus:
unique_words_in_corpus[word]=corpus_id
count+=1
stopped_unique_words_in_corpus={}
for word in unique_words_in_corpus:
if word not in stopwords:
stopped_unique_words_in_corpus[word]=unique_words_in_corpus[word]
word_id_mapping[word]=cnt
cnt+=1
#print unique_words_in_corpus
#print stopped_unique_words_in_corpus
#print word_id_mapping
feature_vect=[None]*len(corpus_data)
#score_vect=[None]*cnt
for i in range(0,len(corpus_data)):
score_vect=[0]*cnt
for word in stopped_unique_words_in_corpus:
if i==stopped_unique_words_in_corpus[word]:
#print word
score=tfidf_score(word,i,corpus_data)
#print score
score_vect[word_id_mapping[word]]=score
feature_vect[i]=score_vect
return feature_vect
def lda(corpus_data):
stopwords_path='/home/priyansh/Downloads/ltrc/extractor/'
stop_words_filename='stopwords.txt'
stopwords=[] #contain all stopwords
with codecs.open(stopwords_path+stop_words_filename,'r','utf-8') as fl:
for line in fl:
for word in line.split():
stopwords.append(word)
fl.close()
texts=[]
for data in corpus_data:
#print data
tokens=[]
temp=[]
stopped_tokens=[]
temp = data.split()
for word in temp:
tokens.append(word)
#print tokens
for i in tokens:
if i not in stopwords:
stopped_tokens.append(i)
stemmed_tokens=[]
for token in stopped_tokens:
stemmed_token = hi_stem(token)
stemmed_tokens.append(stemmed_token)
texts.append(stemmed_tokens)
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
num_topics=5
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=num_topics, id2word = dictionary, passes=10)
doc_topics=[]
for doc_vector in corpus:
doc_topics.append(ldamodel[doc_vector])
for i in range(0,len(doc_topics)):
doc_topics[i] = sorted(doc_topics[i],key=itemgetter(1),reverse=True)
feature_vect=[]
for i in doc_topics:
prob_vect=[0]*num_topics
#print i
topic_num = i[0][0]
topic_prob = i[0][1]
prob_vect[topic_num]=topic_prob
feature_vect.append(prob_vect)
#print i
#print feature_vect
return feature_vect
my_featurizer = feature_extractor(tfidf)
X = my_featurizer.transform(corpus)
#X = sparse.csr_matrix(X)
X_train , X_test , y_train , y_test = train_test_split(X,y,test_size=0.2,random_state=42)
#pipe = make_pipeline(my_featurizer,svm.LinearSVC())
#pipe.fit(X_train,y_train)
#pred = pipe.predict(X_test)
clf = svm.SVC(kernel='linear')
clf.fit(X_train,y_train)
pred = clf.predict(X_test)
print "Expected output\n"
print y_test
print "\n"
print "Output\n"
print pred
print "\n"
score = clf.score(X_test,y_test)
print score
print "\n"
print metrics.confusion_matrix(pred,y_test)
| gpl-3.0 |
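Editor's note on the tf-idf routine above: it rebuilds the stopword list and rescans the whole corpus for every scored word. Below is a minimal sketch of the same score — tf(word, doc) * log(N / (1 + df(word))) — with document frequencies precomputed once. The names (build_df, tfidf_score_fast) are illustrative, not part of the original script, and stopword filtering is omitted for brevity.

import math
from collections import Counter

def build_df(corpus_tokens):
    # document frequency: in how many documents each word appears (computed once)
    df = Counter()
    for tokens in corpus_tokens:
        df.update(set(tokens))
    return df

def tfidf_score_fast(word, tokens, df, total_docs):
    # same definitions as above: tf = count / len(doc), idf = log(N / (1 + df))
    if not tokens:
        return 0.0
    tf = float(tokens.count(word)) / len(tokens)
    idf = math.log(float(total_docs) / (1 + df[word]))
    return tf * idf

# usage sketch:
# corpus_tokens = [doc.split() for doc in corpus]
# df = build_df(corpus_tokens)
# score = tfidf_score_fast(word, corpus_tokens[i], df, len(corpus_tokens))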
chshu/openthread | tests/toranj/test-040-network-data-stable-full.py | 7 | 10584 | #!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description: Network Data update and version changes (stable only vs. full version).
#
# Network topology
#
# leader
# / | \
# / | \
# / | \
# c1 c2 c3
#
#
# c3 is sleepy-end node and also configured to request stable Network Data only
#
# Test covers the following steps:
# - Adding/removing prefixes (stable or temporary) on c1
# - Verifying that Network Data is updated on all nodes
# - Ensuring correct update to version and stable version
# The above steps are repeated over many different situations:
# - Where the same prefixes are also added by other nodes
# - Or the same prefixes are added as off-mesh routes by other nodes
#
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
def verify_prefix(
node_list,
prefix,
rloc16,
prefix_len=64,
stable=True,
priority='med',
on_mesh=False,
slaac=False,
dhcp=False,
configure=False,
default_route=False,
preferred=False,
):
"""
This function verifies that the `prefix` is present on all the nodes in the `node_list`. It also verifies that the
`prefix` is associated with the given `rloc16` (as an integer).
"""
for node in node_list:
prefixes = wpan.parse_on_mesh_prefix_result(node.get(wpan.WPAN_THREAD_ON_MESH_PREFIXES))
for p in prefixes:
if (p.prefix == prefix and p.origin == "ncp" and int(p.rloc16(), 0) == rloc16):
verify(int(p.prefix_len) == prefix_len)
verify(p.is_stable() == stable)
verify(p.is_on_mesh() == on_mesh)
verify(p.is_def_route() == default_route)
verify(p.is_slaac() == slaac)
verify(p.is_dhcp() == dhcp)
verify(p.is_config() == configure)
verify(p.is_preferred() == preferred)
verify(p.priority == priority)
break
else:
raise wpan.VerifyError("Did not find prefix {} on node {}".format(prefix, node))
def verify_no_prefix(node_list, prefix, rloc16):
"""
This function verifies that none of the nodes in `node_list` contains the on-mesh `prefix` associated with the
given `rloc16`.
"""
for node in node_list:
prefixes = wpan.parse_on_mesh_prefix_result(node.get(wpan.WPAN_THREAD_ON_MESH_PREFIXES))
for p in prefixes:
if (p.prefix == prefix and p.origin == "ncp" and int(p.rloc16(), 0) == rloc16):
raise wpan.VerifyError("Did find prefix {} with rloc {} on node {}".format(prefix, hex(rloc16), node))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 25
wpan.Node.set_time_speedup_factor(speedup)
leader = wpan.Node()
c1 = wpan.Node()
c2 = wpan.Node()
c3 = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
#
leader.form("bloodborne") # "fear the old blood"
c1.join_node(leader, wpan.JOIN_TYPE_END_DEVICE)
c2.join_node(leader, wpan.JOIN_TYPE_END_DEVICE)
c3.join_node(leader, wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
c3.set(wpan.WPAN_POLL_INTERVAL, '400')
# Clear the "full network data" flag on c3.
c3.set(wpan.WPAN_THREAD_DEVICE_MODE, '-')
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
WAIT_TIME = 15
prefix1 = "fd00:1::"
prefix2 = "fd00:2::"
prefix3 = "fd00:3::"
leader_rloc = int(leader.get(wpan.WPAN_THREAD_RLOC16), 0)
c1_rloc = int(c1.get(wpan.WPAN_THREAD_RLOC16), 0)
c2_rloc = int(c2.get(wpan.WPAN_THREAD_RLOC16), 0)
no_rloc = 0xfffe
def test_prefix_add_remove():
# Tests adding and removing stable and temporary prefixes on c1
# Verifies that all nodes in network do see the updates and that
# Network Data version and stable version are updated correctly.
old_version = int(leader.get(wpan.WPAN_THREAD_NETWORK_DATA_VERSION), 0)
old_stable_version = int(leader.get(wpan.WPAN_THREAD_STABLE_NETWORK_DATA_VERSION), 0)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Add a stable prefix and check all nodes see the prefix
c1.add_prefix(prefix1, stable=True)
def check_prefix1():
verify_prefix(
[leader, c1, c2],
prefix1,
c1_rloc,
stable=True,
)
verify_prefix(
[c3],
prefix1,
no_rloc,
stable=True,
)
wpan.verify_within(check_prefix1, WAIT_TIME)
new_version = int(leader.get(wpan.WPAN_THREAD_NETWORK_DATA_VERSION), 0)
new_stable_version = int(leader.get(wpan.WPAN_THREAD_STABLE_NETWORK_DATA_VERSION), 0)
verify(new_version == ((old_version + 1) % 256))
verify(new_stable_version == ((old_stable_version + 1) % 256))
old_version = new_version
old_stable_version = new_stable_version
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Add prefix 2 as temp (not stable)
c1.add_prefix(prefix2, stable=False)
def check_prefix2():
verify_prefix(
[leader, c1, c2],
prefix2,
c1_rloc,
stable=False,
)
wpan.verify_within(check_prefix1, WAIT_TIME)
wpan.verify_within(check_prefix2, WAIT_TIME)
new_version = int(leader.get(wpan.WPAN_THREAD_NETWORK_DATA_VERSION), 0)
new_stable_version = int(leader.get(wpan.WPAN_THREAD_STABLE_NETWORK_DATA_VERSION), 0)
verify(new_version == ((old_version + 1) % 256))
verify(new_stable_version == old_stable_version)
old_version = new_version
old_stable_version = new_stable_version
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Remove prefix 1
c1.remove_prefix(prefix1)
def check_no_prefix1():
verify_no_prefix([leader, c1, c2], prefix1, c1_rloc)
wpan.verify_within(check_no_prefix1, WAIT_TIME)
wpan.verify_within(check_prefix2, WAIT_TIME)
new_version = int(leader.get(wpan.WPAN_THREAD_NETWORK_DATA_VERSION), 0)
new_stable_version = int(leader.get(wpan.WPAN_THREAD_STABLE_NETWORK_DATA_VERSION), 0)
verify(new_version == ((old_version + 1) % 256))
verify(new_stable_version == ((old_stable_version + 1) % 256))
old_version = new_version
old_stable_version = new_stable_version
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Remove prefix 2
c1.remove_prefix(prefix2)
def check_no_prefix2():
verify_no_prefix([leader, c1, c2], prefix2, c1_rloc)
wpan.verify_within(check_no_prefix1, WAIT_TIME)
wpan.verify_within(check_no_prefix2, WAIT_TIME)
new_version = int(leader.get(wpan.WPAN_THREAD_NETWORK_DATA_VERSION), 0)
new_stable_version = int(leader.get(wpan.WPAN_THREAD_STABLE_NETWORK_DATA_VERSION), 0)
verify(new_version == ((old_version + 1) % 256))
verify(new_stable_version == old_stable_version)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Repeat the `test_prefix_add_remove()` under different situations
# where same prefix is added/removed by other nodes in the network
# or added as an off-mesh route.
num_routes = 0
test_prefix_add_remove()
leader.add_prefix(prefix1, stable=False)
test_prefix_add_remove()
leader.add_prefix(prefix2, stable=True)
test_prefix_add_remove()
leader.remove_prefix(prefix1)
test_prefix_add_remove()
leader.remove_prefix(prefix2)
test_prefix_add_remove()
leader.add_route(prefix1, stable=False)
num_routes = num_routes + 1
test_prefix_add_remove()
verify(len(wpan.parse_list(c2.get(wpan.WPAN_THREAD_OFF_MESH_ROUTES))) == num_routes)
leader.add_route(prefix2, stable=True)
num_routes = num_routes + 1
test_prefix_add_remove()
verify(len(wpan.parse_list(c2.get(wpan.WPAN_THREAD_OFF_MESH_ROUTES))) == num_routes)
leader.add_prefix(prefix3, stable=True)
test_prefix_add_remove()
verify(len(wpan.parse_list(c2.get(wpan.WPAN_THREAD_OFF_MESH_ROUTES))) == num_routes)
leader.remove_route(prefix2)
num_routes = num_routes - 1
test_prefix_add_remove()
verify(len(wpan.parse_list(c2.get(wpan.WPAN_THREAD_OFF_MESH_ROUTES))) == num_routes)
leader.remove_route(prefix1)
num_routes = num_routes - 1
test_prefix_add_remove()
verify(len(wpan.parse_list(c2.get(wpan.WPAN_THREAD_OFF_MESH_ROUTES))) == num_routes)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
| bsd-3-clause |
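Editor's note: the test above repeats the same wraparound comparison after every change. A tiny hypothetical helper (not part of the toranj framework) capturing the rule — Thread Network Data version fields are 8-bit counters, so an increment from 255 wraps to 0:

def expect_version_bump(old, new, bumped=True):
    # Network Data version fields are uint8; increments wrap modulo 256
    if bumped:
        assert new == (old + 1) % 256, (old, new)
    else:
        assert new == old, (old, new)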
HPPTECH/hpp_IOSTressTest | Refer/IOST_OLD_SRC/IOST_0.09/IOST_WMain_CTRL.py | 3 | 2475 | #!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_WMain_CTRL.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
from IOST_WRun import *
import gtk
import gtk.glade
class IOST_WMain_CTRL():
"""
"""
def __init__(self, glade_filename, object, builder=None):
"""
"""
if not builder:
self.IOST_CTRL_Builder = gtk.Builder()
self.IOST_CTRL_Builder.add_from_file(glade_filename)
self.IOST_CTRL_Builder.connect_signals(self)
else:
self.IOST_CTRL_Builder = builder
def GetCTRL_Obj(self, object_name):
"""
Get all CTRL objects on WMain window
"""
self.ConfigObjs[object_name]["SaveConfig_B_Obj"] = self.IOST_CTRL_Builder.get_object(self.ConfigObjs[object_name]["SaveConfig_B_Name"])
self.ConfigObjs[object_name]["Cancel_B_Obj"] = self.IOST_CTRL_Builder.get_object(self.ConfigObjs[object_name]["Cancel_B_Name"])
self.ConfigObjs[object_name]["Run_B_Obj"] = self.IOST_CTRL_Builder.get_object(self.ConfigObjs[object_name]["Run_B_Obj"])
#----------------------------------------------------------------------
# Run Button
#----------------------------------------------------------------------
def on_IOST_Wmain_Config_Save_B_clicked(self, object, data=None):
"Control to Save Condfig button"
# --------------------
# Cancel Button
# --------------------
def on_IOST_Wmain_Config_CTRL_Cancel_B_clicked(self, object, data=None):
"Control to Cancel button"
#----------------------------------------------------------------------
def on_IOST_Wmain_Config_CTRL_Run_B_clicked(self, object, data=None):
"Control to Run button"
IOST_WRun.__init__(self, self.IOST_WMain_GladeFile,
self.ConfigObjs["IOST_WRun"]["WRun_Name"],
None)
self.ConfigObjs["IOST_WMain"]["WMain_Obj"].hide()
#
| mit |
Muyiafan/android_kernel_oneplus_msm8994 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
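Editor's note: the tx path above follows a packet through net_dev_queue -> net_dev_xmit -> kfree_skb/consume_skb by linearly scanning bounded lists for a matching skbaddr. Below is a sketch of the same bookkeeping keyed on skbaddr for O(1) lookups — an alternative illustration, not the script's actual structure (skb addresses are recycled after free, which is one reason the script bounds its buffers instead):

tx_in_flight = {}  # skbaddr -> partial record, from queue until free
tx_done = []       # completed queue -> xmit -> free records

def on_queue(skbaddr, dev, skblen, t):
    tx_in_flight[skbaddr] = {'dev': dev, 'len': skblen, 'queue_t': t}

def on_xmit(skbaddr, t):
    rec = tx_in_flight.get(skbaddr)
    if rec is not None:
        rec['xmit_t'] = t

def on_free(skbaddr, t):
    rec = tx_in_flight.pop(skbaddr, None)
    if rec is not None and 'xmit_t' in rec:
        rec['free_t'] = t
        tx_done.append(rec)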
cindyyu/kuma | vendor/packages/pygments/styles/tango.py | 135 | 7096 | # -*- coding: utf-8 -*-
"""
pygments.styles.tango
~~~~~~~~~~~~~~~~~~~~~
The Crunchy default Style, inspired by the color palette from
the Tango Icon Theme Guidelines.
http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines
Butter: #fce94f #edd400 #c4a000
Orange: #fcaf3e #f57900 #ce5c00
Chocolate: #e9b96e #c17d11 #8f5902
Chameleon: #8ae234 #73d216 #4e9a06
Sky Blue: #729fcf #3465a4 #204a87
Plum: #ad7fa8 #75507b #5c35cc
Scarlet Red:#ef2929 #cc0000 #a40000
Aluminium: #eeeeec #d3d7cf #babdb6
#888a85 #555753 #2e3436
Not all of the above colors are used; other colors added:
very light grey: #f8f8f8 (for background)
This style can be used as a template as it includes all the known
Token types, unlike most (if not all) of the styles included in the
Pygments distribution.
However, since Crunchy is intended to be used by beginners, we have strived
to create a style that glosses over subtle distinctions between different
categories.
Taking Python for example, comments (Comment.*) and docstrings (String.Doc)
have been chosen to have the same style. Similarly, keywords (Keyword.*),
and Operator.Word (and, or, in) have been assigned the same style.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class TangoStyle(Style):
"""
The Crunchy default Style, inspired by the color palette from
the Tango Icon Theme Guidelines.
"""
# work in progress...
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Multiline: "italic #8f5902", # class: 'cm'
Comment.Preproc: "italic #8f5902", # class: 'cp'
Comment.Single: "italic #8f5902", # class: 'c1'
Comment.Special: "italic #8f5902", # class: 'cs'
Keyword: "bold #204a87", # class: 'k'
Keyword.Constant: "bold #204a87", # class: 'kc'
Keyword.Declaration: "bold #204a87", # class: 'kd'
Keyword.Namespace: "bold #204a87", # class: 'kn'
Keyword.Pseudo: "bold #204a87", # class: 'kp'
Keyword.Reserved: "bold #204a87", # class: 'kr'
Keyword.Type: "bold #204a87", # class: 'kt'
Operator: "bold #ce5c00", # class: 'o'
Operator.Word: "bold #204a87", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#204a87", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "bold #5c35cc", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #204a87", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
# since the tango light blue does not show up well in text, we choose
# a pure blue instead.
Number: "bold #0000cf", # class: 'm'
Number.Float: "bold #0000cf", # class: 'mf'
Number.Hex: "bold #0000cf", # class: 'mh'
Number.Integer: "bold #0000cf", # class: 'mi'
Number.Integer.Long: "bold #0000cf", # class: 'il'
Number.Oct: "bold #0000cf", # class: 'mo'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "italic #000000", # class: 'go'
Generic.Prompt: "#8f5902", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
| mpl-2.0 |
flopezag/fiware-backlog | kernel/NM_LabReporter.py | 1 | 1450 | import pickle
import os
from kernel import settings
from kconfig import helpdeskBookByName, labsBookByName
from kernel.DataBoard import Data
from kernel.NM_Aggregates import Deck, WorkBacklog
from kernel.NM_BacklogReporter import BacklogReporter
from kernel.NM_HelpDeskReporter import DeckReporter
__author__ = 'Manuel Escriche'
class LabReporter(BacklogReporter, DeckReporter):
def __init__(self):
lab = labsBookByName['Lab']
backlog = WorkBacklog(*Data.getLab())
channel = helpdeskBookByName['Main-Help-Desk'].channels['Lab']
deck = Deck(*Data.getChannel(channel.key))
BacklogReporter.__init__(self, backlog)
# super(BacklogReporter).__init__(backlog)
DeckReporter.__init__(self, lab.name, deck)
# super(DeckReporter).__init__(deck)
self.save()
def __getstate__(self):
state = self.__dict__.copy()
return state
def __setstate__(self, state):
self.__dict__.update(state)
def save(self):
filename = 'FIWARE.LabReporter.pkl'
longfilename = os.path.join(settings.storeHome, filename)
with open(longfilename, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
@classmethod
def fromFile(cls):
filename = 'FIWARE.LabReporter.pkl'
with open(os.path.join(settings.storeHome, filename), 'rb') as f:
return pickle.load(f)
if __name__ == "__main__":
pass
| apache-2.0 |
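Editor's note: the reporter pickles itself on construction and is restored with the fromFile() classmethod, so the expensive backlog aggregation happens once. A usage sketch, assuming the kernel settings and data sources are configured:

# build once (also writes FIWARE.LabReporter.pkl under settings.storeHome)
reporter = LabReporter()

# later, possibly in another process: restore without recomputing
cached = LabReporter.fromFile()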
InAnimaTe/CouchPotatoServer | libs/caper/matcher.py | 81 | 4952 | # Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from caper.helpers import is_list_type, update_dict, delta_seconds
from datetime import datetime
from logr import Logr
import re
class FragmentMatcher(object):
def __init__(self, pattern_groups):
self.regex = {}
self.construct_patterns(pattern_groups)
def construct_patterns(self, pattern_groups):
compile_start = datetime.now()
compile_count = 0
for group_name, patterns in pattern_groups:
if group_name not in self.regex:
self.regex[group_name] = []
# Transform into weight groups
if type(patterns[0]) is str or type(patterns[0][0]) not in [int, float]:
patterns = [(1.0, patterns)]
for weight, patterns in patterns:
weight_patterns = []
for pattern in patterns:
# Transform into multi-fragment patterns
if type(pattern) is str:
pattern = (pattern,)
if type(pattern) is tuple and len(pattern) == 2:
if type(pattern[0]) is str and is_list_type(pattern[1], str):
pattern = (pattern,)
result = []
for value in pattern:
if type(value) is tuple:
if len(value) == 2:
# Construct OR-list pattern
value = value[0] % '|'.join(value[1])
elif len(value) == 1:
value = value[0]
result.append(re.compile(value, re.IGNORECASE))
compile_count += 1
weight_patterns.append(tuple(result))
self.regex[group_name].append((weight, weight_patterns))
Logr.info("Compiled %s patterns in %ss", compile_count, delta_seconds(datetime.now() - compile_start))
def find_group(self, name):
for group_name, weight_groups in self.regex.items():
if group_name and group_name == name:
return group_name, weight_groups
return None, None
def value_match(self, value, group_name=None, single=True):
result = None
for group, weight_groups in self.regex.items():
if group_name and group != group_name:
continue
# TODO handle multiple weights
weight, patterns = weight_groups[0]
for pattern in patterns:
match = pattern[0].match(value)
if not match:
continue
if result is None:
result = {}
if group not in result:
result[group] = {}
result[group].update(match.groupdict())
if single:
return result
return result
def fragment_match(self, fragment, group_name=None):
"""Follow a fragment chain to try find a match
:type fragment: caper.objects.CaperFragment
:type group_name: str or None
:return: The weight of the match found between 0.0 and 1.0,
where 1.0 means perfect match and 0.0 means no match
:rtype: (float, dict, int)
"""
group_name, weight_groups = self.find_group(group_name)
for weight, patterns in weight_groups:
for pattern in patterns:
cur_fragment = fragment
success = True
result = {}
# Ignore empty patterns
if len(pattern) < 1:
break
for fragment_pattern in pattern:
if not cur_fragment:
success = False
break
match = fragment_pattern.match(cur_fragment.value)
if match:
update_dict(result, match.groupdict())
else:
success = False
break
cur_fragment = cur_fragment.right if cur_fragment else None
if success:
Logr.debug("Found match with weight %s" % weight)
return float(weight), result, len(pattern)
return 0.0, None, 1
| gpl-3.0 |
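Editor's note: construct_patterns() above normalizes several input shapes — bare strings, (weight, patterns) pairs, and ('fmt', [choices]) OR-lists expanded via '|'.join. A sketch of a valid pattern_groups value exercising each shape; the group names and regexes are invented for illustration:

pattern_groups = [
    # bare strings default to weight 1.0, single-fragment patterns
    ('season', [r'S(?P<season>\d{2})']),

    # explicit weight groups; the tuple form expands to '(?P<codec>x264|XViD)'
    ('video', [
        (0.8, [(r'(?P<codec>%s)', ['x264', 'XViD'])]),
        (0.5, [r'(?P<resolution>\d{3,4}p)']),
    ]),
]
matcher = FragmentMatcher(pattern_groups)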
Eleonore9/data-wrangler | data_wrangler/views/blog.py | 1 | 1084 | # -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import Blueprint
from data_wrangler.services.blog import get_page, get_post_detail
from data_wrangler.utils import flash_errors, render_extensions
blueprint = Blueprint('blog', __name__, static_folder="../static")
@blueprint.route("/blog/<page>/", methods=["GET"])
def blog_page(page=None):
"""
:param page:
:return:
"""
page = int(page)
_page_size = 3 # TODO: move into settings
if page is None or page <= 0:
next_page = 0
prev_page = 1
current = True
else:
next_page = page - 1
prev_page = page + 1
current = False
posts = get_page(_page_size, page)
return render_extensions("blog/blog_page.html", posts=posts, next_page=next_page, prev_page=prev_page, current=current)
@blueprint.route("/post_detail/<pk>/", methods=["GET"])
def blog_detail(pk):
"""
:param pk:
:return:
"""
post = get_post_detail(int(pk))
return render_extensions("blog/blog_detail.html", post=post)
| bsd-3-clause |
msabramo/pip | tests/functional/test_install_vcs_git.py | 34 | 3757 | import pytest
from mock import patch
from pip.vcs.git import Git
from tests.lib import _create_test_package
from tests.lib.git_submodule_helpers import (
_change_test_package_submodule,
_pull_in_submodule_changes_to_module,
_create_test_package_with_submodule,
)
@pytest.mark.network
def test_get_refs_should_return_tag_name_and_commit_pair(script):
version_pkg_path = _create_test_package(script)
script.run('git', 'tag', '0.1', cwd=version_pkg_path)
script.run('git', 'tag', '0.2', cwd=version_pkg_path)
commit = script.run(
'git', 'rev-parse', 'HEAD',
cwd=version_pkg_path
).stdout.strip()
git = Git()
result = git.get_refs(version_pkg_path)
assert result['0.1'] == commit, result
assert result['0.2'] == commit, result
@pytest.mark.network
def test_get_refs_should_return_branch_name_and_commit_pair(script):
version_pkg_path = _create_test_package(script)
script.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = script.run(
'git', 'rev-parse', 'HEAD',
cwd=version_pkg_path
).stdout.strip()
git = Git()
result = git.get_refs(version_pkg_path)
assert result['master'] == commit, result
assert result['branch0.1'] == commit, result
@pytest.mark.network
def test_get_refs_should_ignore_no_branch(script):
version_pkg_path = _create_test_package(script)
script.run('git', 'branch', 'branch0.1', cwd=version_pkg_path)
commit = script.run(
'git', 'rev-parse', 'HEAD',
cwd=version_pkg_path
).stdout.strip()
# current branch here is "* (nobranch)"
script.run(
'git', 'checkout', commit,
cwd=version_pkg_path,
expect_stderr=True,
)
git = Git()
result = git.get_refs(version_pkg_path)
assert result['master'] == commit, result
assert result['branch0.1'] == commit, result
@patch('pip.vcs.git.Git.get_refs')
def test_check_rev_options_should_handle_branch_name(get_refs_mock):
get_refs_mock.return_value = {'master': '123456', '0.1': '123456'}
git = Git()
result = git.check_rev_options('master', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_refs')
def test_check_rev_options_should_handle_tag_name(get_refs_mock):
get_refs_mock.return_value = {'master': '123456', '0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456']
@patch('pip.vcs.git.Git.get_refs')
def test_check_rev_options_should_handle_ambiguous_commit(get_refs_mock):
get_refs_mock.return_value = {'master': '123456', '0.1': '123456'}
git = Git()
result = git.check_rev_options('0.1', '.', [])
assert result == ['123456'], result
# TODO(pnasrat) fix all helpers to do right things with paths on windows.
@pytest.mark.skipif("sys.platform == 'win32'")
@pytest.mark.network
def test_check_submodule_addition(script):
"""
Submodules are pulled in on install and updated on upgrade.
"""
module_path, submodule_path = _create_test_package_with_submodule(script)
install_result = script.pip(
'install', '-e', 'git+' + module_path + '#egg=version_pkg'
)
assert (
script.venv / 'src/version-pkg/testpkg/static/testfile'
in install_result.files_created
)
_change_test_package_submodule(script, submodule_path)
_pull_in_submodule_changes_to_module(script, module_path)
# expect error because git may write to stderr
update_result = script.pip(
'install', '-e', 'git+' + module_path + '#egg=version_pkg',
'--upgrade',
expect_error=True,
)
assert (
script.venv / 'src/version-pkg/testpkg/static/testfile2'
in update_result.files_created
)
| mit |
waynesun09/virt-test | virttest/libvirt_xml/base.py | 13 | 9021 | import logging
import imp
from autotest.client import utils
from virttest import propcan, xml_utils, virsh
from virttest.libvirt_xml import xcepts
class LibvirtXMLBase(propcan.PropCanBase):
"""
Base class for common attributes/methods applying to all sub-classes
Properties:
xml:
virtual XMLTreeFile instance
get:
xml filename string
set:
create new XMLTreeFile instance from string or filename
del:
deletes property, closes & unlinks any temp. files
xmltreefile:
XMLTreeFile instance
virsh:
virsh module or Virsh class instance
set:
validates and sets value
get:
returns value
del:
removes value
validates:
virtual boolean, read-only, True/False from virt-xml-validate
"""
__slots__ = ('xml', 'virsh', 'xmltreefile', 'validates')
__uncompareable__ = __slots__
__schema_name__ = None
def __init__(self, virsh_instance=virsh):
"""
Initialize instance with connection to virsh
:param virsh_instance: virsh module or instance to use
"""
self.__dict_set__('xmltreefile', None)
self.__dict_set__('validates', None)
super(LibvirtXMLBase, self).__init__({'virsh': virsh_instance,
'xml': None})
# Can't use accessors module here, would make circular dep.
def __str__(self):
"""
Returns raw XML as a string
"""
return str(self.__dict_get__('xml'))
def __eq__(self, other):
# Dynamic accessor methods mean we cannot compare class objects
# directly
if self.__class__.__name__ != other.__class__.__name__:
return False
# Don't assume both instances have same comparables
uncomparable = set(self.__uncompareable__)
uncomparable |= set(other.__uncompareable__)
dict_1 = {}
dict_2 = {}
slots = set(self.__all_slots__) | set(other.__all_slots__)
for slot in slots - uncomparable:
try:
dict_1[slot] = getattr(self, slot)
except xcepts.LibvirtXMLNotFoundError:
pass # Unset virtual values won't have keys
try:
dict_2[slot] = getattr(other, slot)
except xcepts.LibvirtXMLNotFoundError:
pass # Unset virtual values won't have keys
return dict_1 == dict_2
def __contains__(self, key):
"""
Also hide any Libvirt_xml API exceptions behind standard python behavior
"""
try:
return super(LibvirtXMLBase, self).__contains__(key)
except xcepts.LibvirtXMLError:
return False
def set_virsh(self, value):
"""Accessor method for virsh property, make sure it's right type"""
value_type = type(value)
# issubclass can't work for classes using __slots__ (i.e. no __bases__)
if hasattr(value, 'VIRSH_EXEC') or hasattr(value, 'virsh_exec'):
self.__dict_set__('virsh', value)
else:
raise xcepts.LibvirtXMLError("virsh parameter must be a module "
"named virsh or subclass of virsh.VirshBase "
"not a %s" % str(value_type))
def set_xml(self, value):
"""
Accessor method for 'xml' property to load using xml_utils.XMLTreeFile
"""
# Always check to see if a "set" accessor is being called from __init__
if not self.__super_get__('INITIALIZED'):
self.__dict_set__('xml', value)
else:
try:
if self.__dict_get__('xml') is not None:
del self['xml'] # clean up old temporary files
except KeyError:
pass # Allow other exceptions through
# value could be filename or a string full of XML
self.__dict_set__('xml', xml_utils.XMLTreeFile(value))
def get_xml(self):
"""
Accessor method for 'xml' property returns xmlTreeFile backup filename
"""
return self.xmltreefile.name # The filename
def get_xmltreefile(self):
"""
Return the xmltreefile object backing this instance
"""
try:
# don't call get_xml() recursively
xml = self.__dict_get__('xml')
if xml is None:
raise KeyError
except (KeyError, AttributeError):
raise xcepts.LibvirtXMLError("No xml data has been loaded")
return xml # XMLTreeFile loaded by set_xml() method
def set_xmltreefile(self, value):
"""
Point instance directly at an already initialized XMLTreeFile instance
"""
if not issubclass(type(value), xml_utils.XMLTreeFile):
raise xcepts.LibvirtXMLError("xmltreefile value must be XMLTreefile"
" type or subclass, not a %s"
% type(value))
self.__dict_set__('xml', value)
def del_xmltreefile(self):
"""
Remove all backing XML
"""
self.__dict_del__('xml')
def copy(self):
"""
Returns a copy of instance not sharing any references or modifications
"""
# help keep line length short, virsh is not a property
the_copy = self.__class__(virsh_instance=self.virsh)
try:
# file may not be accessible, obtain XML string value
xmlstr = str(self.__dict_get__('xml'))
# Create a fresh/new XMLTreeFile, along with tmp files, from the XML content
the_copy.__dict_set__('xml', xml_utils.XMLTreeFile(xmlstr))
except xcepts.LibvirtXMLError: # Allow other exceptions through
pass # no XML was loaded yet
return the_copy
def get_section_string(self, xpath):
"""
Returns the content of section in xml.
"""
section = self.xmltreefile.find(xpath)
if section is None:
raise xcepts.LibvirtXMLNotFoundError("Path %s is not found." % xpath)
return self.xmltreefile.get_element_string(xpath)
def get_validates(self):
"""
Accessor method for 'validates' property returns virt-xml-validate T/F
"""
# self.xml is the filename
ret = self.virt_xml_validate(self.xml,
self.__super_get__('__schema_name__'))
if ret.exit_status == 0:
return True
else:
logging.debug(ret)
return False
def set_validates(self, value):
"""
Raises LibvirtXMLError
"""
del value # not needed
raise xcepts.LibvirtXMLError("Read only property")
def del_validates(self):
"""
Raises LibvirtXMLError
"""
raise xcepts.LibvirtXMLError("Read only property")
def restore(self):
"""
Restore current xml content to original source content
"""
self.xmltreefile.restore()
@staticmethod
def virt_xml_validate(filename, schema_name=None):
"""
Return CmdResult from running virt-xml-validate on backing XML
"""
command = 'virt-xml-validate %s' % filename
if schema_name:
command += ' %s' % schema_name
cmdresult = utils.run(command, ignore_status=True)
return cmdresult
def load_xml_module(path, name, type_list):
"""
Returns named xml element's handler class
:param path: the xml module path
:param name: the xml module name
:param type_list: the supported type list of xml module names
:return: the named xml element's handler class
"""
# Module names and tags are always all lower-case
name = str(name).lower()
errmsg = ("Unknown/unsupported type '%s', supported types %s"
% (str(name), type_list))
if name not in type_list:
raise xcepts.LibvirtXMLError(errmsg)
try:
filename, pathname, description = imp.find_module(name,
[path])
mod_obj = imp.load_module(name, filename, pathname, description)
# Enforce capitalized class names
return getattr(mod_obj, name.capitalize())
except TypeError, detail:
raise xcepts.LibvirtXMLError(errmsg + ': %s' % str(detail))
except ImportError, detail:
raise xcepts.LibvirtXMLError("Can't find module %s in %s: %s"
% (name, path, str(detail)))
except AttributeError, detail:
raise xcepts.LibvirtXMLError("Can't find class %s in %s module in "
"%s: %s"
% (name.capitalize(), name, pathname,
str(detail)))
| gpl-2.0 |
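Editor's note: LibvirtXMLBase relies on propcan routing attribute access through get_/set_/del_ accessors named after each slot, which is why the class defines set_xml, get_xmltreefile, and so on instead of plain attributes. A minimal, self-contained sketch of that dispatch idea (independent of the propcan module, purely illustrative):

class AccessorBase(object):
    """Route attribute reads/writes through get_<name>/set_<name> methods."""

    def __getattr__(self, name):
        # called only when normal lookup fails, e.g. for virtual properties
        getter = getattr(type(self), 'get_' + name, None)
        if getter is not None:
            return getter(self)
        raise AttributeError(name)

    def __setattr__(self, name, value):
        setter = getattr(type(self), 'set_' + name, None)
        if setter is not None:
            setter(self, value)
        else:
            object.__setattr__(self, name, value)

class Demo(AccessorBase):
    def __init__(self):
        object.__setattr__(self, '_store', {})

    def get_xml(self):
        return self._store.get('xml')

    def set_xml(self, value):
        # validation/conversion hook lives here, as in LibvirtXMLBase.set_xml
        self._store['xml'] = str(value)

# d = Demo(); d.xml = 42; d.xml == '42'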
sinhrks/numpy | numpy/polynomial/chebyshev.py | 79 | 61966 | """
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to an positive integer power
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebvander2d` -- Vandermonde-like matrix for 2D power series.
- `chebvander3d` -- Vandermonde-like matrix for 3D power series.
- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
- `chebweight` -- Chebyshev weight function.
- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
'chebgauss', 'chebweight']
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c):
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = c.size
zs = np.zeros(2*n-1, dtype=c.dtype)
zs[n-1:] = c/2
return zs + zs[::-1]
def _zseries_to_cseries(zs):
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
c = zs[n-1:].copy()
c[1:n] *= 2
return c
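# Editor's illustration (not part of the original module): for c = [c0, c1, c2]
# the z-series is the symmetric array [c2/2, c1/2, c0, c1/2, c2/2], so the two
# helpers above round-trip exactly:
#
#     >>> c = np.array([1., 2., 3.])
#     >>> _cseries_to_zseries(c)
#     array([ 1.5,  1. ,  1. ,  1. ,  1.5])
#     >>> _zseries_to_cseries(_cseries_to_zseries(c))
#     array([ 1.,  2.,  3.])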
def _zseries_mul(z1, z2):
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D but this is not checked.
Returns
-------
product : 1-D ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2):
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-D ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
len1 = len(z1)
len2 = len(z2)
if len2 == 1:
z1 /= z2
return z1, z1[:1]*0
elif len1 < len2:
return z1[:1]*0, z1
else:
dlen = len1 - len2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j:
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
z1[j:j+len2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
quo /= scl
rem = z1[i+1:i-1+len2].copy()
return quo, rem
def _zseries_der(zs):
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs):
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol):
"""
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
>>> P.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = chebadd(chebmulx(res), pol[i])
return res
def cheb2poly(c):
"""
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(range(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
>>> P.cheb2poly(range(4))
array([ -2., -8., 4., 12.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1)
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1, 1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0, 1])
def chebline(off, scl):
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def chebfromroots(roots):
"""
Generate a Chebyshev series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Chebyshev form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Chebyshev form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [chebline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [chebmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = chebmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebmulx(c):
"""Multiply a Chebyshev series by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
if len(c) > 1:
tmp = c[1:]/2
prd[2:] = tmp
prd[0:-2] += tmp
return prd
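# Illustrative sketch (hand-computed): using the identity
# x*T_n = (T_{n+1} + T_{n-1})/2, chebmulx([1, 2, 3]) returns
# array([ 1. , 2.5, 1. , 1.5]), i.e.
# x*(T_0 + 2*T_1 + 3*T_2) == T_0 + 2.5*T_1 + T_2 + 1.5*T_3.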
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "reproject"
the product onto said basis set, which typically produces
"unintuitive live" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
prd = _zseries_mul(z1, z2)
ret = _zseries_to_cseries(prd)
return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "reproject" the results onto said basis
set, which typically produces "unintuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([ 0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
quo, rem = _zseries_div(z1, z2)
quo = pu.trimseq(_zseries_to_cseries(quo))
rem = pu.trimseq(_zseries_to_cseries(rem))
return quo, rem
def chebpow(c, pow, maxpower=16):
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high,
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16.
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmul, chebdiv
Examples
--------
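A hand-computed sketch (the printed whitespace may differ between
numpy versions):
>>> from numpy.polynomial import chebyshev as C
>>> C.chebpow((1, 2, 3), 2)
array([  7.5,  10. ,   8. ,   6. ,   4.5])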
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(c)
prd = zs
for i in range(2, power + 1):
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
def chebder(c, m=1, scl=1, axis=0):
"""
Differentiate a Chebyshev series.
Returns the Chebyshev series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"reprojected" onto the C-series basis set. Thus, typically, the
result of this function is "unintuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
array([ 14., 12., 24.])
>>> C.chebder(c,3)
array([ 96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
array([ 12., 96.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j)*c[j]
c[j - 2] += (j*c[j])/(j - 2)
if n > 1:
der[1] = 4*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Chebyshev series.
Returns the Chebyshev series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3)
>>> C.chebint(c)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
0.00625 ])
>>> C.chebint(c, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,scl=-2)
array([-1., 1., -1., -1.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/4
for j in range(2, n):
t = c[j]/(2*j + 1)  # dead assignment: t is never used below
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def chebval(x, c, tensor=True):
"""
Evaluate a Chebyshev series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
chebval2d, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
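A hand-computed sketch (formatting may vary between numpy versions):
>>> from numpy.polynomial import chebyshev as C
>>> C.chebval(0.5, (1, 2, 3))
0.5
>>> C.chebval([0, 1], (1, 2, 3))
array([-2.,  6.])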
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
x2 = 2*x
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
c0 = c[-i] - c1
c1 = tmp + c1*x2
return c0 + c1*x
def chebval2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than 2 the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
chebval, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except Exception:
raise ValueError('x, y are incompatible')
c = chebval(x, c)
c = chebval(y, c, tensor=False)
return c
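# Illustrative sketch (hand-computed): with c = [[1, 0], [0, 1]] the series
# is T_0(x)*T_0(y) + T_1(x)*T_1(y) = 1 + x*y, so
# chebval2d(0.5, 0.5, [[1, 0], [0, 1]]) evaluates to 1.25.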
def chebgrid2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points in the
Cartesian product of `x` and `y`.
See Also
--------
chebval, chebval2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = chebval(x, c)
c = chebval(y, c)
return c
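# Illustrative sketch (hand-computed): with the same c = [[1, 0], [0, 1]]
# (i.e. the series 1 + a*b), chebgrid2d([0, 1], [0, .5, 1], [[1, 0], [0, 1]])
# returns the 2x3 grid array([[ 1. , 1. , 1. ], [ 1. , 1.5, 2. ]]),
# with `x` varying along rows and `y` along columns.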
def chebval3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except Exception:
raise ValueError('x, y, z are incompatible')
c = chebval(x, c)
c = chebval(y, c, tensor=False)
c = chebval(z, c, tensor=False)
return c
def chebgrid3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional Chebyshev series at points in the
Cartesian product of `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
c = chebval(x, c)
c = chebval(y, c)
c = chebval(z, c)
return c
def chebvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = T_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Chebyshev polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
``chebval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Chebyshev series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Chebyshev polynomial. The dtype will be the same as
the converted `x`.
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries.
v[0] = x*0 + 1
if ideg > 0:
x2 = 2*x
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x2 - v[i-2]
return np.rollaxis(v, 0, v.ndim)
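# Illustrative sketch (hand-computed): each row holds
# [T_0(x), T_1(x), T_2(x)] at one sample point, e.g. chebvander([-1, 0, 1], 2)
# gives array([[ 1., -1., 1.], [ 1., 0., -1.], [ 1., 1., 1.]]).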
def chebvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = T_i(x) * T_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Chebyshev polynomials.
If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def chebvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Chebyshev polynomials.
If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
chebvander, chebvander2d, chebval2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
vz = chebvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
Return the coefficients of a Chebyshev series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting series
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
polyfit, legfit, lagfit, hermfit, hermefit
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
chebweight : Chebyshev weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Chebyshev series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
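A minimal sketch with assumed sample data; since ``x**2`` equals
``0.5*T_0 + 0.5*T_2`` exactly, the fit recovers those coefficients up
to roundoff:
>>> import numpy as np
>>> from numpy.polynomial import chebyshev as C
>>> x = np.linspace(-1, 1, 51)
>>> np.allclose(C.chebfit(x, x**2, 2), [0.5, 0, 0.5])
True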
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = chebvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def chebcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[0] = np.sqrt(.5)
top[1:] = 1/2
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
return mat
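# Illustrative sketch (hand-computed): for T_2 itself,
# chebcompanion([0, 0, 1]) is the symmetric matrix
# [[0, sqrt(.5)], [sqrt(.5), 0]], whose eigenvalues +/-sqrt(.5) are the
# roots cos(pi/4) and cos(3*pi/4) of T_2.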
def chebroots(c):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * T_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Chebyshev series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = chebcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
Computes the sample points and weights for Gauss-Chebyshev quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1/\sqrt{1 - x^2}`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100; higher degrees may
be problematic. For Gauss-Chebyshev there are closed form solutions for
the sample points and weights. If n = `deg`, then
.. math:: x_i = \cos(\pi (2 i - 1) / (2 n))
.. math:: w_i = \pi / n
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
w = np.ones(ideg)*(np.pi/ideg)
return x, w
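# Illustrative sketch (hand-computed): chebgauss(2) gives
# x = [cos(pi/4), cos(3*pi/4)] and w = [pi/2, pi/2]; np.sum(w*f(x)) then
# approximates the integral of f(x)/sqrt(1 - x**2) over [-1, 1], e.g.
# f(x) = 1 integrates to pi exactly.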
def chebweight(x):
"""
The weight function of the Chebyshev polynomials.
The weight function is :math:`1/\sqrt{1 - x^2}` and the interval of
integration is :math:`[-1, 1]`. The Chebyshev polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
"""
w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
return w
def chebpts1(npts):
"""
Chebyshev points of the first kind.
The Chebyshev points of the first kind are the points ``cos(x)``,
where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the first kind.
See Also
--------
chebpts2
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 1:
raise ValueError("npts must be >= 1")
x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)
return np.cos(x)
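# Illustrative sketch (hand-computed): chebpts1(3) returns the cosines
# of [-5*pi/6, -pi/2, -pi/6], i.e. approximately
# [-sqrt(3)/2, 0, sqrt(3)/2], in increasing order.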
def chebpts2(npts):
"""
Chebyshev points of the second kind.
The Chebyshev points of the second kind are the points ``cos(x)``,
where ``x = [pi*k/(npts - 1) for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the second kind.
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 2:
raise ValueError("npts must be >= 2")
x = np.linspace(-np.pi, 0, _npts)
return np.cos(x)
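# Illustrative sketch (hand-computed): chebpts2(4) returns the cosines
# of [-pi, -2*pi/3, -pi/3, 0], i.e. approximately [-1., -0.5, 0.5, 1.];
# unlike chebpts1, the endpoints -1 and 1 are always included.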
#
# Chebyshev series class
#
class Chebyshev(ABCPolyBase):
"""A Chebyshev series class.
The Chebyshev class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
methods listed below.
Parameters
----------
coef : array_like
Chebyshev coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(chebadd)
_sub = staticmethod(chebsub)
_mul = staticmethod(chebmul)
_div = staticmethod(chebdiv)
_pow = staticmethod(chebpow)
_val = staticmethod(chebval)
_int = staticmethod(chebint)
_der = staticmethod(chebder)
_fit = staticmethod(chebfit)
_line = staticmethod(chebline)
_roots = staticmethod(chebroots)
_fromroots = staticmethod(chebfromroots)
# Virtual properties
nickname = 'cheb'
domain = np.array(chebdomain)
window = np.array(chebdomain)
| bsd-3-clause |
alexlo03/ansible | lib/ansible/modules/cloud/azure/azure_rm_routetable_facts.py | 25 | 5564 | #!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_routetable_facts
version_added: "2.7"
short_description: Get route table facts.
description:
- Get facts for a specific route table or all route tables in a resource group or subscription.
options:
name:
description:
- Limit results to a specific route table.
resource_group:
description:
- Limit results in a specific resource group.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Get facts for one route table
azure_rm_routetable_facts:
name: Testing
resource_group: foo
- name: Get facts for all route tables
azure_rm_routetable_facts:
resource_group: foo
- name: Get facts by tags
azure_rm_routetable_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
id:
description: Resource id.
returned: success
type: str
name:
description: Name of the resource.
returned: success
type: str
resource_group:
description: Resource group of the route table.
returned: success
type: str
disable_bgp_route_propagation:
description: Whether the routes learned by BGP on that route table are disabled.
returned: success
type: bool
tags:
description: Tags of the route table.
returned: success
type: list
routes:
description: Current routes of the route table.
returned: success
type: list
sample: [
{
"id": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/routeTables/foobar/routes/route",
"name": "route",
"resource_group": "Testing",
"routeTables": "foobar",
"address_prefix": "192.0.0.1",
"next_hop_type": "virtual_networkGateway"
}
]
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
from ansible.module_utils.common.dict_transformations import _camel_to_snake
def route_to_dict(route):
id_dict = azure_id_to_dict(route.id)
return dict(
id=route.id,
name=route.name,
resource_group=id_dict.get('resourceGroups'),
route_table_name=id_dict.get('routeTables'),
address_prefix=route.address_prefix,
next_hop_type=_camel_to_snake(route.next_hop_type),
next_hop_ip_address=route.next_hop_ip_address
)
def instance_to_dict(table):
return dict(
id=table.id,
name=table.name,
resource_group=azure_id_to_dict(table.id).get('resourceGroups'),
location=table.location,
routes=[route_to_dict(i) for i in table.routes] if table.routes else [],
disable_bgp_route_propagation=table.disable_bgp_route_propagation,
tags=table.tags
)
class AzureRMRouteTableFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
route_tables=[]
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMRouteTableFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
response = []
if self.name:
response = self.get_item()
elif self.resource_group:
response = self.list_items()
else:
response = self.list_all_items()
self.results['route_tables'] = [instance_to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
return self.results
def get_item(self):
self.log('Get route table for {0}-{1}'.format(self.resource_group, self.name))
try:
item = self.network_client.route_tables.get(self.resource_group, self.name)
return [item]
except CloudError:
pass
return []
def list_items(self):
self.log('List all items in resource group')
try:
return self.network_client.route_tables.list(self.resource_group)
except CloudError as exc:
self.fail("Failed to list items - {0}".format(str(exc)))
return []
def list_all_items(self):
self.log("List all items in subscription")
try:
return self.network_client.route_tables.list_all()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
return []
def main():
AzureRMRouteTableFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/core/framework/reader_base_pb2.py | 2 | 3527 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/reader_base.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/reader_base.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n+tensorflow/core/framework/reader_base.proto\x12\ntensorflow\"r\n\x0fReaderBaseState\x12\x14\n\x0cwork_started\x18\x01 \x01(\x03\x12\x15\n\rwork_finished\x18\x02 \x01(\x03\x12\x1c\n\x14num_records_produced\x18\x03 \x01(\x03\x12\x14\n\x0c\x63urrent_work\x18\x04 \x01(\x0c\x42\x31\n\x18org.tensorflow.frameworkB\x10ReaderBaseProtosP\x01\xf8\x01\x01\x62\x06proto3')
)
_READERBASESTATE = _descriptor.Descriptor(
name='ReaderBaseState',
full_name='tensorflow.ReaderBaseState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='work_started', full_name='tensorflow.ReaderBaseState.work_started', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='work_finished', full_name='tensorflow.ReaderBaseState.work_finished', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_records_produced', full_name='tensorflow.ReaderBaseState.num_records_produced', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_work', full_name='tensorflow.ReaderBaseState.current_work', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=173,
)
DESCRIPTOR.message_types_by_name['ReaderBaseState'] = _READERBASESTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ReaderBaseState = _reflection.GeneratedProtocolMessageType('ReaderBaseState', (_message.Message,), dict(
DESCRIPTOR = _READERBASESTATE,
__module__ = 'tensorflow.core.framework.reader_base_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ReaderBaseState)
))
_sym_db.RegisterMessage(ReaderBaseState)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\020ReaderBaseProtosP\001\370\001\001'))
# @@protoc_insertion_point(module_scope)
| mit |
Nelca/buildMLSystem | ch02/figure4_5.py | 3 | 1999 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
COLOUR_FIGURE = False
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from load import load_dataset
import numpy as np
from knn import learn_model, apply_model, accuracy
feature_names = [
'area',
'perimeter',
'compactness',
'length of kernel',
'width of kernel',
'asymmetry coefficient',
'length of kernel groove',
]
def train_plot(features, labels):
y0, y1 = features[:, 2].min() * .9, features[:, 2].max() * 1.1
x0, x1 = features[:, 0].min() * .9, features[:, 0].max() * 1.1
X = np.linspace(x0, x1, 100)
Y = np.linspace(y0, y1, 100)
X, Y = np.meshgrid(X, Y)
model = learn_model(1, features[:, (0, 2)], np.array(labels))
C = apply_model(
np.vstack([X.ravel(), Y.ravel()]).T, model).reshape(X.shape)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .6, .6), (.6, 1., .6), (.6, .6, 1.)])
else:
cmap = ListedColormap([(1., 1., 1.), (.2, .2, .2), (.6, .6, .6)])
plt.xlim(x0, x1)
plt.ylim(y0, y1)
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[2])
plt.pcolormesh(X, Y, C, cmap=cmap)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .0, .0), (.0, 1., .0), (.0, .0, 1.)])
plt.scatter(features[:, 0], features[:, 2], c=labels, cmap=cmap)
else:
for lab, ma in zip(range(3), "Do^"):
plt.plot(features[labels == lab, 0], features[
labels == lab, 2], ma, c=(1., 1., 1.))
features, labels = load_dataset('seeds')
names = sorted(set(labels))
labels = np.array([names.index(ell) for ell in labels])
train_plot(features, labels)
plt.savefig('../1400_02_04.png')
features -= features.mean(0)
features /= features.std(0)
train_plot(features, labels)
plt.savefig('../1400_02_05.png')
| mit |
kfox1111/horizon | openstack_dashboard/openstack/common/policy.py | 8 | 28726 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: a list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
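As a usage sketch (the `enforcer` argument below is an Enforcer
instance; the rule string and dicts are illustrative)::
    check = parse_rule("role:admin or role:projectadmin")
    allowed = check({'project_id': '1234'},
                    {'roles': ['projectadmin']}, enforcer)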
"""
import abc
import ast
import os
import re
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from openstack_dashboard.openstack.common import fileutils
from openstack_dashboard.openstack.common._i18n import _, _LE, _LW
from openstack_dashboard.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('The JSON file that defines policies.')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Default rule. Enforced when a requested rule is not '
'found.')),
cfg.MultiStrOpt('policy_dirs',
default=['policy.d'],
                    help=_('The directories where policy configuration '
                           'files are stored.')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
if isinstance(self.default_rule, dict):
raise KeyError(key)
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
# We need to check this or we can get infinite recursion
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from cache or config file.
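    A minimal usage sketch (the rule name and dicts are illustrative)::
        enforcer = Enforcer()
        enforcer.enforce('compute:start',
                         {'project_id': '1234'},
                         {'roles': ['admin'], 'project_id': '1234'})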
"""
def __init__(self, policy_file=None, rules=None,
default_rule=None, use_conf=True):
self.rules = Rules(rules, default_rule)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from cache or config file.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
self.use_conf = use_conf
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
fileutils.delete_cached_file(self.policy_path)
self.default_rule = None
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if force_reload:
self.use_conf = force_reload
if self.use_conf:
if not self.policy_path:
self.policy_path = self._get_policy_path(self.policy_file)
self._load_policy_file(self.policy_path, force_reload)
for path in CONF.policy_dirs:
try:
path = self._get_policy_path(path)
except cfg.ConfigFilesNotFoundError:
LOG.warn(_LW("Can not find policy directories %s"), path)
continue
self._walk_through_policy_directory(path,
self._load_policy_file,
force_reload, False)
def _walk_through_policy_directory(self, path, func, *args):
# We do not iterate over sub-directories.
policy_files = next(os.walk(path))[2]
policy_files.sort()
for policy_file in [p for p in policy_files if not p.startswith('.')]:
func(os.path.join(path, policy_file), *args)
def _load_policy_file(self, path, force_reload, overwrite=True):
reloaded, data = fileutils.read_cached_file(
path, force_reload=force_reload)
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules, overwrite)
LOG.debug("Rules successfully reloaded")
def _get_policy_path(self, path):
"""Locate the policy json data file/path.
        :param path: Its value can be a full path or a relative path. When
                     a full path is specified, this function simply returns
                     it. When a relative path is specified, this function
                     searches the configuration directories for one that
                     exists.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file/path couldn't
be located.
"""
policy_path = CONF.find_file(path)
if policy_path:
return policy_path
raise cfg.ConfigFilesNotFoundError((path,))
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug("Rule [%s] doesn't exist" % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
"""Abstract base class for Check classes."""
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
true value (not necessary True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_LE("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_LE("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
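    For example (a sketch of the resulting tree), the rule
    [["role:admin"], ["role:a", "role:b"]] becomes roughly
    OrCheck([RoleCheck('role', 'admin'),
             AndCheck([RoleCheck('role', 'a'), RoleCheck('role', 'b')])]).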
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, six.string_types):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
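    For example (illustrative), the rule "(role:admin or role:member)"
    yields the token stream ('(', '('), ('check', <RoleCheck>),
    ('or', 'or'), ('check', <RoleCheck>), (')', ')').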
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
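    For example (illustrative), @reducer('not', 'check') marks a method
    that fires whenever the two most recent tokens on the parse stack
    are 'not' followed by 'check' (see _make_not_expr below).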
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
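        For instance (illustrative), once the token stack ends in
        ['check', 'and', 'check'], the _make_and_expr reducer fires and
        collapses those three entries into a single 'and_expr'.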
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_LE("Failed to understand rule %s") % rule)
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, six.string_types):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
True:%(user.enabled)s
'Member':%(role.name)s
"""
try:
match = self.match % target
except KeyError:
# While doing GenericCheck if key not
# present in Target return false
return False
try:
# Try to interpret self.kind as a literal
leftval = ast.literal_eval(self.kind)
except ValueError:
try:
kind_parts = self.kind.split('.')
leftval = creds
for kind_part in kind_parts:
leftval = leftval[kind_part]
except KeyError:
return False
return match == six.text_type(leftval)
| apache-2.0 |
dbremner/bite-project | deps/gdata-python-client/tests/gdata_tests/auth_test.py | 126 | 26859 | #!/usr/bin/env python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import re
import unittest
import urllib
import gdata.auth
CONSUMER_KEY = 'www.yourwebapp.com'
CONSUMER_SECRET = 'qB1P2kCFDpRjF+/Iww4'
RSA_KEY = """-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQDVbOaFW+KXecfFJn1PIzYHnNXFxhaQ36QM0K5uSb0Y8NeQUlD2
6t8aKgnm6mcb4vaopHjjdIGWgAzM5Dt0oPIiDXo+jSQbvCIXRduuAt+0cFGb2d+L
hALk4AwB8IVIkDJWwgo5Z2OLsP2r/wQlUYKm/tnvQaevK24jNYMLWVJl2QIDAQAB
AoGAU93ERBlUVEPFjaJPUX67p4gotNvfWDSZiXOjZ7FQPnG9s3e1WyH2Y5irZXMs
61dnp+NhobfRiGtvHEB/YJgyLRk/CJDnMKslo95e7o65IE9VkcyY6Yvt7YTslsRX
Eu7T0xLEA7ON46ypCwNLeWxpJ9SWisEKu2yZJnWauCXEsgUCQQD7b2ZuhGx3msoP
YEnwvucp0UxneCvb68otfERZ1J6NfNP47QJw6OwD3r1sWCJ27QZmpvtQH1f8sCk9
t22anGG7AkEA2UzXdtQ8H1uLAN/XXX2qoLuvJK5jRswHS4GeOg4pnnDSiHg3Vbva
AxmMIL93ufvIy/xdoENwDPfcI4CbYlrDewJAGWy7W+OSIEoLsqBW+bwkHetnIXNa
ZAOkzxKoyrigS8hamupEe+xhqUaFuwXyfjobkpfCA+kXeZrKoM4CjEbR7wJAHMbf
Vd4/ZAu0edYq6DenLAgO5rWtcge9A5PTx25utovMZcQ917273mM4unGAwoGEkvcF
0x57LUx5u73hVgIdFwJBAKWGuHRwGPgTWYvhpHM0qveH+8KdU9BUt/kV4ONxIVDB
ftetEmJirqOGLECbImoLcUwQrgfMW4ZCxOioJMz/gY0=
-----END RSA PRIVATE KEY-----
"""
class AuthModuleUtilitiesTest(unittest.TestCase):
def testGenerateClientLoginRequestBody(self):
body = gdata.auth.GenerateClientLoginRequestBody('jo@gmail.com',
'password', 'test service', 'gdata.auth test')
expected_parameters = {'Email':r'jo%40gmail.com', 'Passwd':'password',
'service':'test+service', 'source':'gdata.auth+test',
'accountType':'HOSTED_OR_GOOGLE'}
self.__matchBody(body, expected_parameters)
body = gdata.auth.GenerateClientLoginRequestBody('jo@gmail.com',
'password', 'test service', 'gdata.auth test', account_type='A TEST',
captcha_token='12345', captcha_response='test')
expected_parameters['accountType'] = 'A+TEST'
expected_parameters['logintoken'] = '12345'
expected_parameters['logincaptcha'] = 'test'
self.__matchBody(body, expected_parameters)
def __matchBody(self, body, expected_name_value_pairs):
parameters = body.split('&')
for param in parameters:
(name, value) = param.split('=')
self.assert_(expected_name_value_pairs[name] == value)
def testGenerateClientLoginAuthToken(self):
http_body = ('SID=DQAAAGgA7Zg8CTN\r\n'
'LSID=DQAAAGsAlk8BBbG\r\n'
'Auth=DQAAAGgAdk3fA5N')
self.assert_(gdata.auth.GenerateClientLoginAuthToken(http_body) ==
'GoogleLogin auth=DQAAAGgAdk3fA5N')
class GenerateClientLoginRequestBodyTest(unittest.TestCase):
def testPostBodyShouldMatchShortExample(self):
auth_body = gdata.auth.GenerateClientLoginRequestBody('johndoe@gmail.com',
'north23AZ', 'cl', 'Gulp-CalGulp-1.05')
self.assert_(-1 < auth_body.find('Email=johndoe%40gmail.com'))
self.assert_(-1 < auth_body.find('Passwd=north23AZ'))
self.assert_(-1 < auth_body.find('service=cl'))
self.assert_(-1 < auth_body.find('source=Gulp-CalGulp-1.05'))
def testPostBodyShouldMatchLongExample(self):
auth_body = gdata.auth.GenerateClientLoginRequestBody('johndoe@gmail.com',
'north23AZ', 'cl', 'Gulp-CalGulp-1.05',
captcha_token='DQAAAGgA...dkI1', captcha_response='brinmar')
self.assert_(-1 < auth_body.find('logintoken=DQAAAGgA...dkI1'))
self.assert_(-1 < auth_body.find('logincaptcha=brinmar'))
def testEquivalenceWithOldLogic(self):
email = 'jo@gmail.com'
password = 'password'
account_type = 'HOSTED'
service = 'test'
source = 'auth test'
old_request_body = urllib.urlencode({'Email': email,
'Passwd': password,
'accountType': account_type,
'service': service,
'source': source})
new_request_body = gdata.auth.GenerateClientLoginRequestBody(email,
password, service, source, account_type=account_type)
for parameter in old_request_body.split('&'):
self.assert_(-1 < new_request_body.find(parameter))
class GenerateAuthSubUrlTest(unittest.TestCase):
def testDefaultParameters(self):
url = gdata.auth.GenerateAuthSubUrl('http://example.com/xyz?x=5',
'http://www.google.com/test/feeds')
self.assert_(-1 < url.find(
r'scope=http%3A%2F%2Fwww.google.com%2Ftest%2Ffeeds'))
self.assert_(-1 < url.find(
r'next=http%3A%2F%2Fexample.com%2Fxyz%3Fx%3D5'))
self.assert_(-1 < url.find('secure=0'))
self.assert_(-1 < url.find('session=1'))
def testAllParameters(self):
url = gdata.auth.GenerateAuthSubUrl('http://example.com/xyz?x=5',
'http://www.google.com/test/feeds', secure=True, session=False,
request_url='https://example.com/auth')
self.assert_(-1 < url.find(
r'scope=http%3A%2F%2Fwww.google.com%2Ftest%2Ffeeds'))
self.assert_(-1 < url.find(
r'next=http%3A%2F%2Fexample.com%2Fxyz%3Fx%3D5'))
self.assert_(-1 < url.find('secure=1'))
self.assert_(-1 < url.find('session=0'))
self.assert_(url.startswith('https://example.com/auth'))
class GenerateOAuthRequestTokenUrlTest(unittest.TestCase):
def testDefaultParameters(self):
oauth_input_params = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY,
rsa_key=RSA_KEY)
scopes = [
'http://abcd.example.com/feeds',
'http://www.example.com/abcd/feeds'
]
url = gdata.auth.GenerateOAuthRequestTokenUrl(
oauth_input_params, scopes=scopes)
self.assertEquals('https', url.protocol)
self.assertEquals('www.google.com', url.host)
self.assertEquals('/accounts/OAuthGetRequestToken', url.path)
self.assertEquals('1.0', url.params['oauth_version'])
self.assertEquals('RSA-SHA1', url.params['oauth_signature_method'])
self.assert_(url.params['oauth_nonce'])
self.assert_(url.params['oauth_timestamp'])
actual_scopes = url.params['scope'].split(' ')
self.assertEquals(2, len(actual_scopes))
for scope in actual_scopes:
self.assert_(scope in scopes)
self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key'])
self.assert_(url.params['oauth_signature'])
def testAllParameters(self):
oauth_input_params = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET)
scopes = ['http://abcd.example.com/feeds']
url = gdata.auth.GenerateOAuthRequestTokenUrl(
oauth_input_params, scopes=scopes,
request_token_url='https://www.example.com/accounts/OAuthRequestToken',
extra_parameters={'oauth_version': '2.0', 'my_param': 'my_value'})
self.assertEquals('https', url.protocol)
self.assertEquals('www.example.com', url.host)
self.assertEquals('/accounts/OAuthRequestToken', url.path)
self.assertEquals('2.0', url.params['oauth_version'])
self.assertEquals('HMAC-SHA1', url.params['oauth_signature_method'])
self.assert_(url.params['oauth_nonce'])
self.assert_(url.params['oauth_timestamp'])
actual_scopes = url.params['scope'].split(' ')
self.assertEquals(1, len(actual_scopes))
for scope in actual_scopes:
self.assert_(scope in scopes)
self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key'])
self.assert_(url.params['oauth_signature'])
self.assertEquals('my_value', url.params['my_param'])
class GenerateOAuthAuthorizationUrlTest(unittest.TestCase):
def testDefaultParameters(self):
token_key = 'ABCDDSFFDSG'
token_secret = 'SDFDSGSDADADSAF'
request_token = gdata.auth.OAuthToken(key=token_key, secret=token_secret)
url = gdata.auth.GenerateOAuthAuthorizationUrl(request_token)
self.assertEquals('https', url.protocol)
self.assertEquals('www.google.com', url.host)
self.assertEquals('/accounts/OAuthAuthorizeToken', url.path)
self.assertEquals(token_key, url.params['oauth_token'])
def testAllParameters(self):
token_key = 'ABCDDSFFDSG'
token_secret = 'SDFDSGSDADADSAF'
scopes = [
'http://abcd.example.com/feeds',
'http://www.example.com/abcd/feeds'
]
request_token = gdata.auth.OAuthToken(key=token_key, secret=token_secret,
scopes=scopes)
url = gdata.auth.GenerateOAuthAuthorizationUrl(
request_token,
authorization_url='https://www.example.com/accounts/OAuthAuthToken',
callback_url='http://www.yourwebapp.com/print',
extra_params={'permission': '1'},
include_scopes_in_callback=True, scopes_param_prefix='token_scope')
self.assertEquals('https', url.protocol)
self.assertEquals('www.example.com', url.host)
self.assertEquals('/accounts/OAuthAuthToken', url.path)
self.assertEquals(token_key, url.params['oauth_token'])
expected_callback_url = ('http://www.yourwebapp.com/print?'
'token_scope=http%3A%2F%2Fabcd.example.com%2Ffeeds'
'+http%3A%2F%2Fwww.example.com%2Fabcd%2Ffeeds')
self.assertEquals(expected_callback_url, url.params['oauth_callback'])
class GenerateOAuthAccessTokenUrlTest(unittest.TestCase):
def testDefaultParameters(self):
token_key = 'ABCDDSFFDSG'
token_secret = 'SDFDSGSDADADSAF'
authorized_request_token = gdata.auth.OAuthToken(key=token_key,
secret=token_secret)
oauth_input_params = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET)
url = gdata.auth.GenerateOAuthAccessTokenUrl(authorized_request_token,
oauth_input_params)
self.assertEquals('https', url.protocol)
self.assertEquals('www.google.com', url.host)
self.assertEquals('/accounts/OAuthGetAccessToken', url.path)
self.assertEquals(token_key, url.params['oauth_token'])
self.assertEquals('1.0', url.params['oauth_version'])
self.assertEquals('HMAC-SHA1', url.params['oauth_signature_method'])
self.assert_(url.params['oauth_nonce'])
self.assert_(url.params['oauth_timestamp'])
self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key'])
self.assert_(url.params['oauth_signature'])
def testAllParameters(self):
token_key = 'ABCDDSFFDSG'
authorized_request_token = gdata.auth.OAuthToken(key=token_key)
oauth_input_params = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY,
rsa_key=RSA_KEY)
url = gdata.auth.GenerateOAuthAccessTokenUrl(
authorized_request_token, oauth_input_params,
access_token_url='https://www.example.com/accounts/OAuthGetAccessToken',
        oauth_version='2.0')
self.assertEquals('https', url.protocol)
self.assertEquals('www.example.com', url.host)
self.assertEquals('/accounts/OAuthGetAccessToken', url.path)
self.assertEquals(token_key, url.params['oauth_token'])
self.assertEquals('2.0', url.params['oauth_version'])
self.assertEquals('RSA-SHA1', url.params['oauth_signature_method'])
self.assert_(url.params['oauth_nonce'])
self.assert_(url.params['oauth_timestamp'])
self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key'])
self.assert_(url.params['oauth_signature'])
class ExtractAuthSubTokensTest(unittest.TestCase):
def testGetTokenFromUrl(self):
url = 'http://www.yourwebapp.com/showcalendar.html?token=CKF50YzIH'
self.assert_(gdata.auth.AuthSubTokenFromUrl(url) ==
'AuthSub token=CKF50YzIH')
self.assert_(gdata.auth.TokenFromUrl(url) == 'CKF50YzIH')
url = 'http://www.yourwebapp.com/showcalendar.html?token==tokenCKF50YzIH='
self.assert_(gdata.auth.AuthSubTokenFromUrl(url) ==
'AuthSub token==tokenCKF50YzIH=')
self.assert_(gdata.auth.TokenFromUrl(url) == '=tokenCKF50YzIH=')
def testGetTokenFromHttpResponse(self):
response_body = ('Token=DQAA...7DCTN\r\n'
'Expiration=20061004T123456Z')
self.assert_(gdata.auth.AuthSubTokenFromHttpBody(response_body) ==
'AuthSub token=DQAA...7DCTN')
class CreateAuthSubTokenFlowTest(unittest.TestCase):
def testGenerateRequest(self):
request_url = gdata.auth.generate_auth_sub_url(next='http://example.com',
scopes=['http://www.blogger.com/feeds/',
'http://www.google.com/base/feeds/'])
self.assertEquals(request_url.protocol, 'https')
self.assertEquals(request_url.host, 'www.google.com')
self.assertEquals(request_url.params['scope'],
'http://www.blogger.com/feeds/ http://www.google.com/base/feeds/')
self.assertEquals(request_url.params['hd'], 'default')
self.assert_(request_url.params['next'].find('auth_sub_scopes') > -1)
self.assert_(request_url.params['next'].startswith('http://example.com'))
# Use a more complicated 'next' URL.
request_url = gdata.auth.generate_auth_sub_url(
next='http://example.com/?token_scope=http://www.blogger.com/feeds/',
scopes=['http://www.blogger.com/feeds/',
'http://www.google.com/base/feeds/'])
self.assert_(request_url.params['next'].find('auth_sub_scopes') > -1)
self.assert_(request_url.params['next'].find('token_scope') > -1)
self.assert_(request_url.params['next'].startswith('http://example.com/'))
def testParseNextUrl(self):
url = ('http://example.com/?auth_sub_scopes=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fwww.google.com%2Fbase%2Ffeeds%2F&'
'token=my_nifty_token')
token = gdata.auth.extract_auth_sub_token_from_url(url)
self.assertEquals(token.get_token_string(), 'my_nifty_token')
self.assert_(isinstance(token, gdata.auth.AuthSubToken))
self.assert_(token.valid_for_scope('http://www.blogger.com/feeds/'))
self.assert_(token.valid_for_scope('http://www.google.com/base/feeds/'))
self.assert_(
not token.valid_for_scope('http://www.google.com/calendar/feeds/'))
# Parse a more complicated response.
url = ('http://example.com/?auth_sub_scopes=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fwww.google.com%2Fbase%2Ffeeds%2F&'
'token_scope=http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F&'
'token=second_token')
token = gdata.auth.extract_auth_sub_token_from_url(url)
self.assertEquals(token.get_token_string(), 'second_token')
self.assert_(isinstance(token, gdata.auth.AuthSubToken))
self.assert_(token.valid_for_scope('http://www.blogger.com/feeds/'))
self.assert_(token.valid_for_scope('http://www.google.com/base/feeds/'))
self.assert_(
not token.valid_for_scope('http://www.google.com/calendar/feeds/'))
def testParseNextWithNoToken(self):
token = gdata.auth.extract_auth_sub_token_from_url('http://example.com/')
self.assert_(token is None)
token = gdata.auth.extract_auth_sub_token_from_url(
'http://example.com/?no_token=foo&other=1')
self.assert_(token is None)
class ExtractClientLoginTokenTest(unittest.TestCase):
def testExtractFromBodyWithScopes(self):
http_body_string = ('SID=DQAAAGgA7Zg8CTN\r\n'
'LSID=DQAAAGsAlk8BBbG\r\n'
'Auth=DQAAAGgAdk3fA5N')
token = gdata.auth.extract_client_login_token(http_body_string,
['http://docs.google.com/feeds/'])
self.assertEquals(token.get_token_string(), 'DQAAAGgAdk3fA5N')
self.assert_(isinstance(token, gdata.auth.ClientLoginToken))
self.assert_(token.valid_for_scope('http://docs.google.com/feeds/'))
self.assert_(not token.valid_for_scope('http://www.blogger.com/feeds'))
class ExtractOAuthTokensTest(unittest.TestCase):
def testOAuthTokenFromUrl(self):
scope_1 = 'http://docs.google.com/feeds/'
scope_2 = 'http://www.blogger.com/feeds/'
# Case 1: token and scopes both are present.
url = ('http://dummy.com/?oauth_token_scope=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&'
'oauth_token=CMns6t7MCxDz__8B')
token = gdata.auth.OAuthTokenFromUrl(url)
self.assertEquals('CMns6t7MCxDz__8B', token.key)
self.assertEquals(2, len(token.scopes))
self.assert_(scope_1 in token.scopes)
self.assert_(scope_2 in token.scopes)
# Case 2: token and scopes both are present but scope_param_prefix
# passed does not match the one present in the URL.
url = ('http://dummy.com/?oauth_token_scope=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&'
'oauth_token=CMns6t7MCxDz__8B')
token = gdata.auth.OAuthTokenFromUrl(url,
scopes_param_prefix='token_scope')
self.assertEquals('CMns6t7MCxDz__8B', token.key)
self.assert_(not token.scopes)
# Case 3: None present.
url = ('http://dummy.com/?no_oauth_token_scope=http%3A%2F%2Fwww.blogger.com'
'%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&'
'no_oauth_token=CMns6t7MCxDz__8B')
token = gdata.auth.OAuthTokenFromUrl(url)
self.assert_(token is None)
def testOAuthTokenFromHttpBody(self):
token_key = 'ABCD'
token_secret = 'XYZ'
# Case 1: token key and secret both present single time.
http_body = 'oauth_token=%s&oauth_token_secret=%s' % (token_key,
token_secret)
token = gdata.auth.OAuthTokenFromHttpBody(http_body)
self.assertEquals(token_key, token.key)
self.assertEquals(token_secret, token.secret)
class OAuthInputParametersTest(unittest.TestCase):
def setUp(self):
self.oauth_input_parameters_hmac = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET)
self.oauth_input_parameters_rsa = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY,
rsa_key=RSA_KEY)
def testGetSignatureMethod(self):
self.assertEquals(
'HMAC-SHA1',
self.oauth_input_parameters_hmac.GetSignatureMethod().get_name())
rsa_signature_method = self.oauth_input_parameters_rsa.GetSignatureMethod()
self.assertEquals('RSA-SHA1', rsa_signature_method.get_name())
self.assertEquals(RSA_KEY, rsa_signature_method._fetch_private_cert(None))
def testGetConsumer(self):
self.assertEquals(CONSUMER_KEY,
self.oauth_input_parameters_hmac.GetConsumer().key)
self.assertEquals(CONSUMER_KEY,
self.oauth_input_parameters_rsa.GetConsumer().key)
self.assertEquals(CONSUMER_SECRET,
self.oauth_input_parameters_hmac.GetConsumer().secret)
self.assert_(self.oauth_input_parameters_rsa.GetConsumer().secret is None)
class TokenClassesTest(unittest.TestCase):
def testClientLoginToAndFromString(self):
token = gdata.auth.ClientLoginToken()
token.set_token_string('foo')
self.assertEquals(token.get_token_string(), 'foo')
self.assertEquals(token.auth_header, '%s%s' % (
gdata.auth.PROGRAMMATIC_AUTH_LABEL, 'foo'))
token.set_token_string(token.get_token_string())
self.assertEquals(token.get_token_string(), 'foo')
def testAuthSubToAndFromString(self):
token = gdata.auth.AuthSubToken()
token.set_token_string('foo')
self.assertEquals(token.get_token_string(), 'foo')
self.assertEquals(token.auth_header, '%s%s' % (
gdata.auth.AUTHSUB_AUTH_LABEL, 'foo'))
token.set_token_string(token.get_token_string())
self.assertEquals(token.get_token_string(), 'foo')
def testSecureAuthSubToAndFromString(self):
# Case 1: no token.
token = gdata.auth.SecureAuthSubToken(RSA_KEY)
token.set_token_string('foo')
self.assertEquals(token.get_token_string(), 'foo')
token.set_token_string(token.get_token_string())
self.assertEquals(token.get_token_string(), 'foo')
self.assertEquals(str(token), 'foo')
# Case 2: token is a string
token = gdata.auth.SecureAuthSubToken(RSA_KEY, token_string='foo')
self.assertEquals(token.get_token_string(), 'foo')
token.set_token_string(token.get_token_string())
self.assertEquals(token.get_token_string(), 'foo')
self.assertEquals(str(token), 'foo')
def testOAuthToAndFromString(self):
token_key = 'ABCD'
token_secret = 'XYZ'
# Case 1: token key and secret both present single time.
token_string = 'oauth_token=%s&oauth_token_secret=%s' % (token_key,
token_secret)
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assert_(-1 < token.get_token_string().find(token_string.split('&')[0]))
self.assert_(-1 < token.get_token_string().find(token_string.split('&')[1]))
self.assertEquals(token_key, token.key)
self.assertEquals(token_secret, token.secret)
# Case 2: token key and secret both present multiple times with unwanted
# parameters.
token_string = ('oauth_token=%s&oauth_token_secret=%s&'
'oauth_token=%s&ExtraParams=GarbageString' % (token_key,
token_secret,
'LMNO'))
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assert_(-1 < token.get_token_string().find(token_string.split('&')[0]))
self.assert_(-1 < token.get_token_string().find(token_string.split('&')[1]))
self.assertEquals(token_key, token.key)
self.assertEquals(token_secret, token.secret)
# Case 3: Only token key present.
token_string = 'oauth_token=%s' % (token_key,)
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assertEquals(token_string, token.get_token_string())
self.assertEquals(token_key, token.key)
self.assert_(not token.secret)
    # Case 4: Only token secret present.
token_string = 'oauth_token_secret=%s' % (token_secret,)
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assertEquals(token_string, token.get_token_string())
self.assertEquals(token_secret, token.secret)
self.assert_(not token.key)
# Case 5: None present.
token_string = ''
token = gdata.auth.OAuthToken()
token.set_token_string(token_string)
self.assert_(token.get_token_string() is None)
self.assert_(not token.key)
self.assert_(not token.secret)
def testSecureAuthSubGetAuthHeader(self):
# Case 1: Presence of OAuth token (in case of 3-legged OAuth)
url = 'http://dummy.com/?q=notebook&s=true'
token = gdata.auth.SecureAuthSubToken(RSA_KEY, token_string='foo')
auth_header = token.GetAuthHeader('GET', url)
self.assert_('Authorization' in auth_header)
header_value = auth_header['Authorization']
self.assert_(header_value.startswith(r'AuthSub token="foo"'))
self.assert_(-1 < header_value.find(r'sigalg="rsa-sha1"'))
self.assert_(-1 < header_value.find(r'data="'))
self.assert_(-1 < header_value.find(r'sig="'))
m = re.search(r'data="(.*?)"', header_value)
self.assert_(m is not None)
data = m.group(1)
self.assert_(data.startswith('GET'))
self.assert_(-1 < data.find(url))
def testOAuthGetAuthHeader(self):
# Case 1: Presence of OAuth token (in case of 3-legged OAuth)
oauth_input_params = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY,
rsa_key=RSA_KEY)
token = gdata.auth.OAuthToken(key='ABCDDSFFDSG',
oauth_input_params=oauth_input_params)
auth_header = token.GetAuthHeader('GET',
'http://dummy.com/?q=notebook&s=true',
realm='http://dummy.com')
self.assert_('Authorization' in auth_header)
header_value = auth_header['Authorization']
self.assert_(-1 < header_value.find(r'OAuth realm="http://dummy.com"'))
self.assert_(-1 < header_value.find(r'oauth_version="1.0"'))
self.assert_(-1 < header_value.find(r'oauth_token="ABCDDSFFDSG"'))
self.assert_(-1 < header_value.find(r'oauth_nonce="'))
self.assert_(-1 < header_value.find(r'oauth_timestamp="'))
self.assert_(-1 < header_value.find(r'oauth_signature="'))
self.assert_(-1 < header_value.find(
r'oauth_consumer_key="%s"' % CONSUMER_KEY))
self.assert_(-1 < header_value.find(r'oauth_signature_method="RSA-SHA1"'))
# Case 2: Absence of OAuth token (in case of 2-legged OAuth)
oauth_input_params = gdata.auth.OAuthInputParams(
gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET)
token = gdata.auth.OAuthToken(oauth_input_params=oauth_input_params)
auth_header = token.GetAuthHeader(
'GET', 'http://dummy.com/?xoauth_requestor_id=user@gmail.com&q=book')
self.assert_('Authorization' in auth_header)
header_value = auth_header['Authorization']
self.assert_(-1 < header_value.find(r'OAuth realm=""'))
self.assert_(-1 < header_value.find(r'oauth_version="1.0"'))
self.assertEquals(-1, header_value.find(r'oauth_token='))
self.assert_(-1 < header_value.find(r'oauth_nonce="'))
self.assert_(-1 < header_value.find(r'oauth_timestamp="'))
self.assert_(-1 < header_value.find(r'oauth_signature="'))
self.assert_(-1 < header_value.find(
r'oauth_consumer_key="%s"' % CONSUMER_KEY))
self.assert_(-1 < header_value.find(r'oauth_signature_method="HMAC-SHA1"'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
dya2/python-for-android | python-build/python-libs/ase/scripts/test.py | 64 | 5166 | import sys
import types
# Test imports.
import android
import BeautifulSoup
import gdata.docs.service
import sqlite3
import termios
import time
import xmpp
droid = android.Android()
def event_loop():
for i in range(10):
e = droid.eventPoll(1)
if e.result is not None:
return True
time.sleep(2)
return False
def test_clipboard():
previous = droid.getClipboard().result
msg = 'Hello, world!'
droid.setClipboard(msg)
echo = droid.getClipboard().result
droid.setClipboard(previous)
return echo == msg
def test_gdata():
# Create a client class which will make HTTP requests with Google Docs server.
client = gdata.docs.service.DocsService()
# Authenticate using your Google Docs email address and password.
username = droid.dialogGetInput('Username').result
password = droid.dialogGetPassword('Password', 'For ' + username).result
try:
client.ClientLogin(username, password)
except:
return False
# Query the server for an Atom feed containing a list of your documents.
documents_feed = client.GetDocumentListFeed()
# Loop through the feed and extract each document entry.
return bool(list(documents_feed.entry))
def test_gps():
droid.startLocating()
try:
return event_loop()
finally:
droid.stopLocating()
def test_sensors():
droid.startSensing()
try:
return event_loop()
finally:
droid.stopSensing()
def test_speak():
result = droid.ttsSpeak('Hello, world!')
return result.error is None
def test_phone_state():
droid.startTrackingPhoneState()
try:
return event_loop()
finally:
droid.stopTrackingPhoneState()
def test_ringer_silent():
result1 = droid.toggleRingerSilentMode()
result2 = droid.toggleRingerSilentMode()
return result1.error is None and result2.error is None
def test_ringer_volume():
get_result = droid.getRingerVolume()
if get_result.error is not None:
return False
droid.setRingerVolume(0)
set_result = droid.setRingerVolume(get_result.result)
if set_result.error is not None:
return False
return True
def test_get_last_known_location():
result = droid.getLastKnownLocation()
return result.error is None
def test_geocode():
result = droid.geocode(0.0, 0.0, 1)
return result.error is None
def test_wifi():
result1 = droid.toggleWifiState()
result2 = droid.toggleWifiState()
return result1.error is None and result2.error is None
def test_make_toast():
result = droid.makeToast('Hello, world!')
return result.error is None
def test_vibrate():
result = droid.vibrate()
return result.error is None
def test_notify():
result = droid.notify('Test Title', 'Hello, world!')
return result.error is None
def test_get_running_packages():
result = droid.getRunningPackages()
return result.error is None
def test_alert_dialog():
title = 'User Interface'
message = 'Welcome to the SL4A integration test.'
droid.dialogCreateAlert(title, message)
droid.dialogSetPositiveButtonText('Continue')
droid.dialogShow()
response = droid.dialogGetResponse().result
return response['which'] == 'positive'
def test_alert_dialog_with_buttons():
title = 'Alert'
message = ('This alert box has 3 buttons and '
'will wait for you to press one.')
droid.dialogCreateAlert(title, message)
droid.dialogSetPositiveButtonText('Yes')
droid.dialogSetNegativeButtonText('No')
droid.dialogSetNeutralButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse().result
return response['which'] in ('positive', 'negative', 'neutral')
def test_spinner_progress():
title = 'Spinner'
message = 'This is simple spinner progress.'
droid.dialogCreateSpinnerProgress(title, message)
droid.dialogShow()
time.sleep(2)
droid.dialogDismiss()
return True
def test_horizontal_progress():
title = 'Horizontal'
message = 'This is simple horizontal progress.'
droid.dialogCreateHorizontalProgress(title, message, 50)
droid.dialogShow()
for x in range(0, 50):
time.sleep(0.1)
droid.dialogSetCurrentProgress(x)
droid.dialogDismiss()
return True
def test_alert_dialog_with_list():
title = 'Alert'
droid.dialogCreateAlert(title)
droid.dialogSetItems(['foo', 'bar', 'baz'])
droid.dialogShow()
response = droid.dialogGetResponse().result
return True
def test_alert_dialog_with_single_choice_list():
title = 'Alert'
droid.dialogCreateAlert(title)
droid.dialogSetSingleChoiceItems(['foo', 'bar', 'baz'])
droid.dialogSetPositiveButtonText('Yay!')
droid.dialogShow()
response = droid.dialogGetResponse().result
return True
def test_alert_dialog_with_multi_choice_list():
title = 'Alert'
droid.dialogCreateAlert(title)
droid.dialogSetMultiChoiceItems(['foo', 'bar', 'baz'], [])
droid.dialogSetPositiveButtonText('Yay!')
droid.dialogShow()
response = droid.dialogGetResponse().result
return True
if __name__ == '__main__':
for name, value in globals().items():
if name.startswith('test_') and isinstance(value, types.FunctionType):
print 'Running %s...' % name,
sys.stdout.flush()
if value():
print ' PASS'
else:
print ' FAIL'
| apache-2.0 |
dameiss/wireshark-aeron | tools/dftestlib/double.py | 40 | 2523 | # Copyright (c) 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from dftestlib import dftest
class testDouble(dftest.DFTest):
trace_file = "ntp.pcap"
def test_eq_1(self):
dfilter = "ntp.rootdelay == 0.0626983642578125"
self.assertDFilterCount(dfilter, 1)
def test_eq_2(self):
dfilter = "ntp.rootdelay == 0.0626"
self.assertDFilterCount(dfilter, 0)
def test_gt_1(self):
dfilter = "ntp.rootdelay > 1.0626"
self.assertDFilterCount(dfilter, 0)
def test_gt_2(self):
dfilter = "ntp.rootdelay > 0.0626983642578125"
self.assertDFilterCount(dfilter, 0)
def test_gt_3(self):
dfilter = "ntp.rootdelay > 0.0026"
self.assertDFilterCount(dfilter, 1)
def test_ge_1(self):
dfilter = "ntp.rootdelay >= 1.0026"
self.assertDFilterCount(dfilter, 0)
def test_ge_2(self):
dfilter = "ntp.rootdelay >= 0.0626983642578125"
self.assertDFilterCount(dfilter, 1)
def test_ge_3(self):
dfilter = "ntp.rootdelay >= 0.0026"
self.assertDFilterCount(dfilter, 1)
def test_lt_1(self):
dfilter = "ntp.rootdelay < 1.0026"
self.assertDFilterCount(dfilter, 1)
def test_lt_2(self):
dfilter = "ntp.rootdelay < 0.0626983642578125"
self.assertDFilterCount(dfilter, 0)
def test_lt_3(self):
dfilter = "ntp.rootdelay < 0.0026"
self.assertDFilterCount(dfilter, 0)
def test_le_1(self):
dfilter = "ntp.rootdelay <= 1.0026"
self.assertDFilterCount(dfilter, 1)
def test_le_2(self):
dfilter = "ntp.rootdelay <= 0.0626983642578125"
self.assertDFilterCount(dfilter, 1)
def test_le_3(self):
dfilter = "ntp.rootdelay <= 0.0026"
self.assertDFilterCount(dfilter, 0)
| gpl-2.0 |
softDi/clusim | ns3/ns-3.26/src/csma/bindings/callbacks_list.py | 46 | 1082 | callback_classes = [
['void', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| apache-2.0 |
OpenPymeMx/OCB | addons/pad/pad.py | 13 | 4352 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import random
import re
import string
import urllib2
import logging
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools import html2plaintext
from py_etherpad import EtherpadLiteClient
_logger = logging.getLogger(__name__)
class pad_common(osv.osv_memory):
_name = 'pad.common'
def pad_is_configured(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return bool(user.company_id.pad_server)
def pad_generate_url(self, cr, uid, context=None):
company = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context).company_id
pad = {
"server" : company.pad_server,
"key" : company.pad_key,
}
        # make sure the pad server URL is in the form http://hostname
if not pad["server"]:
return pad
if not pad["server"].startswith('http'):
pad["server"] = 'http://' + pad["server"]
pad["server"] = pad["server"].rstrip('/')
# generate a salt
s = string.ascii_uppercase + string.digits
salt = ''.join([s[random.randint(0, len(s) - 1)] for i in range(10)])
#path
# etherpad hardcodes pad id length limit to 50
path = '-%s-%s' % (self._name, salt)
path = '%s%s' % (cr.dbname.replace('_','-')[0:50 - len(path)], path)
        # construct the url
url = '%s/p/%s' % (pad["server"], path)
        # if creating with content
if "field_name" in context and "model" in context and "object_id" in context:
myPad = EtherpadLiteClient( pad["key"], pad["server"]+'/api')
try:
myPad.createPad(path)
except urllib2.URLError:
raise osv.except_osv(_("Error"), _("Pad creation fail, \
either there is a problem with your pad server URL or with your connection."))
            # get the field definition from the model
model = self.pool.get(context["model"])
field = model._all_columns[context['field_name']]
real_field = field.column.pad_content_field
            # get the content of the real field
for record in model.browse(cr, uid, [context["object_id"]]):
if record[real_field]:
myPad.setText(path, (html2plaintext(record[real_field]).encode('utf-8')))
                    # Etherpad HTML import is not functional; plain text is set above
#myPad.setHTML(path, record[real_field])
return {
"server": pad["server"],
"path": path,
"url": url,
}
def pad_get_content(self, cr, uid, url, context=None):
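        """Fetch the pad's exported HTML and return the <body> contents,
        or '' if the pad cannot be fetched."""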
content = ''
if url:
try:
page = urllib2.urlopen('%s/export/html'%url).read()
mo = re.search('<body>(.*)</body>',page)
if mo:
content = mo.group(1)
except:
_logger.warning("No url found '%s'.", url)
return content
    # TODO: reverse-engineer the protocol so setHTML can be used without the API key
def write(self, cr, uid, ids, vals, context=None):
self._set_pad_value(cr, uid, vals, context)
return super(pad_common, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
self._set_pad_value(cr, uid, vals, context)
return super(pad_common, self).create(cr, uid, vals, context=context)
# Set the pad content in vals
def _set_pad_value(self, cr, uid, vals, context=None):
for k,v in vals.items():
field = self._all_columns[k].column
if hasattr(field,'pad_content_field'):
vals[field.pad_content_field] = self.pad_get_content(cr, uid, v, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
for k, v in self._all_columns.iteritems():
field = v.column
if hasattr(field,'pad_content_field'):
pad = self.pad_generate_url(cr, uid, context)
default[k] = pad.get('url')
return super(pad_common, self).copy(cr, uid, id, default, context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cryptobanana/ansible | test/units/modules/network/f5/test_bigip_qkview.py | 7 | 5831 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.f5_utils import AnsibleF5Client
try:
from library.bigip_qkview import Parameters
from library.bigip_qkview import ModuleManager
from library.bigip_qkview import MadmLocationManager
from library.bigip_qkview import BulkLocationManager
from library.bigip_qkview import ArgumentSpec
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_qkview import Parameters
from ansible.modules.network.f5.bigip_qkview import ModuleManager
from ansible.modules.network.f5.bigip_qkview import MadmLocationManager
from ansible.modules.network.f5.bigip_qkview import BulkLocationManager
from ansible.modules.network.f5.bigip_qkview import ArgumentSpec
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
filename='foo.qkview',
asm_request_log=False,
max_file_size=1024,
complete_information=True,
exclude_core=True,
force=False,
exclude=['audit', 'secure'],
dest='/tmp/foo.qkview'
)
p = Parameters(args)
assert p.filename == 'foo.qkview'
assert p.asm_request_log is None
assert p.max_file_size == '-s 1024'
assert p.complete_information == '-c'
assert p.exclude_core == '-C'
assert p.force is False
assert len(p.exclude_core) == 2
assert 'audit' in p.exclude
assert 'secure' in p.exclude
assert p.dest == '/tmp/foo.qkview'
def test_module_asm_parameter(self):
args = dict(
asm_request_log=True,
)
p = Parameters(args)
assert p.asm_request_log == '-o asm-request-log'
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestMadmLocationManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_qkview_default_options(self, *args):
set_module_args(dict(
dest='/tmp/foo.qkview',
server='localhost',
user='admin',
password='password'
))
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name
)
# Override methods in the specific type of manager
tm = MadmLocationManager(client)
tm.exists = Mock(return_value=False)
tm.execute_on_device = Mock(return_value=True)
tm._move_qkview_to_download = Mock(return_value=True)
tm._download_file = Mock(return_value=True)
tm._delete_qkview = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(client)
mm.is_version_less_than_14 = Mock(return_value=True)
mm.get_manager = Mock(return_value=tm)
with patch('os.path.exists') as mo:
mo.return_value = True
results = mm.exec_module()
assert results['changed'] is False
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestBulkLocationManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_qkview_default_options(self, *args):
set_module_args(dict(
dest='/tmp/foo.qkview',
server='localhost',
user='admin',
password='password'
))
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name
)
# Override methods in the specific type of manager
tm = BulkLocationManager(client)
tm.exists = Mock(return_value=False)
tm.execute_on_device = Mock(return_value=True)
tm._move_qkview_to_download = Mock(return_value=True)
tm._download_file = Mock(return_value=True)
tm._delete_qkview = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(client)
mm.is_version_less_than_14 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
with patch('os.path.exists') as mo:
mo.return_value = True
results = mm.exec_module()
assert results['changed'] is False
| gpl-3.0 |
seanchen/taiga-back | taiga/timeline/apps.py | 4 | 1595 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
from django.apps import apps
from django.db.models import signals
from . import signals as handlers
from taiga.projects.history.models import HistoryEntry
class TimelineAppConfig(AppConfig):
name = "taiga.timeline"
verbose_name = "Timeline"
def ready(self):
signals.post_save.connect(handlers.on_new_history_entry, sender=HistoryEntry, dispatch_uid="timeline")
signals.pre_save.connect(handlers.create_membership_push_to_timeline,
sender=apps.get_model("projects", "Membership"))
signals.post_delete.connect(handlers.delete_membership_push_to_timeline,
sender=apps.get_model("projects", "Membership"))
| agpl-3.0 |
vitiral/micropython | tests/extmod/uctypes_native_le.py | 59 | 2037 | # This test is exactly like uctypes_le.py, but uses native structure layout.
# Codepaths for packed vs native structures are different. This test only works
# on a little-endian machine (whether 32-bit or 64-bit).
import sys
import uctypes
if sys.byteorder != "little":
print("SKIP")
sys.exit()
desc = {
"s0": uctypes.UINT16 | 0,
"sub": (0, {
"b0": uctypes.UINT8 | 0,
"b1": uctypes.UINT8 | 1,
}),
"arr": (uctypes.ARRAY | 0, uctypes.UINT8 | 2),
"arr2": (uctypes.ARRAY | 0, 2, {"b": uctypes.UINT8 | 0}),
"bitf0": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 8 << uctypes.BF_LEN,
"bitf1": uctypes.BFUINT16 | 0 | 8 << uctypes.BF_POS | 8 << uctypes.BF_LEN,
"bf0": uctypes.BFUINT16 | 0 | 0 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf1": uctypes.BFUINT16 | 0 | 4 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf2": uctypes.BFUINT16 | 0 | 8 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"bf3": uctypes.BFUINT16 | 0 | 12 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
"ptr": (uctypes.PTR | 0, uctypes.UINT8),
"ptr2": (uctypes.PTR | 0, {"b": uctypes.UINT8 | 0}),
}
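# Orientation note (added sketch): every entry in the descriptor above
# aliases the same two-byte buffer at offset 0, so s0, the sub-struct
# bytes, the arrays, the bitfields and the pointers are all just
# differently-typed views of the bytes b"01".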
data = bytearray(b"01")
S = uctypes.struct(uctypes.addressof(data), desc, uctypes.NATIVE)
#print(S)
print(hex(S.s0))
assert hex(S.s0) == "0x3130"
#print(S.sub.b0)
print(S.sub.b0, S.sub.b1)
assert (S.sub.b0, S.sub.b1) == (0x30, 0x31)
try:
S[0]
assert False, "Can't index struct"
except TypeError:
print("TypeError")
print("arr:", S.arr[0], S.arr[1])
assert (S.arr[0], S.arr[1]) == (0x30, 0x31)
print("arr of struct:", S.arr2[0].b, S.arr2[1].b)
assert (S.arr2[0].b, S.arr2[1].b) == (0x30, 0x31)
try:
S.arr[2]
assert False, "Out of bounds index"
except IndexError:
print("IndexError")
print("bf:", S.bitf0, S.bitf1)
assert (S.bitf0, S.bitf1) == (0x30, 0x31)
print("bf 4bit:", S.bf3, S.bf2, S.bf1, S.bf0)
assert (S.bf3, S.bf2, S.bf1, S.bf0) == (3, 1, 3, 0)
# Write access
S.sub.b0 = ord("2")
print(data)
assert bytes(data) == b"21"
S.bf3 = 5
print(data)
assert bytes(data) == b"2Q"
| mit |
highweb-project/highweb-webcl-html5spec | net/data/verify_certificate_chain_unittest/generate-intermediary-basic-constraints-not-critical.py | 16 | 1043 | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 1 intermediary and a trusted root. The intermediary
has a basic constraints extension but does not mark it as critical.
Verification is expected to succeed, since although not critical, the
basicConstraints indicates CA=true as expected."""
import common
# Self-signed root certificate (part of trust store).
root = common.create_self_signed_root_certificate('Root')
# Intermediary with non-critical basic constraints.
intermediary = common.create_intermediary_certificate('Intermediary', root)
intermediary.get_extensions().set_property('basicConstraints', 'CA:true')
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediary)
chain = [target, intermediary]
trusted = [root]
time = common.DEFAULT_TIME
verify_result = True
common.write_test_file(__doc__, chain, trusted, time, verify_result)
| bsd-3-clause |
thatchristoph/namebench | tools/convert_server_list.py | 174 | 2968 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for checking a lot of DNS servers from stdin for possible inclusion."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import re
import sys
import pygeoip
sys.path.append('..')
sys.path.append('/Users/tstromberg/namebench')
import third_party
from libnamebench import nameserver_list
from libnamebench import config
from libnamebench import addr_util
geo_city = pygeoip.GeoIP('/usr/local/share/GeoLiteCity.dat')
(options, supplied_ns, global_ns, regional_ns) = config.GetConfiguration()
cfg_nameservers = global_ns + regional_ns
#cfg_nameservers = [('205.151.67.2', '205.151.67.2')]
nameserver_list.MAX_INITIAL_HEALTH_THREAD_COUNT = 100
nameservers = nameserver_list.NameServers(
cfg_nameservers,
timeout=30,
health_timeout=30,
threads=100,
skip_cache_collusion_checks=True,
)
nameservers.PingNameServers()
for ns in nameservers:
if ':' in ns.ip:
details = {}
else:
try:
details = geo_city.record_by_addr(ns.ip)
except:
pass
if not details:
details = {}
city = details.get('city', '')
if city:
city = city.decode('latin-1')
country = details.get('country_name', '')
if country:
country = country.decode('latin-1')
latitude = details.get('latitude', '')
longitude = details.get('longitude', '')
country_code = details.get('country_code', '')
region = details.get('region_name', '')
if region:
region = region.decode('latin-1')
matches = re.search('[- ](\d+)', ns.name)
if matches:
instance = matches.group(1)
ns.name = re.sub('[- ]%s' % instance, '', ns.name)
main = u"%s=%s (%s)" % (ns.ip, ns.name.decode('latin-1'), instance.decode('latin-1'))
else:
try:
main = u"%s=%s" % (ns.ip, ns.name.decode('latin-1'))
except:
main = "%s=ENCODE_ERROR" % ns.ip
if 'Responded with: REFUSED' in ns.warnings:
note = '_REFUSED_'
elif 'a.root-servers.net.: Timeout' in ns.warnings:
note = '_TIMEOUT_'
elif 'No answer (NOERROR): a.root-servers.net.' in ns.warnings:
note = '_NOANSWER_'
elif ns.warnings:
note = '_WARNING/%s_' % '/'.join(list(ns.warnings))
else:
note = ''
geo = '/'.join([x for x in [city, region, country_code] if x and not x.isdigit()])
entry = "%-52.52s # %s,%s,%s (%s) %s" % (main, ns.hostname, latitude, longitude, geo, note)
print entry.encode('utf-8')
| apache-2.0 |
linjoahow/2015cdaa-w11 | static/Brython3.1.1-20150328-091302/Lib/xml/dom/domreg.py | 841 | 3402 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
# This is a list of well-known implementations. Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
well_known_implementations = {
'minidom':'xml.dom.minidom',
'4DOM': 'xml.dom.DOMImplementation',
}
# DOM implementations not officially registered should register
# themselves with their
registered = {}
def registerDOMImplementation(name, factory):
"""registerDOMImplementation(name, factory)
Register the factory function with the name. The factory function
should return an object which implements the DOMImplementation
interface. The factory function can either return the same object,
or a new one (e.g. if that implementation supports some
customization)."""
registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name=None, features=()):
"""getDOMImplementation(name = None, features = ()) -> DOM implementation.
Return a suitable DOM implementation. The name is either
well-known, the module name of a DOM implementation, or None. If
it is not None, imports the corresponding module and returns
DOMImplementation object if the import succeeds.
If name is not given, consider the available implementations to
find one with the required feature set. If no implementation can
be found, raise an ImportError. The features list must be a sequence
of (feature, version) pairs which are passed to hasFeature."""
import os
creator = None
mod = well_known_implementations.get(name)
if mod:
mod = __import__(mod, {}, {}, ['getDOMImplementation'])
return mod.getDOMImplementation()
elif name:
return registered[name]()
elif "PYTHON_DOM" in os.environ:
return getDOMImplementation(name = os.environ["PYTHON_DOM"])
# User did not specify a name, try implementations in arbitrary
# order, returning the one that has the required features
if isinstance(features, str):
features = _parse_feature_string(features)
for creator in registered.values():
dom = creator()
if _good_enough(dom, features):
return dom
for creator in well_known_implementations.keys():
try:
dom = getDOMImplementation(name = creator)
except Exception: # typically ImportError, or AttributeError
continue
if _good_enough(dom, features):
return dom
raise ImportError("no suitable DOM implementation found")
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError("bad feature name: %r" % (feature,))
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
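
if __name__ == "__main__":
    # Minimal usage sketch (added illustration, not part of the module):
    # fetch the bundled minidom implementation by name, then by a feature
    # string of the form _parse_feature_string() accepts.
    impl = getDOMImplementation("minidom")
    assert impl.hasFeature("core", "2.0")
    impl = getDOMImplementation(features="core 2.0")
    print(impl)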
| gpl-3.0 |
hujiajie/chromium-crosswalk | tools/telemetry/catapult_base/cloud_storage_unittest.py | 8 | 8049 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import mock
from telemetry.testing import system_stub
from catapult_base import cloud_storage
def _FakeReadHash(_):
return 'hashthis!'
def _FakeCalulateHashMatchesRead(_):
return 'hashthis!'
def _FakeCalulateHashNewHash(_):
return 'omgnewhash'
class CloudStorageUnitTest(unittest.TestCase):
def _FakeRunCommand(self, cmd):
pass
def _FakeGet(self, bucket, remote_path, local_path):
pass
def _assertRunCommandRaisesError(self, communicate_strs, error):
stubs = system_stub.Override(cloud_storage, ['open', 'subprocess'])
stubs.open.files = {'fake gsutil path':''}
stubs.subprocess.Popen.returncode_result = 1
try:
for string in communicate_strs:
stubs.subprocess.Popen.communicate_result = ('', string)
self.assertRaises(error, cloud_storage._RunCommand, [])
finally:
stubs.Restore()
def testRunCommandCredentialsError(self):
strs = ['You are attempting to access protected data with no configured',
'Failure: No handler was ready to authenticate.']
self._assertRunCommandRaisesError(strs, cloud_storage.CredentialsError)
def testRunCommandPermissionError(self):
strs = ['status=403', 'status 403', '403 Forbidden']
self._assertRunCommandRaisesError(strs, cloud_storage.PermissionError)
def testRunCommandNotFoundError(self):
strs = ['InvalidUriError', 'No such object', 'No URLs matched',
'One or more URLs matched no', 'InvalidUriError']
self._assertRunCommandRaisesError(strs, cloud_storage.NotFoundError)
def testRunCommandServerError(self):
strs = ['500 Internal Server Error']
self._assertRunCommandRaisesError(strs, cloud_storage.ServerError)
def testRunCommandGenericError(self):
strs = ['Random string']
self._assertRunCommandRaisesError(strs, cloud_storage.CloudStorageError)
def testInsertCreatesValidCloudUrl(self):
orig_run_command = cloud_storage._RunCommand
try:
cloud_storage._RunCommand = self._FakeRunCommand
remote_path = 'test-remote-path.html'
local_path = 'test-local-path.html'
cloud_url = cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET,
remote_path, local_path)
self.assertEqual('https://console.developers.google.com/m/cloudstorage'
'/b/chromium-telemetry/o/test-remote-path.html',
cloud_url)
finally:
cloud_storage._RunCommand = orig_run_command
def testExistsReturnsFalse(self):
stubs = system_stub.Override(cloud_storage, ['subprocess'])
try:
stubs.subprocess.Popen.communicate_result = (
'',
'CommandException: One or more URLs matched no objects.\n')
stubs.subprocess.Popen.returncode_result = 1
self.assertFalse(cloud_storage.Exists('fake bucket',
'fake remote path'))
finally:
stubs.Restore()
@mock.patch('catapult_base.cloud_storage.CalculateHash')
@mock.patch('catapult_base.cloud_storage._GetLocked')
@mock.patch('catapult_base.cloud_storage._PseudoFileLock')
@mock.patch('catapult_base.cloud_storage.os.path')
def testGetIfHashChanged(self, path_mock, lock_mock, get_mock,
calc_hash_mock):
path_mock.exists.side_effect = [False, True, True]
calc_hash_mock.return_value = 'hash'
# The file at |local_path| doesn't exist. We should download file from cs.
ret = cloud_storage.GetIfHashChanged(
'remote_path', 'local_path', 'cs_bucket', 'hash')
self.assertTrue(ret)
get_mock.assert_called_once_with('cs_bucket', 'remote_path', 'local_path')
get_mock.reset_mock()
self.assertFalse(calc_hash_mock.call_args)
calc_hash_mock.reset_mock()
# A local file exists at |local_path| but has the wrong hash.
# We should download file from cs.
ret = cloud_storage.GetIfHashChanged(
'remote_path', 'local_path', 'cs_bucket', 'new_hash')
self.assertTrue(ret)
get_mock.assert_called_once_with('cs_bucket', 'remote_path', 'local_path')
get_mock.reset_mock()
calc_hash_mock.assert_called_once_with('local_path')
calc_hash_mock.reset_mock()
# Downloaded file exists locally and has the right hash. Don't download.
ret = cloud_storage.GetIfHashChanged(
'remote_path', 'local_path', 'cs_bucket', 'hash')
self.assertFalse(get_mock.call_args)
self.assertFalse(ret)
calc_hash_mock.reset_mock()
get_mock.reset_mock()
@mock.patch('catapult_base.cloud_storage._PseudoFileLock')
def testGetIfChanged(self, lock_mock):
stubs = system_stub.Override(cloud_storage, ['os', 'open'])
orig_get = cloud_storage._GetLocked
orig_read_hash = cloud_storage.ReadHash
orig_calculate_hash = cloud_storage.CalculateHash
cloud_storage.ReadHash = _FakeReadHash
cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
file_path = 'test-file-path.wpr'
hash_path = file_path + '.sha1'
try:
cloud_storage._GetLocked = self._FakeGet
# hash_path doesn't exist.
self.assertFalse(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
# hash_path exists, but file_path doesn't.
stubs.os.path.files.append(hash_path)
self.assertTrue(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
# hash_path and file_path exist, and have same hash.
stubs.os.path.files.append(file_path)
self.assertFalse(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
# hash_path and file_path exist, and have different hashes.
cloud_storage.CalculateHash = _FakeCalulateHashNewHash
self.assertTrue(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
finally:
stubs.Restore()
cloud_storage._GetLocked = orig_get
cloud_storage.CalculateHash = orig_calculate_hash
cloud_storage.ReadHash = orig_read_hash
def testGetFilesInDirectoryIfChanged(self):
stubs = system_stub.Override(cloud_storage, ['os'])
stubs.os._directory = {'dir1':['1file1.sha1', '1file2.txt', '1file3.sha1'],
'dir2':['2file.txt'], 'dir3':['3file1.sha1']}
stubs.os.path.dirs = ['real_dir_path']
def IncrementFilesUpdated(*_):
IncrementFilesUpdated.files_updated += 1
IncrementFilesUpdated.files_updated = 0
orig_get_if_changed = cloud_storage.GetIfChanged
cloud_storage.GetIfChanged = IncrementFilesUpdated
try:
self.assertRaises(ValueError, cloud_storage.GetFilesInDirectoryIfChanged,
os.path.abspath(os.sep), cloud_storage.PUBLIC_BUCKET)
self.assertEqual(0, IncrementFilesUpdated.files_updated)
self.assertRaises(ValueError, cloud_storage.GetFilesInDirectoryIfChanged,
'fake_dir_path', cloud_storage.PUBLIC_BUCKET)
self.assertEqual(0, IncrementFilesUpdated.files_updated)
cloud_storage.GetFilesInDirectoryIfChanged('real_dir_path',
cloud_storage.PUBLIC_BUCKET)
self.assertEqual(3, IncrementFilesUpdated.files_updated)
finally:
cloud_storage.GetIfChanged = orig_get_if_changed
stubs.Restore()
def testCopy(self):
orig_run_command = cloud_storage._RunCommand
def AssertCorrectRunCommandArgs(args):
self.assertEqual(expected_args, args)
cloud_storage._RunCommand = AssertCorrectRunCommandArgs
expected_args = ['cp', 'gs://bucket1/remote_path1',
'gs://bucket2/remote_path2']
try:
cloud_storage.Copy('bucket1', 'bucket2', 'remote_path1', 'remote_path2')
finally:
cloud_storage._RunCommand = orig_run_command
| bsd-3-clause |
prakritish/ansible | lib/ansible/plugins/callback/skippy.py | 116 | 1317 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
    Identical to the default stdout callback, except that it suppresses
    output for skipped tasks and skipped items.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'skippy'
def v2_runner_on_skipped(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass
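
# Usage note (assumption based on standard Ansible callback wiring): select
# this plugin with `stdout_callback = skippy` in the [defaults] section of
# ansible.cfg, or by exporting ANSIBLE_STDOUT_CALLBACK=skippy.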
| gpl-3.0 |
uaarg/missioncommander | main.py | 1 | 6136 | #!/usr/bin/python3
import sys, getopt
import log, logging
import os
import argparse
import signal
import threading
from config import *
import ivylinker
from ui import UI
from database import importxml
from md5checker import findMD5
from interop.client import AsyncClient
from interoperability import MissionInformation, TelemetryThread, ObstacleThread
def argParser():
'''
    Uses argparse to read arguments passed on startup. To pass a custom
    argument on startup, simply add the flag and then the argument, as per below:
    python3 ./main.py -l thisURL
    python3 ./main.py -l thisURL --flightPlan webster
    TO ADD ANOTHER ARGUMENT:
    1) Add a default value in config.py and in defaultArgs
    2) Add an input argument line
    (parser.add_argument('-shortFlag', '--longFlag', help='delimited list input', type=str))
    NOTE: args[longFlag] = newArgument
    3) Access the arg in the main function via argDict['longFlag']
'''
defaultArgs = {'url': urlDefault, 'username':usernameDefault,
'password': passwordDefault}
parser = argparse.ArgumentParser()
    # Input arguments
parser.add_argument('-l', '--url', help='delimited list input', type=str)
parser.add_argument('-u', '--username', help='delimited list input', type=str)
parser.add_argument('-p', '--password', help='delimited list input', type=str)
# vars parses the namespace into a dictionary so we can iterate over the names
args = vars(parser.parse_args())
for key in args.keys():
if args[key] is None:
args[key] = defaultArgs[key]
return args
class MissionCommander():
def __init__(self, ivy_sender, logger):
"""
Initializes a MissionCommander object.
Args:
current_flight_plan: ???
ivy_sender: An ivylinker.IvySender object to which a
message handler can be bound.
"""
self.logger = logger
self.foundXML = False
self.ac_id = None
self.loadedXML = threading.Event()
self.loadedXML.clear()
from database import BagOfHolding
self.db = BagOfHolding()
self.ivy_sender = ivy_sender
self.ivy_sender.bindMessageHandler(self.ivyMsgHandler)
self.loadedXML.wait()
def loadXMLs(self, filepath, ac_id):
importxml.bindDBandFilepath(os.path.join(*[filepath, 'flight_plan.xml']), self.db, ac_id)
importxml.parseXML()
prefixMandT = self.determineFlightPlan()
importxml.bindDBandFilepath(os.path.join('MissAndTsk', prefixMandT + 'MissionsAndTasks.xml'), self.db, ac_id)
importxml.parseXML()
self.loadedXML.set()
def determineFlightPlan(self):
'''
This is a dumb way to find out what MissionsAndTasks file we should use, but it works
'''
homePosition = self.db.waypoints['OrIgIn'].get_latlon()
if ((str(homePosition['lat'])[0:6] == '53.638') and (str(homePosition['lon'])[0:8] == '-113.286')):
return 'Bremner'
if ((str(homePosition['lat'])[0:6] == '38.144') and (str(homePosition['lon'])[0:7] == '-76.427')):
return 'Webster'
else:
logger.critical('Did not find MissionsAndTasks file - should make one presently')
return ''
def initiSync(self):
from synchronizer import BagOfSynchronizing
self.sync = BagOfSynchronizing()
self.sync.startThread()
def ivyMsgHandler(self, ac_id, msg):
if self.foundXML:
if (msg.name == "WALDO_MSG"):
self.db.updateTelemetry(msg)
if (msg.name == "WP_MOVED"):
self.db.updateWaypoint(msg)
if (msg.name == "MISSION_STATUS"):
self.db.updateAirMissionStatus(msg)
if (msg.name == "NAVIGATION"):
self.db.updateCurrentFlightBlock(msg.cur_block)
else:
if (msg.name == "ALIVE"):
filepath = findMD5(msg.md5sum, self.logger)
if filepath != None:
self.loadXMLs(filepath, ac_id)
self.foundXML = True
self.ivy_sender.AC_ID = ac_id
self.ac_id = ac_id
if __name__ == '__main__':
log.init()
argDict = argParser()
logger = logging.getLogger(__name__)
serverIsUp = True
try:
interop = AsyncClient(argDict['url'], argDict['username'], argDict['password'], timeout = 1000)
except Exception as e:
        logging.critical('Failed to connect to interop server due to: \n'+str(e)+'.\nOperation of Interop threads is suppressed.\n')
serverIsUp = False
ivy_sender = ivylinker.IvySender(verbose=True)
mc = MissionCommander(ivy_sender, logger)
ui = UI(mc.db, ivy_sender.sendMessage, mc.ac_id)
# Allow Ctrl+C to kill the program with no cleanup
signal.signal(signal.SIGINT, signal.SIG_DFL)
if serverIsUp:
missionInfo = MissionInformation(interop, ivy_sender.sendMessage)
missionInfo.getMissionInformation()
try:
missionInfo.sendIvyOffAxisShape()
missionInfo.sendIvyEmergentTarget(mc.ac_id,mc.db)
#missionInfo.sendIvyGroupOfWaypoints(mc.ac_id,mc.db, 'OpArea')
#missionInfo.sendIvyGroupOfWaypoints(mc.ac_id,mc.db, 'SearchArea')
#missionInfo.sendIvyGroupOfWaypoints(mc.ac_id,mc.db, 'WptNav')
obstacle_thread = ObstacleThread(interop, ivy_sender.sendMessage)
obstacle_thread.start()
except Exception as e:
logging.critical('Failed to plot interop details (OAX, LKN, OpArea, SearchArea, WptNav and Obstacles) because of \n' + str(e))
telem_thread = TelemetryThread(interop, mc.db.airplane)
telem_thread.start()
ui.run() # Finishes when UI window is closed
print('Shutting down...')
if serverIsUp:
telem_thread.stop()
if not(missionInfo.mission_info is None):
obstacle_thread.stop()
ivy_sender.shutdown()
if serverIsUp:
telem_thread.join()
if not(missionInfo.mission_info is None):
obstacle_thread.join()
| gpl-2.0 |
liorvh/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
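# Sketch of how a single-byte prober consumes these tables (assumed from
# the companion sbcharsetprober logic, whose SAMPLE_SIZE is 64):
#
#   cur_order = Latin5_BulgarianCharToOrderMap[byte_value]
#   seq_category = BulgarianLangModel[prev_order * 64 + cur_order]
#
# i.e. bytes are first mapped to frequency ranks, and consecutive rank
# pairs index the 64x64 precedence matrix to rate how Bulgarian-like a
# byte sequence looks.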
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
# flake8: noqa
| gpl-3.0 |
bbenko/shinkicker | django/core/handlers/modpython.py | 189 | 8279 | import os
from pprint import pformat
import sys
from warnings import warn
from django import http
from django.core import signals
from django.core.handlers.base import BaseHandler
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, smart_str, iri_to_uri
from django.utils.log import getLogger
logger = getLogger('django.request')
# NOTE: do *not* import settings (or any module which eventually imports
# settings) until after ModPythonHandler has been called; otherwise os.environ
# won't be set up correctly (with respect to settings).
class ModPythonRequest(http.HttpRequest):
def __init__(self, req):
self._req = req
# FIXME: This isn't ideal. The request URI may be encoded (it's
# non-normalized) slightly differently to the "real" SCRIPT_NAME
# and PATH_INFO values. This causes problems when we compute path_info,
# below. For now, don't use script names that will be subject to
# encoding/decoding.
self.path = force_unicode(req.uri)
root = req.get_options().get('django.root', '')
self.django_root = root
# req.path_info isn't necessarily computed correctly in all
# circumstances (it's out of mod_python's control a bit), so we use
# req.uri and some string manipulations to get the right value.
if root and req.uri.startswith(root):
self.path_info = force_unicode(req.uri[len(root):])
else:
self.path_info = self.path
if not self.path_info:
# Django prefers empty paths to be '/', rather than '', to give us
# a common start character for URL patterns. So this is a little
# naughty, but also pretty harmless.
self.path_info = u'/'
self._post_parse_error = False
self._stream = self._req
self._read_started = False
def __repr__(self):
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = pformat(self.GET)
except:
get = '<could not parse>'
if self._post_parse_error:
post = '<could not parse>'
else:
try:
post = pformat(self.POST)
except:
post = '<could not parse>'
try:
cookies = pformat(self.COOKIES)
except:
cookies = '<could not parse>'
try:
meta = pformat(self.META)
except:
meta = '<could not parse>'
return smart_str(u'<ModPythonRequest\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
(self.path, unicode(get), unicode(post),
unicode(cookies), unicode(meta)))
def get_full_path(self):
# RFC 3986 requires self._req.args to be in the ASCII range, but this
# doesn't always happen, so rather than crash, we defensively encode it.
return '%s%s' % (self.path, self._req.args and ('?' + iri_to_uri(self._req.args)) or '')
def is_secure(self):
try:
return self._req.is_https()
except AttributeError:
# mod_python < 3.2.10 doesn't have req.is_https().
return self._req.subprocess_env.get('HTTPS', '').lower() in ('on', '1')
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
self._get = http.QueryDict(self._req.args, encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self._req.headers_in.get('cookie', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
def _get_meta(self):
"Lazy loader that returns self.META dictionary"
if not hasattr(self, '_meta'):
self._meta = {
'AUTH_TYPE': self._req.ap_auth_type,
'CONTENT_LENGTH': self._req.headers_in.get('content-length', 0),
'CONTENT_TYPE': self._req.headers_in.get('content-type'),
'GATEWAY_INTERFACE': 'CGI/1.1',
'PATH_INFO': self.path_info,
'PATH_TRANSLATED': None, # Not supported
'QUERY_STRING': self._req.args,
'REMOTE_ADDR': self._req.connection.remote_ip,
'REMOTE_HOST': None, # DNS lookups not supported
'REMOTE_IDENT': self._req.connection.remote_logname,
'REMOTE_USER': self._req.user,
'REQUEST_METHOD': self._req.method,
'SCRIPT_NAME': self.django_root,
'SERVER_NAME': self._req.server.server_hostname,
'SERVER_PORT': self._req.connection.local_addr[1],
'SERVER_PROTOCOL': self._req.protocol,
'SERVER_SOFTWARE': 'mod_python'
}
for key, value in self._req.headers_in.items():
key = 'HTTP_' + key.upper().replace('-', '_')
self._meta[key] = value
return self._meta
def _get_method(self):
return self.META['REQUEST_METHOD'].upper()
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
META = property(_get_meta)
REQUEST = property(_get_request)
method = property(_get_method)
class ModPythonHandler(BaseHandler):
request_class = ModPythonRequest
def __call__(self, req):
warn(('The mod_python handler is deprecated; use a WSGI or FastCGI server instead.'),
PendingDeprecationWarning)
# mod_python fakes the environ, and thus doesn't process SetEnv. This fixes that
os.environ.update(req.subprocess_env)
# now that the environ works we can see the correct settings, so imports
# that use settings now can work
from django.conf import settings
# if we need to set up middleware, now that settings works we can do it now.
if self._request_middleware is None:
self.load_middleware()
set_script_prefix(req.get_options().get('django.root', ''))
signals.request_started.send(sender=self.__class__)
try:
try:
request = self.request_class(req)
except UnicodeDecodeError:
                # `request` is unbound here if its construction failed, so
                # report the raw URI from the mod_python request instead.
                logger.warning('Bad Request (UnicodeDecodeError): %s' % req.uri,
                    exc_info=sys.exc_info(),
                    extra={
                        'status_code': 400,
                    }
                )
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
finally:
signals.request_finished.send(sender=self.__class__)
# Convert our custom HttpResponse object back into the mod_python req.
req.content_type = response['Content-Type']
for key, value in response.items():
if key != 'content-type':
req.headers_out[str(key)] = str(value)
for c in response.cookies.values():
req.headers_out.add('Set-Cookie', c.output(header=''))
req.status = response.status_code
try:
for chunk in response:
req.write(chunk)
finally:
response.close()
return 0 # mod_python.apache.OK
def handler(req):
# mod_python hooks into this function.
return ModPythonHandler()(req)
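
# Example Apache configuration for this handler (a sketch: paths and the
# settings module are placeholders, and mod_python itself is deprecated):
#
#   <Location "/mysite">
#       SetHandler python-program
#       PythonHandler django.core.handlers.modpython
#       SetEnv DJANGO_SETTINGS_MODULE mysite.settings
#       PythonOption django.root /mysite
#   </Location>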
| bsd-3-clause |
johankaito/fufuka | microblog/venv/lib/python2.7/site-packages/requests/packages/urllib3/fields.py | 1007 | 5833 | import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
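
# Illustrative behaviour of the helper above (a sketch; the encoded output
# assumes the stdlib email.utils.encode_rfc2231 formatting):
#
#   format_header_param('filename', 'report.txt')
#       -> 'filename="report.txt"'
#   format_header_param('filename', u'r\xe9sum\xe9.txt')
#       -> "filename*=utf-8''r%C3%A9sum%C3%A9.txt"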
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) typles or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
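
# Minimal usage sketch (added illustration, not part of urllib3): build a
# field from an old-style (filename, data) tuple and render its headers.
if __name__ == '__main__':
    field = RequestField.from_tuples('attachment', ('hello.txt', 'hello world'))
    # Prints the Content-Disposition and Content-Type headers, then a blank line.
    print(field.render_headers())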
| apache-2.0 |
bezi/globalizr | globalizr/api/interfaces/companies.py | 1 | 2501 | import urllib2
import urllib
import json
import subprocess
def genAllCodes():
base_url = "http://query.yahooapis.com/v1/public/yql?format=json&env=store%3A%2F%2F\
datatables.org%2Falltableswithkeys&q="
codes = []
for industry in range(110, 137):
print "Industry: {}".format(industry)
query = "select * from yahoo.finance.industry where id=\"{}\"".format(industry)
parsed = toJson(base_url, query)
if not (parsed["query"]["results"]["industry"]["name"] == ""):
companies = parsed["query"]["results"]["industry"]["company"]
for company in companies:
codes.append(company["symbol"])
return codes
def toJson(baseURL, args):
args = urllib.quote(args, '')
req =urllib2.Request(baseURL + args, None)
opener = urllib2.build_opener()
f = opener.open(req)
data = f.read()
return json.loads(data);
def genStats(code, metric):
    stockStats_base = "http://query.yahooapis.com/v1/public/yql?format=json&env=store%3A%2F%2F\
datatables.org%2Falltableswithkeys&q="
    stock = "select * from yahoo.finance.keystats where symbol=\'" + code + "\'"
    parsed = toJson(stockStats_base, stock)
    stats = parsed["query"]["results"]["stats"]
    # Return just the requested metric when it is present in the response,
    # otherwise fall back to the full stats dict.
    if metric in stats:
        return stats[metric]
    return stats
def genLoc(code):
city = subprocess.check_output(["./getaddr.sh", code])
google_geocode_base = "http://maps.googleapis.com/maps/api/geocode/json?sensor=false&address="
parsed = toJson(google_geocode_base, city);
if not (parsed["status"] == "ZERO_RESULTS"):
return (parsed["results"][0]["geometry"]["location"]["lng"],
parsed["results"][0]["geometry"]["location"]["lat"])
def parse_interface_companies(metric):
    # Map the friendly metric names used by the interface onto field names
    # of the Yahoo keystats table (the field names here are assumptions).
    metric_fields = {
        "current price": "LastTradePriceOnly",
        "total cash": "TotalCash",
        "total debt": "TotalDebt",
    }
    field = metric_fields.get(metric, metric)
    results = []
    for code in genAllCodes():
        loc = genLoc(code)
        if loc is None:
            continue  # skip companies whose headquarters cannot be geocoded
        results.append((loc, genStats(code, field)))
    return results
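
# Usage sketch (hypothetical metric name; output shape follows the rewritten
# parse_interface_companies above):
#   for loc, value in parse_interface_companies("total cash"):
#       print(loc, value)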
#print(genQuote("LMT"))
#print(genStats("LMT"))
print(genLoc("LMT") == None)
#print(genQuote("A7Z.DE"))
#print(genStats("A7Z.DE"))
print(genLoc("A7Z.DE") == None)
#print(genQuote("JA9.SI"))
#print(genStats("JA9.SI"))
print(genLoc("JA9.SI") == None)
#print(genQuote("adfadfadf"))
#print(genStats("adfadfadf"))
print(genLoc("adfadfadf") == None)
| mit |
frappe/frappe | frappe/email/doctype/email_account/test_email_account.py | 2 | 17096 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import os
import email
import unittest
from datetime import datetime, timedelta
from frappe.email.receive import InboundMail, SentEmailInInboxError, Email
from frappe.email.email_body import get_message_id
import frappe
from frappe.test_runner import make_test_records
from frappe.core.doctype.communication.email import make
from frappe.desk.form.load import get_attachments
from frappe.email.doctype.email_account.email_account import notify_unreplied
make_test_records("User")
make_test_records("Email Account")
class TestEmailAccount(unittest.TestCase):
@classmethod
def setUpClass(cls):
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.db_set("enable_incoming", 1)
email_account.db_set("enable_auto_reply", 1)
@classmethod
def tearDownClass(cls):
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.db_set("enable_incoming", 0)
def setUp(self):
frappe.flags.mute_emails = False
frappe.flags.sent_mail = None
frappe.db.sql('delete from `tabEmail Queue`')
frappe.db.sql('delete from `tabUnhandled Email`')
def get_test_mail(self, fname):
with open(os.path.join(os.path.dirname(__file__), "test_mails", fname), "r") as f:
return f.read()
def test_incoming(self):
cleanup("test_sender@example.com")
test_mails = [self.get_test_mail('incoming-1.raw')]
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.receive(test_mails=test_mails)
comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
self.assertTrue("test_receiver@example.com" in comm.recipients)
# check if todo is created
self.assertTrue(frappe.db.get_value(comm.reference_doctype, comm.reference_name, "name"))
def test_unread_notification(self):
self.test_incoming()
comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
comm.db_set("creation", datetime.now() - timedelta(seconds = 30 * 60))
frappe.db.sql("DELETE FROM `tabEmail Queue`")
notify_unreplied()
self.assertTrue(frappe.db.get_value("Email Queue", {"reference_doctype": comm.reference_doctype,
"reference_name": comm.reference_name, "status":"Not Sent"}))
def test_incoming_with_attach(self):
cleanup("test_sender@example.com")
existing_file = frappe.get_doc({'doctype': 'File', 'file_name': 'erpnext-conf-14.png'})
frappe.delete_doc("File", existing_file.name)
with open(os.path.join(os.path.dirname(__file__), "test_mails", "incoming-2.raw"), "r") as testfile:
test_mails = [testfile.read()]
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.receive(test_mails=test_mails)
comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
self.assertTrue("test_receiver@example.com" in comm.recipients)
# check attachment
attachments = get_attachments(comm.doctype, comm.name)
self.assertTrue("erpnext-conf-14.png" in [f.file_name for f in attachments])
# cleanup
existing_file = frappe.get_doc({'doctype': 'File', 'file_name': 'erpnext-conf-14.png'})
frappe.delete_doc("File", existing_file.name)
def test_incoming_attached_email_from_outlook_plain_text_only(self):
cleanup("test_sender@example.com")
with open(os.path.join(os.path.dirname(__file__), "test_mails", "incoming-3.raw"), "r") as f:
test_mails = [f.read()]
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.receive(test_mails=test_mails)
comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
self.assertTrue("From: "Microsoft Outlook" <test_sender@example.com>" in comm.content)
self.assertTrue("This is an e-mail message sent automatically by Microsoft Outlook while" in comm.content)
def test_incoming_attached_email_from_outlook_layers(self):
cleanup("test_sender@example.com")
with open(os.path.join(os.path.dirname(__file__), "test_mails", "incoming-4.raw"), "r") as f:
test_mails = [f.read()]
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.receive(test_mails=test_mails)
comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
self.assertTrue("From: "Microsoft Outlook" <test_sender@example.com>" in comm.content)
self.assertTrue("This is an e-mail message sent automatically by Microsoft Outlook while" in comm.content)
def test_outgoing(self):
make(subject = "test-mail-000", content="test mail 000", recipients="test_receiver@example.com",
send_email=True, sender="test_sender@example.com")
mail = email.message_from_string(frappe.get_last_doc("Email Queue").message)
self.assertTrue("test-mail-000" in mail.get("Subject"))
def test_sendmail(self):
frappe.sendmail(sender="test_sender@example.com", recipients="test_recipient@example.com",
content="test mail 001", subject="test-mail-001", delayed=False)
sent_mail = email.message_from_string(frappe.safe_decode(frappe.flags.sent_mail))
self.assertTrue("test-mail-001" in sent_mail.get("Subject"))
def test_print_format(self):
make(sender="test_sender@example.com", recipients="test_recipient@example.com",
content="test mail 001", subject="test-mail-002", doctype="Email Account",
name="_Test Email Account 1", print_format="Standard", send_email=True)
sent_mail = email.message_from_string(frappe.get_last_doc("Email Queue").message)
self.assertTrue("test-mail-002" in sent_mail.get("Subject"))
def test_threading(self):
cleanup(["in", ['test_sender@example.com', 'test@example.com']])
# send
sent_name = make(subject = "Test", content="test content",
recipients="test_receiver@example.com", sender="test@example.com",doctype="ToDo",name=frappe.get_last_doc("ToDo").name,
send_email=True)["name"]
sent_mail = email.message_from_string(frappe.get_last_doc("Email Queue").message)
with open(os.path.join(os.path.dirname(__file__), "test_mails", "reply-1.raw"), "r") as f:
raw = f.read()
raw = raw.replace("<-- in-reply-to -->", sent_mail.get("Message-Id"))
test_mails = [raw]
# parse reply
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.receive(test_mails=test_mails)
sent = frappe.get_doc("Communication", sent_name)
comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
self.assertEqual(comm.reference_doctype, sent.reference_doctype)
self.assertEqual(comm.reference_name, sent.reference_name)
def test_threading_by_subject(self):
cleanup(["in", ['test_sender@example.com', 'test@example.com']])
with open(os.path.join(os.path.dirname(__file__), "test_mails", "reply-2.raw"), "r") as f:
test_mails = [f.read()]
with open(os.path.join(os.path.dirname(__file__), "test_mails", "reply-3.raw"), "r") as f:
test_mails.append(f.read())
# parse reply
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.receive(test_mails=test_mails)
comm_list = frappe.get_all("Communication", filters={"sender":"test_sender@example.com"},
fields=["name", "reference_doctype", "reference_name"])
# both communications attached to the same reference
self.assertEqual(comm_list[0].reference_doctype, comm_list[1].reference_doctype)
self.assertEqual(comm_list[0].reference_name, comm_list[1].reference_name)
def test_threading_by_message_id(self):
cleanup()
frappe.db.sql("""delete from `tabEmail Queue`""")
# reference document for testing
event = frappe.get_doc(dict(doctype='Event', subject='test-message')).insert()
# send a mail against this
frappe.sendmail(recipients='test@example.com', subject='test message for threading',
message='testing', reference_doctype=event.doctype, reference_name=event.name)
last_mail = frappe.get_doc('Email Queue', dict(reference_name=event.name))
# get test mail with message-id as in-reply-to
with open(os.path.join(os.path.dirname(__file__), "test_mails", "reply-4.raw"), "r") as f:
test_mails = [f.read().replace('{{ message_id }}', last_mail.message_id)]
# pull the mail
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.receive(test_mails=test_mails)
comm_list = frappe.get_all("Communication", filters={"sender":"test_sender@example.com"},
fields=["name", "reference_doctype", "reference_name"])
# check if threaded correctly
self.assertEqual(comm_list[0].reference_doctype, event.doctype)
self.assertEqual(comm_list[0].reference_name, event.name)
def test_auto_reply(self):
cleanup("test_sender@example.com")
test_mails = [self.get_test_mail('incoming-1.raw')]
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.receive(test_mails=test_mails)
comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
self.assertTrue(frappe.db.get_value("Email Queue", {"reference_doctype": comm.reference_doctype,
"reference_name": comm.reference_name}))
def test_handle_bad_emails(self):
mail_content = self.get_test_mail(fname="incoming-1.raw")
message_id = Email(mail_content).mail.get('Message-ID')
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.handle_bad_emails(uid=-1, raw=mail_content, reason="Testing")
self.assertTrue(frappe.db.get_value("Unhandled Email", {'message_id': message_id}))
class TestInboundMail(unittest.TestCase):
@classmethod
def setUpClass(cls):
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.db_set("enable_incoming", 1)
@classmethod
def tearDownClass(cls):
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
email_account.db_set("enable_incoming", 0)
def setUp(self):
cleanup()
frappe.db.sql('delete from `tabEmail Queue`')
frappe.db.sql('delete from `tabToDo`')
def get_test_mail(self, fname):
with open(os.path.join(os.path.dirname(__file__), "test_mails", fname), "r") as f:
return f.read()
def new_doc(self, doctype, **data):
doc = frappe.new_doc(doctype)
for field, value in data.items():
setattr(doc, field, value)
doc.insert()
return doc
def new_communication(self, **kwargs):
defaults = {
'subject': "Test Subject"
}
d = {**defaults, **kwargs}
return self.new_doc('Communication', **d)
def new_email_queue(self, **kwargs):
defaults = {
'message_id': get_message_id().strip(" <>")
}
d = {**defaults, **kwargs}
return self.new_doc('Email Queue', **d)
def new_todo(self, **kwargs):
defaults = {
'description': "Description"
}
d = {**defaults, **kwargs}
return self.new_doc('ToDo', **d)
def test_self_sent_mail(self):
"""Check that we raise SentEmailInInboxError if the inbound mail is self sent mail.
"""
mail_content = self.get_test_mail(fname="incoming-self-sent.raw")
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 1, 1)
with self.assertRaises(SentEmailInInboxError):
inbound_mail.process()
def test_mail_exist_validation(self):
"""Do not create communication record if the mail is already downloaded into the system.
"""
mail_content = self.get_test_mail(fname="incoming-1.raw")
message_id = Email(mail_content).message_id
# Create new communication record in DB
communication = self.new_communication(message_id=message_id)
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
		new_communication = inbound_mail.process()
		# Make sure that the uid is updated to the new uid
		self.assertEqual(new_communication.uid, 12345)
		self.assertEqual(communication.name, new_communication.name)
def test_find_parent_email_queue(self):
"""If the mail is reply to the already sent mail, there will be a email queue record.
"""
# Create email queue record
queue_record = self.new_email_queue()
mail_content = self.get_test_mail(fname="reply-4.raw").replace(
"{{ message_id }}", queue_record.message_id
)
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
parent_queue = inbound_mail.parent_email_queue()
self.assertEqual(queue_record.name, parent_queue.name)
def test_find_parent_communication_through_queue(self):
"""Find parent communication of an inbound mail.
Cases where parent communication does exist:
1. No parent communication is the mail is not a reply.
Cases where parent communication does not exist:
2. If mail is not a reply to system sent mail, then there can exist co
"""
# Create email queue record
communication = self.new_communication()
queue_record = self.new_email_queue(communication=communication.name)
mail_content = self.get_test_mail(fname="reply-4.raw").replace(
"{{ message_id }}", queue_record.message_id
)
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
parent_communication = inbound_mail.parent_communication()
self.assertEqual(parent_communication.name, communication.name)
def test_find_parent_communication_for_self_reply(self):
"""If the inbound email is a reply but not reply to system sent mail.
Ex: User replied to his/her mail.
"""
message_id = "new-message-id"
mail_content = self.get_test_mail(fname="reply-4.raw").replace(
"{{ message_id }}", message_id
)
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
parent_communication = inbound_mail.parent_communication()
self.assertFalse(parent_communication)
communication = self.new_communication(message_id=message_id)
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
parent_communication = inbound_mail.parent_communication()
self.assertEqual(parent_communication.name, communication.name)
def test_find_parent_communication_from_header(self):
"""Incase of header contains parent communication name
"""
communication = self.new_communication()
mail_content = self.get_test_mail(fname="reply-4.raw").replace(
"{{ message_id }}", f"<{communication.name}@{frappe.local.site}>"
)
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
parent_communication = inbound_mail.parent_communication()
self.assertEqual(parent_communication.name, communication.name)
def test_reference_document(self):
# Create email queue record
todo = self.new_todo()
# communication = self.new_communication(reference_doctype='ToDo', reference_name=todo.name)
queue_record = self.new_email_queue(reference_doctype='ToDo', reference_name=todo.name)
mail_content = self.get_test_mail(fname="reply-4.raw").replace(
"{{ message_id }}", queue_record.message_id
)
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
reference_doc = inbound_mail.reference_document()
self.assertEqual(todo.name, reference_doc.name)
def test_reference_document_by_record_name_in_subject(self):
# Create email queue record
todo = self.new_todo()
mail_content = self.get_test_mail(fname="incoming-subject-placeholder.raw").replace(
"{{ subject }}", f"RE: (#{todo.name})"
)
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
reference_doc = inbound_mail.reference_document()
self.assertEqual(todo.name, reference_doc.name)
def test_reference_document_by_subject_match(self):
subject = "New todo"
todo = self.new_todo(sender='test_sender@example.com', description=subject)
mail_content = self.get_test_mail(fname="incoming-subject-placeholder.raw").replace(
"{{ subject }}", f"RE: {subject}"
)
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
reference_doc = inbound_mail.reference_document()
self.assertEqual(todo.name, reference_doc.name)
def test_create_communication_from_mail(self):
# Create email queue record
mail_content = self.get_test_mail(fname="incoming-2.raw")
email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
inbound_mail = InboundMail(mail_content, email_account, 12345, 1)
communication = inbound_mail.process()
self.assertTrue(communication.is_first)
self.assertTrue(communication._attachments)
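# A minimal usage sketch of InboundMail outside the test harness, assuming a
# configured "_Test Email Account 1" and a raw RFC 2822 message string; the
# positional arguments mirror the test calls above (uid, then seen status):
#
#   account = frappe.get_doc("Email Account", "_Test Email Account 1")
#   communication = InboundMail(raw_message, account, 12345, 1).process()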
def cleanup(sender=None):
filters = {}
if sender:
filters.update({"sender": sender})
names = frappe.get_list("Communication", filters=filters, fields=["name"])
for name in names:
frappe.delete_doc_if_exists("Communication", name.name)
frappe.delete_doc_if_exists("Communication Link", {"parent": name.name})
| mit |