repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
gurneyalex/odoo | addons/mass_mailing/models/mailing_contact.py | 5 | 5454 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.osv import expression
class MassMailingContactListRel(models.Model):
    """ Intermediate model between mass mailing list and mass mailing contact
    Indicates if a contact is opted out for a particular list
    """
    _name = 'mailing.contact.subscription'
    _description = 'Mass Mailing Subscription Information'
    # Reuse the contact <-> list many2many relation table, so each m2m link
    # and each subscription record are the same database row.
    _table = 'mailing_contact_list_rel'
    _rec_name = 'contact_id'
    contact_id = fields.Many2one('mailing.contact', string='Contact', ondelete='cascade', required=True)
    list_id = fields.Many2one('mailing.list', string='Mailing List', ondelete='cascade', required=True)
    opt_out = fields.Boolean(string='Opt Out',
                             help='The contact has chosen not to receive mails anymore from this list', default=False)
    # Set automatically in create()/write() whenever opt_out changes.
    unsubscription_date = fields.Datetime(string='Unsubscription Date')
    # Related (non-stored) mirrors of contact-level state, editable from here.
    message_bounce = fields.Integer(related='contact_id.message_bounce', store=False, readonly=False)
    is_blacklisted = fields.Boolean(related='contact_id.is_blacklisted', store=False, readonly=False)
    _sql_constraints = [
        ('unique_contact_list', 'unique (contact_id, list_id)',
         'A contact cannot be subscribed multiple times to the same list!')
    ]

    @api.model
    def create(self, vals):
        """Stamp the unsubscription date when a record is created opted-out.

        Note: when opt_out is falsy the date is set to False (cleared), which
        is the intended Odoo idiom of `flag and now()`.
        """
        if 'opt_out' in vals:
            vals['unsubscription_date'] = vals['opt_out'] and fields.Datetime.now()
        return super(MassMailingContactListRel, self).create(vals)

    def write(self, vals):
        """Keep unsubscription_date in sync whenever opt_out is written."""
        if 'opt_out' in vals:
            vals['unsubscription_date'] = vals['opt_out'] and fields.Datetime.now()
        return super(MassMailingContactListRel, self).write(vals)
class MassMailingContact(models.Model):
    """Model of a contact. This model is different from the partner model
    because it holds only some basic information: name, email. The purpose is to
    be able to deal with large contact list to email without bloating the partner
    base."""
    _name = 'mailing.contact'
    _inherit = ['mail.thread.blacklist']
    _description = 'Mailing Contact'
    _order = 'email'
    name = fields.Char()
    company_name = fields.Char(string='Company Name')
    title_id = fields.Many2one('res.partner.title', string='Title')
    email = fields.Char('Email')
    list_ids = fields.Many2many(
        'mailing.list', 'mailing_contact_list_rel',
        'contact_id', 'list_id', string='Mailing Lists')
    # One subscription record per (contact, list) pair; same table as list_ids.
    subscription_list_ids = fields.One2many('mailing.contact.subscription', 'contact_id', string='Subscription Information')
    country_id = fields.Many2one('res.country', string='Country')
    tag_ids = fields.Many2many('res.partner.category', string='Tags')
    opt_out = fields.Boolean('Opt Out', compute='_compute_opt_out', search='_search_opt_out',
                             help='Opt out flag for a specific mailing list.'
                             'This field should not be used in a view without a unique and active mailing list context.')

    @api.model
    def _search_opt_out(self, operator, value):
        # Assumes operator is '=' or '!=' and value is True or False
        if operator != '=':
            if operator == '!=' and isinstance(value, bool):
                value = not value
            else:
                raise NotImplementedError()
        # opt_out only makes sense relative to exactly one mailing list, taken
        # from the default_list_ids context key.
        if 'default_list_ids' in self._context and isinstance(self._context['default_list_ids'], (list, tuple)) and len(self._context['default_list_ids']) == 1:
            [active_list_id] = self._context['default_list_ids']
            contacts = self.env['mailing.contact.subscription'].search([('list_id', '=', active_list_id)])
            return [('id', 'in', [record.contact_id.id for record in contacts if record.opt_out == value])]
        else:
            # Without a single active list no contact is considered opted out.
            return expression.FALSE_DOMAIN if value else expression.TRUE_DOMAIN

    @api.depends('subscription_list_ids')
    def _compute_opt_out(self):
        """Compute opt_out from the subscription matching the active list."""
        if 'default_list_ids' in self._context and isinstance(self._context['default_list_ids'], (list, tuple)) and len(self._context['default_list_ids']) == 1:
            [active_list_id] = self._context['default_list_ids']
            for record in self:
                active_subscription_list = record.subscription_list_ids.filtered(lambda l: l.list_id.id == active_list_id)
                record.opt_out = active_subscription_list.opt_out
        else:
            for record in self:
                record.opt_out = False

    def get_name_email(self, name):
        """Split a 'Name <email>' string; fall back to using the one part
        found as both name and email."""
        name, email = self.env['res.partner']._parse_partner_name(name)
        if name and not email:
            email = name
        if email and not name:
            name = email
        return name, email

    @api.model
    def name_create(self, name):
        """Create a contact from a 'Name <email>' string (quick-create)."""
        name, email = self.get_name_email(name)
        contact = self.create({'name': name, 'email': email})
        return contact.name_get()[0]

    @api.model
    def add_to_list(self, name, list_id):
        """Create a contact and link it to the given mailing list in one go."""
        name, email = self.get_name_email(name)
        contact = self.create({'name': name, 'email': email, 'list_ids': [(4, list_id)]})
        return contact.name_get()[0]

    def _message_get_default_recipients(self):
        # Mailing contacts are addressed by plain email, never by partner.
        return {r.id: {
            'partner_ids': [],
            'email_to': r.email_normalized,
            'email_cc': False}
            for r in self
        }
| agpl-3.0 |
programadorjc/django | django/utils/deprecation.py | 199 | 2627 | from __future__ import absolute_import
import inspect
import warnings
class RemovedInDjango20Warning(PendingDeprecationWarning):
    """Category for features scheduled for removal in Django 2.0."""
    pass


class RemovedInDjango110Warning(DeprecationWarning):
    """Category for features scheduled for removal in Django 1.10."""
    pass


# Convenience alias: the warning for the *next* release's removals.
RemovedInNextVersionWarning = RemovedInDjango110Warning
class warn_about_renamed_method(object):
    """Decorator emitting a deprecation warning each time a method is
    invoked through its old (renamed) name, then delegating to the
    wrapped callable."""

    def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
        self.class_name = class_name
        self.old_method_name = old_method_name
        self.new_method_name = new_method_name
        self.deprecation_warning = deprecation_warning

    def __call__(self, f):
        def wrapped(*args, **kwargs):
            message = "`%s.%s` is deprecated, use `%s` instead." % (
                self.class_name,
                self.old_method_name,
                self.new_method_name,
            )
            # stacklevel 2: point the warning at the caller, not at wrapped().
            warnings.warn(message, self.deprecation_warning, 2)
            return f(*args, **kwargs)
        return wrapped
class RenameMethodsBase(type):
    """
    Handles the deprecation paths when renaming a method.
    It does the following:
    1) Define the new method if missing and complain about it.
    2) Define the old method if missing.
    3) Complain whenever an old method is called.
    See #15363 for more details.
    """
    # Iterable of (old_name, new_name, deprecation_warning_class) tuples,
    # meant to be overridden by concrete metaclass subclasses.
    renamed_methods = ()

    def __new__(cls, name, bases, attrs):
        new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs)
        # Walk the full MRO so renames declared on ancestors are patched too.
        # NOTE: setattr(base, ...) mutates the base classes in place.
        for base in inspect.getmro(new_class):
            class_name = base.__name__
            for renamed_method in cls.renamed_methods:
                old_method_name = renamed_method[0]
                old_method = base.__dict__.get(old_method_name)
                new_method_name = renamed_method[1]
                new_method = base.__dict__.get(new_method_name)
                deprecation_warning = renamed_method[2]
                wrapper = warn_about_renamed_method(class_name, *renamed_method)
                # Define the new method if missing and complain about it
                if not new_method and old_method:
                    warnings.warn(
                        "`%s.%s` method should be renamed `%s`." %
                        (class_name, old_method_name, new_method_name),
                        deprecation_warning, 2)
                    setattr(base, new_method_name, old_method)
                    setattr(base, old_method_name, wrapper(old_method))
                # Define the old method as a wrapped call to the new method.
                if not old_method and new_method:
                    setattr(base, old_method_name, wrapper(new_method))
        return new_class
| bsd-3-clause |
trnewman/VT-USRP-daughterboard-drivers_python | gnuradio-core/src/python/gnuradio/blks2impl/filterbank.py | 18 | 5981 | #
# Copyright 2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
from gnuradio import gr, gru
def _generate_synthesis_taps(mpoints):
return [] # FIXME
def _split_taps(taps, mpoints):
assert (len(taps) % mpoints) == 0
result = [list() for x in range(mpoints)]
for i in xrange(len(taps)):
(result[i % mpoints]).append(taps[i])
return [tuple(x) for x in result]
class synthesis_filterbank(gr.hier_block2):
    """
    Uniformly modulated polyphase DFT filter bank: synthesis
    See http://cnx.org/content/m10424/latest
    """
    def __init__(self, mpoints, taps=None):
        """
        Takes M complex streams in, produces single complex stream out
        that runs at M times the input sample rate

        @param mpoints: number of freq bins/interpolation factor/subbands
        @param taps: filter taps for subband filter

        The channel spacing is equal to the input sample rate.
        The total bandwidth and output sample rate are equal the input
        sample rate * nchannels.

        Output stream to frequency mapping:
            channel zero is at zero frequency.
            if mpoints is odd:
                Channels with increasing positive frequencies come from
                channels 1 through (N-1)/2.
                Channel (N+1)/2 is the maximum negative frequency, and
                frequency increases through N-1 which is one channel lower
                than the zero frequency.
            if mpoints is even:
                Channels with increasing positive frequencies come from
                channels 1 through (N/2)-1.
                Channel (N/2) is evenly split between the max positive and
                negative bins.
                Channel (N/2)+1 is the maximum negative frequency, and
                frequency increases through N-1 which is one channel lower
                than the zero frequency.
            Channels near the frequency extremes end up getting cut
            off by subsequent filters and therefore have diminished
            utility.
        """
        item_size = gr.sizeof_gr_complex
        gr.hier_block2.__init__(self, "synthesis_filterbank",
                                gr.io_signature(mpoints, mpoints, item_size),  # Input signature
                                gr.io_signature(1, 1, item_size))              # Output signature
        if taps is None:
            taps = _generate_synthesis_taps(mpoints)
        # pad taps to multiple of mpoints
        r = len(taps) % mpoints
        if r != 0:
            taps = taps + (mpoints - r) * (0,)
        # split in mpoints separate set of taps
        sub_taps = _split_taps(taps, mpoints)
        # Flowgraph: M inputs -> vector -> IFFT -> per-branch FIR -> interleave.
        self.ss2v = gr.streams_to_vector(item_size, mpoints)
        self.ifft = gr.fft_vcc(mpoints, False, [])
        self.v2ss = gr.vector_to_streams(item_size, mpoints)
        # mpoints filters go in here...
        self.ss2s = gr.streams_to_stream(item_size, mpoints)
        for i in range(mpoints):
            self.connect((self, i), (self.ss2v, i))
        self.connect(self.ss2v, self.ifft, self.v2ss)
        # build mpoints fir filters...
        for i in range(mpoints):
            f = gr.fft_filter_ccc(1, sub_taps[i])
            self.connect((self.v2ss, i), f)
            self.connect(f, (self.ss2s, i))
        self.connect(self.ss2s, self)
class analysis_filterbank(gr.hier_block2):
    """
    Uniformly modulated polyphase DFT filter bank: analysis
    See http://cnx.org/content/m10424/latest
    """
    def __init__(self, mpoints, taps=None):
        """
        Takes 1 complex stream in, produces M complex streams out
        that runs at 1/M times the input sample rate

        @param mpoints: number of freq bins/interpolation factor/subbands
        @param taps: filter taps for subband filter

        Same channel to frequency mapping as described above.
        """
        item_size = gr.sizeof_gr_complex
        gr.hier_block2.__init__(self, "analysis_filterbank",
                                gr.io_signature(1, 1, item_size),              # Input signature
                                gr.io_signature(mpoints, mpoints, item_size))  # Output signature
        if taps is None:
            taps = _generate_synthesis_taps(mpoints)
        # pad taps to multiple of mpoints
        r = len(taps) % mpoints
        if r != 0:
            taps = taps + (mpoints - r) * (0,)
        # split in mpoints separate set of taps
        sub_taps = _split_taps(taps, mpoints)
        # print >> sys.stderr, "mpoints =", mpoints, "len(sub_taps) =", len(sub_taps)
        # Flowgraph: deinterleave -> per-branch FIR (taps reversed) -> FFT -> M outputs.
        self.s2ss = gr.stream_to_streams(item_size, mpoints)
        # filters here
        self.ss2v = gr.streams_to_vector(item_size, mpoints)
        self.fft = gr.fft_vcc(mpoints, True, [])
        self.v2ss = gr.vector_to_streams(item_size, mpoints)
        self.connect(self, self.s2ss)
        # build mpoints fir filters...
        for i in range(mpoints):
            f = gr.fft_filter_ccc(1, sub_taps[mpoints-i-1])
            self.connect((self.s2ss, i), f)
            self.connect(f, (self.ss2v, i))
            self.connect((self.v2ss, i), (self, i))
        self.connect(self.ss2v, self.fft, self.v2ss)
| gpl-3.0 |
matthewAURA/MRR-Website | mrr_website/commands.py | 23 | 4250 | # -*- coding: utf-8 -*-
"""Click commands."""
import os
import sys
from glob import glob
from subprocess import call

import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
# Filesystem anchors: this file's directory, the project root above it, and
# the test-suite directory used by the `test` command.
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, 'tests')
@click.command()
def test():
    """Run the tests."""
    import pytest
    rv = pytest.main([TEST_PATH, '--verbose'])
    # BUG FIX: use sys.exit() instead of the exit() builtin injected by
    # site.py, which is not guaranteed to exist (e.g. under `python -S`).
    sys.exit(rv)
@click.command()
@click.option('-f', '--fix-imports', default=False, is_flag=True,
              help='Fix imports using isort, before linting')
def lint(fix_imports):
    """Lint and check code style with flake8 and isort."""
    skip = ['requirements']
    root_files = glob('*.py')
    root_directories = [
        name for name in next(os.walk('.'))[1] if not name.startswith('.')]
    files_and_directories = [
        arg for arg in root_files + root_directories if arg not in skip]

    def execute_tool(description, *args):
        """Execute a checking tool with its arguments."""
        command_line = list(args) + files_and_directories
        click.echo('{}: {}'.format(description, ' '.join(command_line)))
        rv = call(command_line)
        if rv != 0:
            # BUG FIX: sys.exit() rather than the site-provided exit()
            # builtin, which may be absent; propagates the tool's status.
            sys.exit(rv)

    if fix_imports:
        execute_tool('Fixing import order', 'isort', '-rc')
    execute_tool('Checking code style', 'flake8')
@click.command()
def clean():
    """Remove *.pyc and *.pyo files recursively starting at current directory.
    Borrowed from Flask-Script, converted to use Click.
    """
    # Walk the tree once; endswith() accepts a tuple of suffixes.
    for dirpath, _dirnames, filenames in os.walk('.'):
        for name in filenames:
            if name.endswith(('.pyc', '.pyo')):
                full_pathname = os.path.join(dirpath, name)
                click.echo('Removing {}'.format(full_pathname))
                os.remove(full_pathname)
@click.command()
@click.option('--url', default=None,
              help='Url to test (ex. /static/image.png)')
@click.option('--order', default='rule',
              help='Property on Rule to order by (default: rule)')
@with_appcontext
def urls(url, order):
    """Display all of the url matching routes for the project.
    Borrowed from Flask-Script, converted to use Click.
    """
    rows = []
    column_length = 0
    column_headers = ('Rule', 'Endpoint', 'Arguments')
    if url:
        # Single-URL mode: resolve the given path against the app's url map.
        try:
            rule, arguments = (
                current_app.url_map
                .bind('localhost')
                .match(url, return_rule=True))
            rows.append((rule.rule, rule.endpoint, arguments))
            column_length = 3
        except (NotFound, MethodNotAllowed) as e:
            # Show the routing error itself as the single (one-column) row.
            rows.append(('<{}>'.format(e), None, None))
            column_length = 1
    else:
        # Listing mode: every rule, sorted by the requested Rule attribute.
        rules = sorted(
            current_app.url_map.iter_rules(),
            key=lambda rule: getattr(rule, order))
        for rule in rules:
            rows.append((rule.rule, rule.endpoint, None))
        column_length = 2
    # Build a format string sized to the widest cell of each shown column
    # (minimum width = header length).
    str_template = ''
    table_width = 0
    if column_length >= 1:
        max_rule_length = max(len(r[0]) for r in rows)
        max_rule_length = max_rule_length if max_rule_length > 4 else 4
        str_template += '{:' + str(max_rule_length) + '}'
        table_width += max_rule_length
    if column_length >= 2:
        max_endpoint_length = max(len(str(r[1])) for r in rows)
        # max_endpoint_length = max(rows, key=len)
        max_endpoint_length = (
            max_endpoint_length if max_endpoint_length > 8 else 8)
        str_template += '  {:' + str(max_endpoint_length) + '}'
        table_width += 2 + max_endpoint_length
    if column_length >= 3:
        max_arguments_length = max(len(str(r[2])) for r in rows)
        max_arguments_length = (
            max_arguments_length if max_arguments_length > 9 else 9)
        str_template += '  {:' + str(max_arguments_length) + '}'
        table_width += 2 + max_arguments_length
    click.echo(str_template.format(*column_headers[:column_length]))
    click.echo('-' * table_width)
    for row in rows:
        click.echo(str_template.format(*row[:column_length]))
| bsd-3-clause |
luciferz2012/ShadowsocksFork | shadowsocks/encrypt.py | 990 | 5180 | #!/usr/bin/env python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
# Registry mapping cipher name -> (key_len, iv_len, cipher factory),
# merged from every available crypto backend.
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
    """Return *length* cryptographically random bytes (used for IVs)."""
    data = os.urandom(length)
    return data
# Cache of derived (key, iv) tuples, keyed by "password-key_len-iv_len".
cached_keys = {}


def try_cipher(key, method=None):
    """Raise if *key*/*method* cannot be used to build a working Encryptor."""
    Encryptor(key, method)


def EVP_BytesToKey(password, key_len, iv_len):
    # equivalent to OpenSSL's EVP_BytesToKey() with count 1
    # so that we make the same key and iv as nodejs version
    cache_id = '%s-%d-%d' % (password, key_len, iv_len)
    cached = cached_keys.get(cache_id, None)
    if cached:
        return cached
    # Chain MD5 digests: D1 = md5(password), Dn = md5(D(n-1) + password),
    # until enough key material has accumulated.
    digests = []
    previous = b''
    while len(b''.join(digests)) < (key_len + iv_len):
        md5 = hashlib.md5()
        md5.update(previous + password)
        previous = md5.digest()
        digests.append(previous)
    material = b''.join(digests)
    key = material[:key_len]
    iv = material[key_len:key_len + iv_len]
    cached_keys[cache_id] = (key, iv)
    return key, iv
class Encryptor(object):
    """Stateful stream encryptor/decryptor for one connection.

    The cipher direction is created eagerly with a random IV; the decipher
    is created lazily from the IV found at the head of the first ciphertext.
    """
    def __init__(self, key, method):
        self.key = key
        self.method = method
        self.iv = None
        # The IV is prepended to the first encrypted chunk only.
        self.iv_sent = False
        self.cipher_iv = b''
        self.decipher = None
        method = method.lower()
        self._method_info = self.get_method_info(method)
        if self._method_info:
            # op == 1 means "encrypt" for the backend cipher factories.
            self.cipher = self.get_cipher(key, method, 1,
                                          random_string(self._method_info[1]))
        else:
            logging.error('method %s not supported' % method)
            sys.exit(1)

    def get_method_info(self, method):
        """Return (key_len, iv_len, cipher_factory) or None if unsupported."""
        method = method.lower()
        m = method_supported.get(method)
        return m

    def iv_len(self):
        return len(self.cipher_iv)

    def get_cipher(self, password, method, op, iv):
        """Build a backend cipher object for the given direction (op)."""
        password = common.to_bytes(password)
        m = self._method_info
        if m[0] > 0:
            key, iv_ = EVP_BytesToKey(password, m[0], m[1])
        else:
            # key_length == 0 indicates we should use the key directly
            key, iv = password, b''
        iv = iv[:m[1]]
        if op == 1:
            # this iv is for cipher not decipher
            self.cipher_iv = iv[:m[1]]
        return m[2](method, key, iv, op)

    def encrypt(self, buf):
        if len(buf) == 0:
            return buf
        if self.iv_sent:
            return self.cipher.update(buf)
        else:
            # First chunk: prepend the IV so the peer can build its decipher.
            self.iv_sent = True
            return self.cipher_iv + self.cipher.update(buf)

    def decrypt(self, buf):
        if len(buf) == 0:
            return buf
        if self.decipher is None:
            # First chunk: strip the peer's IV from the stream head.
            decipher_iv_len = self._method_info[1]
            decipher_iv = buf[:decipher_iv_len]
            self.decipher = self.get_cipher(self.key, self.method, 0,
                                            iv=decipher_iv)
            buf = buf[decipher_iv_len:]
            if len(buf) == 0:
                return buf
        return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
    """One-shot encrypt (op truthy) or decrypt (op falsy) of *data*.

    When encrypting, a fresh random IV is generated and prepended to the
    output; when decrypting, the IV is read from the head of *data*.
    """
    chunks = []
    method = method.lower()
    (key_len, iv_len, cipher_factory) = method_supported[method]
    if key_len > 0:
        key, _ = EVP_BytesToKey(password, key_len, iv_len)
    else:
        key = password
    if op:
        iv = random_string(iv_len)
        chunks.append(iv)
    else:
        iv = data[:iv_len]
        data = data[iv_len:]
    cipher = cipher_factory(method, key, iv, op)
    chunks.append(cipher.update(data))
    return b''.join(chunks)
# Cipher names exercised by the self-tests below; covers the OpenSSL,
# rc4-md5, libsodium and plain-table backends.
CIPHERS_TO_TEST = [
    'aes-128-cfb',
    'aes-256-cfb',
    'rc4-md5',
    'salsa20',
    'chacha20',
    'table',
]
def test_encryptor():
    """Round-trip a random payload through Encryptor for every cipher."""
    from os import urandom
    plain = urandom(10240)
    for method in CIPHERS_TO_TEST:
        logging.warn(method)
        encryptor = Encryptor(b'key', method)
        decryptor = Encryptor(b'key', method)
        assert decryptor.decrypt(encryptor.encrypt(plain)) == plain
def test_encrypt_all():
    """Round-trip a random payload through encrypt_all for every cipher."""
    from os import urandom
    plain = urandom(10240)
    for method in CIPHERS_TO_TEST:
        logging.warn(method)
        ciphertext = encrypt_all(b'key', method, 1, plain)
        assert encrypt_all(b'key', method, 0, ciphertext) == plain
# Run the built-in smoke tests when executed directly.
if __name__ == '__main__':
    test_encrypt_all()
    test_encryptor()
| apache-2.0 |
dannywxh/mypy | spider/avso.py | 1 | 5739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, time, sys
from bs4 import BeautifulSoup
import common
import requests
# Python 2 hack: reload(sys) re-exposes setdefaultencoding (site.py deletes
# it) so the process-wide default encoding can be forced to UTF-8.
reload(sys)
#print sys.getdefaultencoding()
sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
def download_html(url):
    """Fetch *url* with browser-like headers.

    Returns the raw response body (bytes) on HTTP success, or None (implicit)
    on a non-OK status.  NOTE(review): requests.get may raise on timeout or
    connection errors; callers only handle the None case.
    """
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
               'Accept-Encoding': 'gzip, deflate, compress',
               'Accept-Language': 'en-us;q=0.5,en;q=0.3',
               'Cache-Control': 'max-age=0',
               'Connection': 'keep-alive',
               'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}
    print "download from "+url+"\n"
    response = requests.get(url=url, headers=headers, timeout=5)  # plain GET request
    #print "status_code",response.status_code
    if response.ok:
        #print response.content.encode("gbk")
        #return StringIO.StringIO(response.content)
        data = response.content
        return data
    # falls through returning None when the response status is not OK
#####以下处理 avso ,可以封装成一个类 #################
def get_cast_onepage_by_avso(cast_name,pagecount=1):
url=r'https://avso.pw/cn/search/'+cast_name+'/page/'+str(pagecount)
data=download_html(url)
if data:
#print response.content.encode("gbk")
soup = BeautifulSoup(data,"html.parser")
ret=[]
try:
notfound=soup.find('div',clasas_="alert alert-danger")
if notfound!=None:
print "Not Found!"
return -1
divs=soup.find_all('div',class_="item")
if divs==None:
print "divs is None!"
return
for div in divs:
info=div.find('div',class_="photo-info")
name=div.find('span')
#print name.text
datas=info.find_all('date')
ret.append((name.text,datas[0].text,datas[1].text))
return ret
except Exception,e:
print e
return -1
#print "vcast not found!"
def get_cast_allpage_by_avso(cast_name):
    """Scrape up to 9 result pages for *cast_name* and dump the collected
    (title, vid, date) rows, tab-separated, to d:\\<cast_name>.txt."""
    all_info = []
    for i in range(1, 10):
        info = get_cast_onepage_by_avso(cast_name, i)
        if info == -1:
            # -1 signals "no more results" (or a parse error): stop paging.
            break
        else:
            all_info += info
    print all_info
    savefile = "d:\\"+cast_name+".txt"
    with open(savefile, "w") as fs:
        for name, vid, date in all_info:
            fs.write(name.encode("utf-8")+"\t"+vid+"\t"+date+"\n")
    print "file create done!"
# step:1
def serch_movie_byvid(vid):
    """Search the site for video id *vid* ("serch" sic - public name kept).

    Returns a list of (title, cast_names, css_type_class) tuples, -1 on
    "not found"/parse errors, or None (implicit) when the download failed.
    """
    url = 'https://avso.pw/cn/search/'+vid
    #url='https://avso.pw/cn/search/'+vid #110615_185'
    data = download_html(url)
    if data:
        #print response.content.encode("gbk")
        soup = BeautifulSoup(data, "lxml")
        ret = []
        try:
            notfound = soup.find('div', class_="alert alert-danger")
            if notfound != None:
                print "Not Found!"
                return -1
            # Result items are <div class="item"><a class="movie-box <site>">.
            types = soup.select('div.item > a')
            items = soup.select('div.item > a > div.photo-info > span')
            for a, item in zip(types, items):
                #print a['class'][1],a['href'],item.get_text() # ['movie-box', 'mcaribbeancom']
                # Follow each result link to collect the cast names.
                cast = get_movie_cast(a['href'])
                ret.append((item.get_text(), cast, a['class'][1]))
            return ret
        except Exception, e:
            print e
            return -1
    #print "vcast not found!"
#step 2:得到片子的所有演员名
def get_movie_cast(url):
# url=r' https://avso.pw/cn/movie/yus'
data=download_html(url)
ret=[]
if data:
soup = BeautifulSoup(data,"lxml")
try:
notfound=soup.find('div',clasas_="alert alert-danger")
if notfound!=None:
print "Not Found!"
return -1
actress=soup.find_all('a',class_="avatar-box")
for a in actress:
span=a.find("span")
ret.append(span.text)
return " ".join(ret)
except Exception,e:
print e
return -1
#print "vcast not found!"
#wrapper function
def get_vidlist_full_info():
    """Read video ids from the txt files under e:\\avstore, look each one up
    online and save (title, cast, type) rows, comma-separated, to d:\\info.txt."""
    #idlist=['082516-001','080117_01','062717_110']
    idlist = walkpath(r"e:\\avstore")
    print idlist
    infos = []
    for id in idlist:
        info = serch_movie_byvid(id)
        if info != -1:
            # Skip ids that errored; successful lookups return a list of rows.
            infos += info
    #print infos
    infofile = 'd:\\info.txt'
    with open(infofile, "w") as f:
        for a, b, c in infos:
            print a, b, c
            f.write(a+","+b+","+c+"\n")
    print "File saved!%s" % infofile
def walkpath(path):
    """Collect formatted video ids from every .txt file directly under *path*.

    Each line of each file is treated as a path; its basename is normalised
    through common.format_rule1 and kept when non-empty.
    """
    txt_files = [entry for entry in os.listdir(path)
                 if all([os.path.splitext(entry)[1] == '.txt',
                         not os.path.isdir(path + "\\" + entry)])]
    collected = []
    for txtfile in txt_files:
        for line in open(path + "/" + txtfile):
            _head, tail = os.path.split(line)
            vid = common.format_rule1(tail.replace("\n", ""))
            if vid != "":
                #collected.append((vid, txtfile))
                collected.append(vid)
    return collected
# Script entry point: scrape full info for all stored ids.
if __name__ == '__main__' :
    #TXT_STORE_PATH="d:\\avstore\\"
    get_vidlist_full_info()
    # idlist=['082516-001','080117_01','062717_110']
    #ret=serch_movie_byvid('082516-001')
    #for a,b,c in ret:
    #    print a,b,c
| apache-2.0 |
roadmapper/ansible | test/units/modules/network/onyx/test_onyx_ntp_servers_peers.py | 9 | 5679 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_ntp_servers_peers
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxNtpServersPeersModule(TestOnyxModule):
    """Unit tests for the onyx_ntp_servers_peers Ansible module.

    Each test seeds the mocked device configuration from a fixture file and
    asserts which CLI commands (if any) the module would push.
    """
    module = onyx_ntp_servers_peers
    enabled = False

    def setUp(self):
        self.enabled = False
        super(TestOnyxNtpServersPeersModule, self).setUp()
        # Patch device interaction: reading the current NTP config...
        self.mock_get_config = patch.object(
            onyx_ntp_servers_peers.OnyxNTPServersPeersModule, "_show_peers_servers_config")
        self.get_config = self.mock_get_config.start()
        # ...and pushing configuration commands to the device.
        self.mock_load_config = patch(
            'ansible.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        super(TestOnyxNtpServersPeersModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # Every test starts from the same canned "show" output.
        config_file = 'onyx_ntp_servers_peers_show.cfg'
        data = load_fixture(config_file)
        self.get_config.return_value = data
        self.load_config.return_value = None

    def test_ntp_peer_state_no_change(self):
        set_module_args(dict(peer=[dict(ip_or_name='1.1.1.1',
                                        enabled='yes')]))
        self.execute_module(changed=False)

    def test_ntp_peer_state_with_change(self):
        set_module_args(dict(peer=[dict(ip_or_name='1.1.1.1',
                                        enabled='no')]))
        commands = ['ntp peer 1.1.1.1 disable']
        self.execute_module(changed=True, commands=commands)

    def test_ntp_peer_version_no_change(self):
        set_module_args(dict(peer=[dict(ip_or_name='1.1.1.1',
                                        version='4')]))
        self.execute_module(changed=False)

    def test_ntp_peer_version_with_change(self):
        set_module_args(dict(peer=[dict(ip_or_name='1.1.1.1',
                                        version='3')]))
        commands = ['ntp peer 1.1.1.1 version 3']
        self.execute_module(changed=True, commands=commands)

    def test_ntp_peer_key_id_no_change(self):
        set_module_args(dict(peer=[dict(ip_or_name='1.1.1.1',
                                        key_id='5')]))
        self.execute_module(changed=False)

    def test_ntp_peer_key_id_with_change(self):
        set_module_args(dict(peer=[dict(ip_or_name='1.1.1.1',
                                        key_id='6')]))
        commands = ['ntp peer 1.1.1.1 keyID 6']
        self.execute_module(changed=True, commands=commands)

    def test_ntp_peer_delete_with_change(self):
        set_module_args(dict(peer=[dict(ip_or_name='1.1.1.1',
                                        state='absent')]))
        commands = ['no ntp peer 1.1.1.1']
        self.execute_module(changed=True, commands=commands)

    def test_ntp_server_state_no_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          enabled='no')]))
        self.execute_module(changed=False)

    def test_ntp_server_state_with_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          enabled='yes')]))
        commands = ['no ntp server 2.2.2.2 disable']
        self.execute_module(changed=True, commands=commands)

    def test_ntp_server_version_no_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          version='4')]))
        self.execute_module(changed=False)

    def test_ntp_server_version_with_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          version='3')]))
        commands = ['ntp server 2.2.2.2 version 3']
        self.execute_module(changed=True, commands=commands)

    def test_ntp_server_keyID_no_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          key_id='99')]))
        self.execute_module(changed=False)

    def test_ntp_server_keyID_with_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          key_id='8')]))
        commands = ['ntp server 2.2.2.2 keyID 8']
        self.execute_module(changed=True, commands=commands)

    def test_ntp_server_trusted_state_no_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          trusted_enable='yes')]))
        self.execute_module(changed=False)

    def test_ntp_server_trusted_state_with_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          trusted_enable='no')]))
        commands = ['no ntp server 2.2.2.2 trusted-enable']
        self.execute_module(changed=True, commands=commands)

    def test_ntp_server_delete_with_change(self):
        set_module_args(dict(server=[dict(ip_or_name='2.2.2.2',
                                          state='absent')]))
        commands = ['no ntp server 2.2.2.2']
        self.execute_module(changed=True, commands=commands)

    def test_ntpdate_with_change(self):
        # ntpdate has no state in the fixture, so it always produces a command.
        set_module_args(dict(ntpdate='192.22.1.66'))
        commands = ['ntpdate 192.22.1.66']
        self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/scipy/optimize/tests/test_zeros.py | 59 | 1963 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from math import sqrt, exp, sin, cos
from numpy.testing import (TestCase, assert_almost_equal, assert_warns,
assert_, run_module_suite, assert_allclose)
from scipy.optimize import zeros as cc
from scipy.optimize import zeros
# Import testing parameters
from scipy.optimize._tstutils import functions, fstrings
class TestBasic(TestCase):
    """Convergence tests for the scalar root finders in scipy.optimize.zeros."""

    def run_check(self, method, name):
        # Every function in the shared test set has a root at x == 1
        # bracketed by [0.5, sqrt(3)].
        a = .5
        b = sqrt(3)
        for function, fname in zip(functions, fstrings):
            zero, r = method(function, a, b, xtol=0.1e-12, full_output=True)
            assert_(r.converged)
            assert_almost_equal(zero, 1.0, decimal=12,
                                err_msg='method %s, function %s' % (name, fname))

    def test_bisect(self):
        self.run_check(cc.bisect, 'bisect')

    def test_ridder(self):
        self.run_check(cc.ridder, 'ridder')

    def test_brentq(self):
        self.run_check(cc.brentq, 'brentq')

    def test_brenth(self):
        self.run_check(cc.brenth, 'brenth')

    def test_newton(self):
        # Two test functions with their first and second derivatives, so the
        # secant (no fprime), Newton (fprime) and Halley (fprime2) paths are
        # all exercised.
        f1 = lambda x: x**2 - 2*x - 1
        f1_1 = lambda x: 2*x - 2
        f1_2 = lambda x: 2.0 + 0*x
        f2 = lambda x: exp(x) - cos(x)
        f2_1 = lambda x: exp(x) + sin(x)
        f2_2 = lambda x: exp(x) + cos(x)
        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            x = zeros.newton(f, 3, tol=1e-6)
            assert_allclose(f(x), 0, atol=1e-6)
            x = zeros.newton(f, 3, fprime=f_1, tol=1e-6)
            assert_allclose(f(x), 0, atol=1e-6)
            x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6)
            assert_allclose(f(x), 0, atol=1e-6)

    def test_deriv_zero_warning(self):
        # Newton from x=0 on f(x)=x**2 hits f'(0) == 0 and must warn.
        func = lambda x: x**2
        dfunc = lambda x: 2*x
        assert_warns(RuntimeWarning, cc.newton, func, 0.0, dfunc)
# Allow running this test file directly.
if __name__ == '__main__':
    run_module_suite()
| gpl-2.0 |
damchilly/tweepy | tweepy/models.py | 56 | 14021 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import absolute_import, print_function
from tweepy.utils import parse_datetime, parse_html_value, parse_a_href
class ResultSet(list):
    """A list like object that holds results from a Twitter API query."""

    def __init__(self, max_id=None, since_id=None):
        super(ResultSet, self).__init__()
        self._max_id = max_id
        self._since_id = since_id

    @property
    def max_id(self):
        # An explicitly supplied max_id wins; otherwise it is derived as the
        # *smallest* id in the set, minus one (Twitter pagination convention).
        if self._max_id:
            return self._max_id
        ids = self.ids()
        if not ids:
            return None
        return min(ids) - 1

    @property
    def since_id(self):
        # An explicitly supplied since_id wins; otherwise it is the
        # *greatest* id in the set.
        if self._since_id:
            return self._since_id
        ids = self.ids()
        if not ids:
            return None
        return max(ids)

    def ids(self):
        """Ids of contained items, skipping items without an `id` attribute."""
        return [item.id for item in self if hasattr(item, 'id')]
class Model(object):
    """Base class for all Tweepy models; keeps a reference to the API client."""

    def __init__(self, api=None):
        self._api = api

    def __getstate__(self):
        # pickle support: the API reference is not picklable, so drop it.
        state = dict(self.__dict__)
        state.pop('_api', None)
        return state

    @classmethod
    def parse(cls, api, json):
        """Parse a JSON object into a model instance."""
        raise NotImplementedError

    @classmethod
    def parse_list(cls, api, json_list):
        """
        Parse a list of JSON objects into
        a result set of model instances.
        """
        results = ResultSet()
        for obj in json_list:
            if obj:
                results.append(cls.parse(api, obj))
        return results

    def __repr__(self):
        state = ['%s=%s' % (k, repr(v)) for (k, v) in vars(self).items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(state))
class Status(Model):
    """A tweet, plus convenience wrappers around the status-scoped API
    calls (destroy, retweet, favorite, ...)."""

    @classmethod
    def parse(cls, api, json):
        """Build a Status from a raw JSON dict, converting nested objects
        (author, place, retweeted status, timestamps) into model types."""
        status = cls(api)
        setattr(status, '_json', json)
        for k, v in json.items():
            if k == 'user':
                # Honour a custom model factory when an API handle is bound.
                user_model = getattr(api.parser.model_factory, 'user') if api else User
                user = user_model.parse(api, v)
                setattr(status, 'author', user)
                setattr(status, 'user', user)  # DEPRECATED: use `author` instead
            elif k == 'created_at':
                setattr(status, k, parse_datetime(v))
            elif k == 'source':
                if '<' in v:
                    # Source is an HTML anchor: split into label and URL.
                    setattr(status, k, parse_html_value(v))
                    setattr(status, 'source_url', parse_a_href(v))
                else:
                    setattr(status, k, v)
                    setattr(status, 'source_url', None)
            elif k == 'retweeted_status':
                setattr(status, k, Status.parse(api, v))
            elif k == 'place':
                if v is not None:
                    setattr(status, k, Place.parse(api, v))
                else:
                    setattr(status, k, None)
            else:
                setattr(status, k, v)
        return status

    def destroy(self):
        """Delete this tweet via the bound API."""
        return self._api.destroy_status(self.id)

    def retweet(self):
        """Retweet this tweet via the bound API."""
        return self._api.retweet(self.id)

    def retweets(self):
        """Return retweets of this tweet."""
        return self._api.retweets(self.id)

    def favorite(self):
        """Mark this tweet as a favorite via the bound API."""
        return self._api.create_favorite(self.id)

    def __eq__(self, other):
        # Statuses compare equal by tweet id only.
        if isinstance(other, Status):
            return self.id == other.id
        return NotImplemented

    def __ne__(self, other):
        # Delegate to __eq__, preserving NotImplemented for foreign types.
        result = self == other
        if result is NotImplemented:
            return result
        return not result
class User(Model):
    """A Twitter user account, with helpers for the user-scoped API calls."""

    @classmethod
    def parse(cls, api, json):
        """Build a User from a raw JSON dict."""
        user = cls(api)
        setattr(user, '_json', json)
        for key, value in json.items():
            if key == 'created_at':
                setattr(user, key, parse_datetime(value))
            elif key == 'status':
                setattr(user, key, Status.parse(api, value))
            elif key == 'following':
                # Twitter sends null when the flag is false; normalise to bool.
                setattr(user, key, value is True)
            else:
                setattr(user, key, value)
        return user

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse either a bare list or a ``{'users': [...]}`` envelope."""
        if isinstance(json_list, list):
            raw_users = json_list
        else:
            raw_users = json_list['users']
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in raw_users)
        return results

    def timeline(self, **kargs):
        """Tweets posted by this user."""
        return self._api.user_timeline(user_id=self.id, **kargs)

    def friends(self, **kargs):
        """Users this user follows."""
        return self._api.friends(user_id=self.id, **kargs)

    def followers(self, **kargs):
        """Users following this user."""
        return self._api.followers(user_id=self.id, **kargs)

    def follow(self):
        """Follow this user and update the local ``following`` flag."""
        self._api.create_friendship(user_id=self.id)
        self.following = True

    def unfollow(self):
        """Unfollow this user and update the local ``following`` flag."""
        self._api.destroy_friendship(user_id=self.id)
        self.following = False

    def lists_memberships(self, *args, **kargs):
        """Lists this user has been added to."""
        return self._api.lists_memberships(user=self.screen_name, *args, **kargs)

    def lists_subscriptions(self, *args, **kargs):
        """Lists this user subscribes to."""
        return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs)

    def lists(self, *args, **kargs):
        """Lists owned by this user."""
        return self._api.lists_all(user=self.screen_name, *args, **kargs)

    def followers_ids(self, *args, **kargs):
        """Ids of the users following this user."""
        return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
    """A direct message between two users."""

    @classmethod
    def parse(cls, api, json):
        """Build a DirectMessage from a raw JSON dict."""
        dm = cls(api)
        for key, value in json.items():
            if key in ('sender', 'recipient'):
                # Both endpoints of the conversation are full User objects.
                setattr(dm, key, User.parse(api, value))
            elif key == 'created_at':
                setattr(dm, key, parse_datetime(value))
            else:
                setattr(dm, key, value)
        return dm

    def destroy(self):
        """Delete this direct message via the bound API."""
        return self._api.destroy_direct_message(self.id)
class Friendship(Model):
    """Relationship between two users; ``parse`` returns a
    (source, target) pair of Friendship instances."""

    @classmethod
    def parse(cls, api, json):
        relationship = json['relationship']

        def build(side):
            # Copy the raw attributes of one side onto a fresh instance.
            obj = cls(api)
            for key, value in relationship[side].items():
                setattr(obj, key, value)
            return obj

        return build('source'), build('target')
class Category(Model):
    """A suggested-user category."""

    @classmethod
    def parse(cls, api, json):
        """Build a Category by copying the raw JSON attributes verbatim."""
        category = cls(api)
        category.__dict__.update(json)
        return category
class SavedSearch(Model):
    """A saved search query."""

    @classmethod
    def parse(cls, api, json):
        """Build a SavedSearch from a raw JSON dict."""
        ss = cls(api)
        for key, value in json.items():
            # Only the creation timestamp needs conversion.
            setattr(ss, key, parse_datetime(value) if key == 'created_at' else value)
        return ss

    def destroy(self):
        """Delete this saved search via the bound API."""
        return self._api.destroy_saved_search(self.id)
class SearchResults(ResultSet):
    """ResultSet of statuses plus the search metadata returned by the API."""

    @classmethod
    def parse(cls, api, json):
        metadata = json['search_metadata']
        results = SearchResults()
        # Copy the interesting metadata fields (missing ones become None).
        for field in ('refresh_url', 'completed_in', 'query', 'count',
                      'next_results'):
            setattr(results, field, metadata.get(field))
        # Honour a custom model factory when an API handle is bound.
        status_model = getattr(api.parser.model_factory, 'status') if api else Status
        for raw_status in json['statuses']:
            results.append(status_model.parse(api, raw_status))
        return results
class List(Model):
    """A Twitter list, with helpers for the list-scoped API calls."""

    @classmethod
    def parse(cls, api, json):
        """Build a List from a raw JSON dict."""
        lst = List(api)
        for k, v in json.items():
            if k == 'user':
                setattr(lst, k, User.parse(api, v))
            elif k == 'created_at':
                setattr(lst, k, parse_datetime(v))
            else:
                setattr(lst, k, v)
        return lst

    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        """Parse either a bare list or a ``{'lists': [...]}`` envelope.

        ``result_set`` is accepted for signature compatibility but unused.
        """
        results = ResultSet()
        if isinstance(json_list, dict):
            json_list = json_list['lists']
        for obj in json_list:
            results.append(cls.parse(api, obj))
        return results

    def update(self, **kargs):
        """Update this list's settings via the bound API."""
        return self._api.update_list(self.slug, **kargs)

    def destroy(self):
        """Delete this list via the bound API."""
        return self._api.destroy_list(self.slug)

    def timeline(self, **kargs):
        """Tweets from this list's members."""
        return self._api.list_timeline(self.user.screen_name,
                                       self.slug,
                                       **kargs)

    def add_member(self, id):
        """Add the user with `id` to this list."""
        return self._api.add_list_member(self.slug, id)

    def remove_member(self, id):
        """Remove the user with `id` from this list."""
        return self._api.remove_list_member(self.slug, id)

    def members(self, **kargs):
        """Users that are members of this list."""
        return self._api.list_members(self.user.screen_name,
                                      self.slug,
                                      **kargs)

    def is_member(self, id):
        """Whether the user with `id` is a member of this list."""
        return self._api.is_list_member(self.user.screen_name,
                                        self.slug,
                                        id)

    def subscribe(self):
        """Subscribe the authenticated user to this list."""
        return self._api.subscribe_list(self.user.screen_name, self.slug)

    def unsubscribe(self):
        """Unsubscribe the authenticated user from this list."""
        return self._api.unsubscribe_list(self.user.screen_name, self.slug)

    def subscribers(self, **kargs):
        """Users subscribed to this list."""
        return self._api.list_subscribers(self.user.screen_name,
                                          self.slug,
                                          **kargs)

    def is_subscribed(self, id):
        """Whether the user with `id` subscribes to this list."""
        return self._api.is_subscribed_list(self.user.screen_name,
                                            self.slug,
                                            id)
class Relation(Model):
    """An entry of the `related_results` API response."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for key, value in json.items():
            if key == 'value' and json['kind'] in ('Tweet', 'LookedupStatus'):
                # Tweet-like payloads are parsed into full Status objects.
                setattr(result, key, Status.parse(api, value))
            elif key == 'results':
                # Nested result groups recurse into further Relations.
                setattr(result, key, Relation.parse_list(api, value))
            else:
                setattr(result, key, value)
        return result
class Relationship(Model):
    """Connection flags between the authenticated user and another user."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for key, value in json.items():
            if key == 'connections':
                # Flatten the `connections` list into boolean attributes.
                result.is_following = 'following' in value
                result.is_followed_by = 'followed_by' in value
            else:
                setattr(result, key, value)
        return result
class JSONModel(Model):
    """Pass-through model: ``parse`` returns the raw JSON unchanged."""

    @classmethod
    def parse(cls, api, json):
        return json
class IDModel(Model):
    """Model for id-list responses, which come either as a bare list or
    wrapped in an ``{'ids': [...]}`` envelope."""

    @classmethod
    def parse(cls, api, json):
        return json if isinstance(json, list) else json['ids']
class BoundingBox(Model):
    """A geographic bounding box attached to a Place."""

    @classmethod
    def parse(cls, api, json):
        """Build a BoundingBox from a raw JSON dict (which may be None)."""
        result = cls(api)
        if json is not None:
            result.__dict__.update(json)
        return result

    def origin(self):
        """
        Return longitude, latitude of southwest (bottom, left) corner of
        bounding box, as a tuple.

        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][0])

    def corner(self):
        """
        Return longitude, latitude of northeast (top, right) corner of
        bounding box, as a tuple.

        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][2])
class Place(Model):
    """A Twitter place (location metadata attached to statuses)."""

    @classmethod
    def parse(cls, api, json):
        """Build a Place from a raw JSON dict."""
        place = cls(api)
        for k, v in json.items():
            if k == 'bounding_box':
                # bounding_box value may be null (None.)
                # Example: "United States" (id=96683cc9126741d1)
                if v is not None:
                    t = BoundingBox.parse(api, v)
                else:
                    t = v
                setattr(place, k, t)
            elif k == 'contained_within':
                # contained_within is a list of Places.
                setattr(place, k, Place.parse_list(api, v))
            else:
                setattr(place, k, v)
        return place

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse either a bare list of places or the reverse-geocode
        envelope ``{'result': {'places': [...]}}``."""
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['result']['places']
        results = ResultSet()
        for obj in item_list:
            results.append(cls.parse(api, obj))
        return results
class Media(Model):
    """An uploaded media entity (photo, video, ...)."""

    @classmethod
    def parse(cls, api, json):
        """Build a Media by copying the raw JSON attributes verbatim."""
        media = cls(api)
        media.__dict__.update(json)
        return media
class ModelFactory(object):
    """
    Used by parsers for creating instances
    of models. You may subclass this factory
    to add your own extended models.
    """
    status = Status
    user = User
    direct_message = DirectMessage
    friendship = Friendship
    saved_search = SavedSearch
    search_results = SearchResults
    category = Category
    # NOTE: intentionally shadows the builtin name `list` inside this
    # class namespace only; looked up as `model_factory.list`.
    list = List
    relation = Relation
    relationship = Relationship
    media = Media
    json = JSONModel
    ids = IDModel
    place = Place
    bounding_box = BoundingBox
| mit |
psychopy/psychopy | psychopy/experiment/py2js.py | 1 | 7669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Converting code parameters and components from python (PsychoPy)
to JS (ES6/PsychoJS)
"""
import ast
import astunparse
import esprima
from os import path
from psychopy.constants import PY3
from psychopy import logging
if PY3:
from past.builtins import unicode
from io import StringIO
else:
from StringIO import StringIO
from psychopy.experiment.py2js_transpiler import translatePythonToJavaScript
class NamesJS(dict):
    """Dict mapping Python names to their JavaScript/PsychoJS equivalents.

    Unknown names are passed through unchanged, so any identifier with no
    registered translation keeps its Python spelling in the JS output.
    """

    def __getitem__(self, name):
        try:
            return dict.__getitem__(self, name)
        except KeyError:
            # No translation registered: keep the original name. The bare
            # `except:` used previously also masked unrelated errors such as
            # TypeError for unhashable keys; only a missing key is expected.
            return "{}".format(name)
namesJS = NamesJS()
# Built-in translations: Python math names -> their JS/PsychoJS equivalents.
namesJS['sin'] = 'Math.sin'
namesJS['cos'] = 'Math.cos'
namesJS['tan'] = 'Math.tan'
namesJS['pi'] = 'Math.PI'
namesJS['rand'] = 'Math.random'
namesJS['random'] = 'Math.random'
namesJS['sqrt'] = 'Math.sqrt'
namesJS['abs'] = 'Math.abs'
namesJS['randint'] = 'util.randint'
namesJS['round'] = 'util.round'  # better than Math.round, supports n DPs arg
namesJS['sum'] = 'util.sum'
class TupleTransformer(ast.NodeTransformer):
    """ An ast subclass that walks the abstract syntax tree and
    allows modification of nodes.

    This class transforms a tuple to a list.

    :returns node
    """
    def visit_Tuple(self, node):
        # JS has no tuple literal; emit a list with the same elements/context.
        return ast.List(node.elts, node.ctx)
class Unparser(astunparse.Unparser):
    """astunparser had buried the future_imports option underneath its init()
    so we need to override that method and change it."""

    def __init__(self, tree, file):
        """Unparser(tree, file=sys.stdout) -> None.
        Print the source for tree to file."""
        self.f = file
        # Force 'unicode_literals' so py2 string constants unparse as unicode.
        self.future_imports = ['unicode_literals']
        self._indent = 0
        self.dispatch(tree)
        self.f.flush()
def unparse(tree):
    """Render an AST back to source text using the customised Unparser."""
    buffer = StringIO()
    Unparser(tree, file=buffer)
    return buffer.getvalue()
def expression2js(expr):
    """Convert a short expression (e.g. a Component Parameter) Python to JS.

    Returns the translated string, or None when `expr` cannot be parsed
    as Python at all (the error is logged).
    """
    # if the code contains a tuple (anywhere), convert parenths to be list.
    # This now works for compounds like `(2*(4, 5))` where the inner
    # parenths becomes a list and the outer parens indicate priority.
    # This works by running an ast transformer class to swap the contents of the tuple
    # into a list for the number of tuples in the expression.
    try:
        syntaxTree = ast.parse(expr)
    except Exception:
        # Retry with an explicit unicode coercion (py2 byte strings).
        try:
            syntaxTree = ast.parse(unicode(expr))
        except Exception as err:
            logging.error(err)
            return

    for node in ast.walk(syntaxTree):
        TupleTransformer().visit(node)  # Transform tuples to list
        # for py2 using 'unicode_literals' we don't want
        if isinstance(node, ast.Str) and type(node.s)==bytes:
            node.s = unicode(node.s, 'utf-8')
        elif isinstance(node, ast.Str) and node.s.startswith("u'"):
            node.s = node.s[1:]
        if isinstance(node, ast.Name):
            if node.id == 'undefined':
                continue
            # Map known Python names (sin, pi, ...) to their JS equivalents.
            node.id = namesJS[node.id]
    jsStr = unparse(syntaxTree).strip()

    # Only run the full transpiler on single expressions (no assignments,
    # statement separators or line breaks); otherwise keep the AST-based
    # translation above.
    if not any(ch in jsStr for ch in ("=",";","\n")):
        try:
            jsStr = translatePythonToJavaScript(jsStr)
            if jsStr.endswith(';\n'):
                jsStr = jsStr[:-2]
        except:
            # If translation fails, just use old translation
            pass
    return jsStr
def snippet2js(expr):
    """Convert a multi-line Python snippet (e.g. a Code Component) to JS.

    Currently a deliberate no-op placeholder: statement-level translation
    (if/for blocks and the like) is not implemented yet, so the snippet is
    returned unchanged.
    """
    return expr
def findUndeclaredVariables(ast, allUndeclaredVariables):
    """Detect undeclared variables in a list of esprima statement nodes.

    Returns the names newly found in this subtree; `allUndeclaredVariables`
    accumulates every name seen so far (mutated in place) so each variable
    is only declared once per program.
    """
    # NOTE(review): the parameter `ast` shadows the stdlib `ast` module
    # imported at file top; harmless here because this function only walks
    # esprima nodes, but renaming it would avoid confusion.
    undeclaredVariables = []
    for expression in ast:
        if expression.type == 'ExpressionStatement':
            expression = expression.expression

        if expression.type == 'AssignmentExpression' and expression.operator == '=' and expression.left.type == 'Identifier':
            # Plain `name = ...` assignment: a candidate for `var name;`.
            variableName = expression.left.name
            if variableName not in allUndeclaredVariables:
                undeclaredVariables.append(variableName)
                allUndeclaredVariables.append(variableName)
        elif expression.type == 'IfStatement':
            # Recurse into the consequent branch (single statement or block).
            if expression.consequent.body is None:
                consequentVariables = findUndeclaredVariables(
                    [expression.consequent], allUndeclaredVariables)
            else:
                consequentVariables = findUndeclaredVariables(
                    expression.consequent.body, allUndeclaredVariables)
            undeclaredVariables.extend(consequentVariables)
        elif expression.type == "ReturnStatement":
            # Recurse into returned function expressions (closures).
            if expression.argument.type == "FunctionExpression":
                consequentVariables = findUndeclaredVariables(
                    expression.argument.body.body, allUndeclaredVariables)
                undeclaredVariables.extend(consequentVariables)
    return undeclaredVariables
def addVariableDeclarations(inputProgram, fileName):
    """Transform the input program by adding just before each function
    a declaration for its undeclared variables.

    Returns the transformed JS source; on a parse error the input is
    returned unchanged (and the error logged) so a file can still be
    written.
    """
    # parse Javascript code into abstract syntax tree:
    # NB: esprima: https://media.readthedocs.org/pdf/esprima/4.0/esprima.pdf
    try:
        ast = esprima.parseScript(inputProgram, {'range': True, 'tolerant': True})
    except esprima.error_handler.Error as err:
        logging.error("{0} in {1}".format(err, path.split(fileName)[1]))
        return inputProgram  # So JS can be written to file

    # find undeclared vars in functions and declare them before the function
    outputProgram = inputProgram
    # `offset` tracks how much text we have inserted so far, so the
    # source ranges reported by esprima stay valid.
    offset = 0
    allUndeclaredVariables = []

    for expression in ast.body:
        if expression.type == 'FunctionDeclaration':
            # find all undeclared variables:
            undeclaredVariables = findUndeclaredVariables(expression.body.body,
                                                          allUndeclaredVariables)
            # add declarations (var) just before the function:
            funSpacing = ['', '\n'][len(undeclaredVariables) > 0]  # for consistent function spacing
            declaration = funSpacing + '\n'.join(['var ' + variable + ';' for variable in
                                                  undeclaredVariables]) + '\n'
            startIndex = expression.range[0] + offset
            outputProgram = outputProgram[
                :startIndex] + declaration + outputProgram[
                startIndex:]
            offset += len(declaration)
    return outputProgram
if __name__ == '__main__':
    # Smoke test: print how assorted Python expressions translate to JS.
    for expr in ['sin(t)', 't*5',
                 '(3, 4)', '(5*-2)',  # tuple and not tuple
                 '(1,(2,3), (1,2,3), (-4,-5,-6))', '2*(2, 3)',  # combinations
                 '[1, (2*2)]',  # List with nested operations returns list + nested tuple
                 '(.7, .7)',  # A tuple returns list
                 '(-.7, .7)',  # A tuple with unary operators returns nested lists
                 '[-.7, -.7]',  # A list with unary operators returns list with nested tuple
                 '[-.7, (-.7 * 7)]']:  # List with unary operators and nested tuple with operations returns list + tuple
        print("{} -> {}".format(repr(expr), repr(expression2js(expr))))
| gpl-3.0 |
tonybaloney/st2contrib | packs/github/actions/lib/base.py | 3 | 4377 | from github import Github
import requests
from bs4 import BeautifulSoup
import json
from st2actions.runners.pythonrunner import Action
__all__ = [
'BaseGithubAction'
]
# Default Github web URL (used by tasks which directly scrape data from HTML)
# pages
DEFAULT_WEB_URL = 'https://github.com'
# Default Github API url
DEFAULT_API_URL = 'https://api.github.com'
class BaseGithubAction(Action):
    """Base class for the GitHub pack actions.

    Builds a PyGithub client for either github.com or a GitHub Enterprise
    instance, resolves per-user OAuth tokens stored in the st2 datastore,
    and provides a raw REST helper for endpoints PyGithub does not cover.
    """

    def __init__(self, config):
        super(BaseGithubAction, self).__init__(config=config)
        token = self.config.get('token', None)
        self.token = token or None
        self.github_url = self.config.get('github_url', DEFAULT_API_URL)
        self.enterprise_url = self.config.get('enterprise_url', None)
        self.default_github_type = self.config.get('github_type', None)
        self._client = Github(self.token, base_url=self.github_url)
        self._session = requests.Session()

    def _web_session(self):
        '''Returns a requests session to scrape off the web'''
        # NOTE: logs in through the HTML form (CSRF token + form action),
        # so this is fragile against changes to GitHub's login page.
        login_url = DEFAULT_WEB_URL + '/login'
        session = requests.Session()
        request = session.get(login_url).text
        html = BeautifulSoup(request)
        token = html.find('input', {'name': 'authenticity_token'}).attrs['value']
        commit_value = html.find('input', {'name': 'commit'}).attrs['value']
        session_path = html.find('form', {'method': 'post'}).attrs['action']
        login_data = {
            'login': self.config['user'],
            'password': self.config['password'],
            'commit': commit_value,
            'authenticity_token': token
        }
        session_url = DEFAULT_WEB_URL + session_path
        session.post(session_url, data=login_data)
        return session

    def _get_analytics(self, category, repo):
        """Fetch traffic/analytics JSON for `repo` by scraping the web UI."""
        url = DEFAULT_WEB_URL + repo + '/graphs/' + category + '.json'
        s = self._web_session()
        response = s.get(url)
        return response.json()

    def _is_enterprise(self, github_type):
        """Resolve `github_type` ('enterprise'/'online'/other) to a boolean,
        falling back to the pack-level default; raises ValueError when
        neither value is usable."""
        if github_type == "enterprise":
            return True
        elif github_type == "online":
            return False
        elif self.default_github_type == "enterprise":
            return True
        elif self.default_github_type == "online":
            return False
        else:
            raise ValueError("Default GitHub Invalid!")

    def _get_user_token(self, user, enterprise):
        """
        Return a users GitHub OAuth Token, if it fails replace '-'
        with '.' as '.' is not valid for GitHub names.
        """
        if enterprise:
            token_name = "token_enterprise_"
        else:
            token_name = "token_"
        token = self.action_service.get_value(token_name + user)
        # if a token is not returned, try using reversing changes made by
        # GitHub Enterpise during LDAP sync'ing.
        if token is None:
            token = self.action_service.get_value(
                token_name + user.replace("-", "."))
        return token

    def _change_to_user_token(self, user, enterprise=False):
        """Rebuild the PyGithub client using `user`'s stored token."""
        token = self._get_user_token(user, enterprise)
        if enterprise:
            self._client = Github(token, base_url=self.enterprise_url)
        else:
            self._client = Github(token, base_url=self.github_url)
        return True

    def _request(self, method, uri, payload, token, enterprise):
        """Issue a raw REST request; return the decoded JSON body, or None
        for a 204 No Content response. Raises on HTTP or connection errors."""
        headers = {'Authorization': 'token {}'.format(token)}
        if enterprise:
            url = "{}{}".format(self.enterprise_url, uri)
        else:
            url = "{}{}".format(self.github_url, uri)
        try:
            # NOTE: verify=False disables TLS certificate validation; kept
            # for self-signed Enterprise installs but it weakens security.
            r = self._session.request(method,
                                      url,
                                      data=json.dumps(payload),
                                      headers=headers,
                                      verify=False)
            r.raise_for_status()
        except requests.exceptions.HTTPError:
            raise Exception(
                "ERROR: '{}'ing to '{}' - status code: {} payload: {}".format(
                    method, url, r.status_code, json.dumps(payload)))
        # 'except X as e' replaces the Python 2-only 'except X, e' form,
        # which is a SyntaxError on Python 3; 'as' works on 2.6+ as well.
        except requests.exceptions.ConnectionError as e:
            raise Exception("Could not connect to: {} : {}".format(url, e))
        else:
            if r.status_code == 204:
                return None
            else:
                return r.json()
| apache-2.0 |
Matt-Deacalion/django | django/conf/locale/id/formats.py | 504 | 2135 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i"
TIME_FORMAT = 'G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d-%m-%y', '%d/%m/%y',  # '25-10-09', '25/10/09'
    '%d-%m-%Y', '%d/%m/%Y',  # '25-10-2009', '25/10/2009'
    '%d %b %Y',  # '25 Oct 2006'
    '%d %B %Y',  # '25 October 2006'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S',  # '14.30.59'
    '%H.%M',  # '14.30'
]
DATETIME_INPUT_FORMATS = [
    '%d-%m-%Y %H.%M.%S',  # '25-10-2009 14.30.59'
    '%d-%m-%Y %H.%M.%S.%f',  # '25-10-2009 14.30.59.000200'
    '%d-%m-%Y %H.%M',  # '25-10-2009 14.30'
    '%d-%m-%Y',  # '25-10-2009'
    '%d-%m-%y %H.%M.%S',  # '25-10-09 14.30.59'
    '%d-%m-%y %H.%M.%S.%f',  # '25-10-09 14.30.59.000200'
    '%d-%m-%y %H.%M',  # '25-10-09 14.30'
    '%d-%m-%y',  # '25-10-09'
    '%m/%d/%y %H.%M.%S',  # '10/25/06 14.30.59'
    '%m/%d/%y %H.%M.%S.%f',  # '10/25/06 14.30.59.000200'
    '%m/%d/%y %H.%M',  # '10/25/06 14.30'
    '%m/%d/%y',  # '10/25/06'
    '%m/%d/%Y %H.%M.%S',  # '10/25/2009 14.30.59'
    '%m/%d/%Y %H.%M.%S.%f',  # '10/25/2009 14.30.59.000200'
    '%m/%d/%Y %H.%M',  # '10/25/2009 14.30'
    '%m/%d/%Y',  # '10/25/2009'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
exp-publishing/cloudbot-plugins | plugins/gaming.py | 1 | 4430 | """
gaming.py
Dice, coins, and random generation for gaming.
Modified By:
- Luke Rogers <https://github.com/lukeroge>
- Josh Elsasser <https://github.com/jaelsasser>
License:
GPL v3
"""
import asyncio
import random
import re
from cloudbot import hook
# Collapses runs of whitespace (used to normalise dice specs).
whitespace_re = re.compile(r'\s+')
# Full dice-roll spec, e.g. "2d20-d5+4", optionally followed by a description.
valid_diceroll = re.compile(r'^([+-]?(?:\d+|\d*d(?:\d+|F))(?:[+-](?:\d+|\d*d(?:\d+|F)))*)( .+)?$', re.I)
# Splits a spec into its signed terms, e.g. "2d20", "-d5", "+4".
sign_re = re.compile(r'[+-]?(?:\d*d)?(?:\d+|F)', re.I)
# Splits a single term into (count, sides); sides may be "F" for fudge dice.
split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I)
def n_rolls(count, n):
    """Roll an n-sided die `count` times and return the individual results.

    For 100 or more ordinary dice/coins, a single-element list containing a
    normal approximation of the total is returned instead of rolling each
    die; fudge dice ("F") are simply capped at 100 rolls.

    :type count: int
    :type n: int | str
    """
    if n == "F":
        # Fudge dice: uniform on {-1, 0, 1}, capped at 100 dice.
        return [random.randint(-1, 1) for _ in range(min(count, 100))]

    if n < 2:
        # A coin flip (0 or 1).
        if count < 100:
            return [random.randint(0, 1) for _ in range(count)]
        # Approximate the sum of many flips with a normal distribution.
        return [int(random.normalvariate(.5 * count, (.75 * count) ** .5))]

    if count < 100:
        return [random.randint(1, n) for _ in range(count)]
    # Approximate the sum of many dice with a normal distribution.
    mean = .5 * (1 + n) * count
    variance = ((n + 1) * (2 * n + 1) / 6. - (.5 * (1 + n)) ** 2) * count
    return [int(random.normalvariate(mean, variance ** .5))]
@asyncio.coroutine
@hook.command("roll", "dice")
def dice(text, notice):
    """<dice roll> - simulates dice rolls. Example: 'dice 2d20-d5+4 roll 2': D20s, subtract 1D5, add 4

    :type text: str
    """
    # `text` may be a pre-parsed regex match (from an on_regex hook) or the
    # raw command argument string.
    if hasattr(text, "groups"):
        text, desc = text.groups()
    else:  # type(text) == str
        match = valid_diceroll.match(whitespace_re.sub("", text))
        if match:
            text, desc = match.groups()
        else:
            notice("Invalid dice roll '{}'".format(text))
            return

    if "d" not in text:
        return

    spec = whitespace_re.sub('', text)
    if not valid_diceroll.match(spec):
        notice("Invalid dice roll '{}'".format(text))
        return
    groups = sign_re.findall(spec)

    total = 0
    rolls = []
    for roll in groups:
        count, side = split_re.match(roll).groups()
        # A bare sign or empty count means a single die.
        count = int(count) if count not in " +-" else 1
        if side.upper() == "F":  # fudge dice are basically 1d3-2
            for fudge in n_rolls(count, "F"):
                # Render +/- with IRC colour codes; 0 as plain text.
                if fudge == 1:
                    rolls.append("\x033+\x0F")
                elif fudge == -1:
                    rolls.append("\x034-\x0F")
                else:
                    rolls.append("0")
                total += fudge
        elif side == "":
            # A bare modifier like "+4": add it directly to the total.
            total += count
        else:
            side = int(side)
            try:
                if count > 0:
                    d = n_rolls(count, side)
                    rolls += list(map(str, d))
                    total += sum(d)
                else:
                    # Negative term ("-2d5"): subtract the rolled dice.
                    d = n_rolls(-count, side)
                    rolls += [str(-x) for x in d]
                    total -= sum(d)
            except OverflowError:
                # I have never seen this happen. If you make this happen, you win a cookie
                return "Thanks for overflowing a float, jerk >:["

    if desc:
        return "{}: {} ({})".format(desc.strip(), total, ", ".join(rolls))
    else:
        return "{} ({})".format(total, ", ".join(rolls))
@asyncio.coroutine
@hook.command("choice", "choose")
def choose(text, notice):
    """<choice1>, [choice2], [choice3], etc. - randomly picks one of the given choices

    :type text: str
    """
    # Split on commas; surrounding whitespace is kept as part of the choice.
    choices = re.findall(r'([^,]+)', text)
    if len(choices) == 1:
        # A single choice is pointless; show the usage text instead.
        notice(choose.__doc__)
        return

    return random.choice(choices)
@asyncio.coroutine
@hook.command(autohelp=False)
def coin(text, notice, action):
    """[amount] - flips [amount] coins

    :type text: str
    """
    if text:
        try:
            amount = int(text)
        except (ValueError, TypeError):
            notice("Invalid input '{}': not a number".format(text))
            return
    else:
        amount = 1

    if amount == 1:
        action("flips a coin and gets {}.".format(random.choice(["heads", "tails"])))
    elif amount == 0:
        action("makes a coin flipping motion")
    else:
        # For many flips, approximate the head count with a normal
        # distribution instead of flipping each coin individually.
        heads = int(random.normalvariate(.5 * amount, (.75 * amount) ** .5))
        tails = amount - heads
        action("flips {} coins and gets {} heads and {} tails.".format(amount, heads, tails))
| gpl-3.0 |
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_outline04.py | 1 | 2728 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Paths of the generated workbook and the Excel-produced reference.
        self.maxDiff = None

        filename = 'outline04.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        # Workbook parts that legitimately differ between the two files.
        self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
        self.ignore_elements = {}

    def test_create_file(self):
        """
        Test the creation of a outlines in a XlsxWriter file. These tests are
        based on the outline programs in the examples directory.
        """
        filename = self.got_filename

        ####################################################

        workbook = Workbook(filename)
        worksheet4 = workbook.add_worksheet('Outline levels')

        levels = [
            "Level 1", "Level 2", "Level 3", "Level 4", "Level 5", "Level 6",
            "Level 7", "Level 6", "Level 5", "Level 4", "Level 3", "Level 2",
            "Level 1"]

        worksheet4.write_column('A1', levels)

        # Outline rows 1..13 with levels rising to 7 and back down to 1,
        # matching the `levels` labels written above.
        worksheet4.set_row(0, None, None, {'level': 1})
        worksheet4.set_row(1, None, None, {'level': 2})
        worksheet4.set_row(2, None, None, {'level': 3})
        worksheet4.set_row(3, None, None, {'level': 4})
        worksheet4.set_row(4, None, None, {'level': 5})
        worksheet4.set_row(5, None, None, {'level': 6})
        worksheet4.set_row(6, None, None, {'level': 7})
        worksheet4.set_row(7, None, None, {'level': 6})
        worksheet4.set_row(8, None, None, {'level': 5})
        worksheet4.set_row(9, None, None, {'level': 4})
        worksheet4.set_row(10, None, None, {'level': 3})
        worksheet4.set_row(11, None, None, {'level': 2})
        worksheet4.set_row(12, None, None, {'level': 1})

        workbook.close()

        ####################################################

        got, exp = _compare_xlsx_files(self.got_filename,
                                       self.exp_filename,
                                       self.ignore_files,
                                       self.ignore_elements)

        self.assertEqual(got, exp)

    def tearDown(self):
        # Cleanup.
        if os.path.exists(self.got_filename):
            os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
defionscode/ansible-modules-core | utilities/logic/debug.py | 66 | 2141 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: debug
short_description: Print statements during execution
description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook. Useful for debugging together with the 'when:' directive.
version_added: "0.8"
options:
msg:
description:
- The customized message that is printed. If omitted, prints a generic
message.
required: false
default: "Hello world!"
var:
description:
- A variable name to debug. Mutually exclusive with the 'msg' option.
verbosity:
description:
- A number that controls when the debug is run, if you set to 3 it will only run debug when -vvv or above
required: False
default: 0
version_added: "2.1"
author:
- "Dag Wieers (@dagwieers)"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Example that prints the loopback address and gateway for each host
- debug: msg="System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}"
- debug: msg="System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}"
when: ansible_default_ipv4.gateway is defined
- shell: /usr/bin/uptime
register: result
- debug: var=result verbosity=2
- name: Display all variables/facts known for a host
debug: var=hostvars[inventory_hostname] verbosity=4
'''
| gpl-3.0 |
prabhjyotsingh/incubator-zeppelin | flink/interpreter/src/main/resources/python/zeppelin_pyflink.py | 10 | 2806 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyflink.common import *
from pyflink.dataset import *
from pyflink.datastream import *
from pyflink.table import *
from pyflink.table.catalog import *
from pyflink.table.descriptors import *
from pyflink.table.window import *
from pyflink.table.udf import *
import pyflink
from py4j.java_gateway import java_import
intp = gateway.entry_point
pyflink.java_gateway._gateway = gateway
pyflink.java_gateway.import_flink_view(gateway)
pyflink.java_gateway.install_exception_handler()
b_env = pyflink.dataset.ExecutionEnvironment(intp.getJavaExecutionEnvironment())
s_env = StreamExecutionEnvironment(intp.getJavaStreamExecutionEnvironment())
if intp.isFlink110():
bt_env = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("blink"), True)
bt_env_2 = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("flink"), False)
st_env = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("blink"), True)
st_env_2 = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("flink"), False)
else:
bt_env = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("blink"))
bt_env_2 = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("flink"))
st_env = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("blink"))
st_env_2 = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("flink"))
from zeppelin_context import PyZeppelinContext
#TODO(zjffdu) merge it with IPyFlinkZeppelinContext
class PyFlinkZeppelinContext(PyZeppelinContext):
    """Zeppelin context specialised for PyFlink: renders Flink Tables
    through the Java-side interpreter helpers."""

    def __init__(self, z, gateway):
        super(PyFlinkZeppelinContext, self).__init__(z, gateway)

    def show(self, obj, **kwargs):
        """Display `obj`; Flink Tables are rendered via the Java helper,
        everything else falls back to the base implementation."""
        from pyflink.table import Table
        if isinstance(obj, Table):
            if 'stream_type' in kwargs:
                # Streaming result: delegate to the interpreter's stream renderer.
                self.z.show(obj._j_table, kwargs['stream_type'], kwargs)
            else:
                print(self.z.showData(obj._j_table))
        else:
            super(PyFlinkZeppelinContext, self).show(obj, **kwargs)
z = __zeppelin__ = PyFlinkZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
| apache-2.0 |
kbdick/RecycleTracker | recyclecollector/scrap/gdata-2.0.18/src/gdata/Crypto/PublicKey/qNEW.py | 228 | 5545 | #
# qNEW.py : The q-NEW signature algorithm.
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util.number import *
from Crypto.Hash import SHA
class error (Exception):
    """Exception raised for qNEW-specific key generation failures."""
    pass

HASHBITS = 160 # Size of SHA digests
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)
    Generate a qNEW key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    obj=qNEWobj()
    # Generate prime numbers p and q.  q is a 160-bit prime
    # number.  p is another prime number (the modulus) whose bit
    # size is chosen by the caller, and is generated so that p-1
    # is a multiple of q.
    #
    # Note that only a single seed is used to
    # generate p and q; if someone generates a key for you, you can
    # use the seed to duplicate the key generation.  This can
    # protect you from someone generating values of p,q that have
    # some special form that's easy to break.
    if progress_func:
        progress_func('p,q\n')
    while (1):
        obj.q = getPrime(160, randfunc)
        #           assert pow(2, 159L)<obj.q<pow(2, 160L)
        # p is derived deterministically from the seed S (the bytes of q)
        # by hashing S+N+k with SHA, so the same seed reproduces the key.
        obj.seed = S = long_to_bytes(obj.q)
        C, N, V = 0, 2, {}
        # Compute b and n such that bits-1 = b + n*HASHBITS
        n= (bits-1) / HASHBITS
        b= (bits-1) % HASHBITS ; powb=2L << b
        powL1=pow(long(2), bits-1)
        while C<4096:
            # The V array will contain (bits-1) bits of random
            # data, that are assembled to produce a candidate
            # value for p.
            for k in range(0, n+1):
                V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
            p = V[n] % powb
            for k in range(n-1, -1, -1):
                p= (p << long(HASHBITS) )+V[k]
            p = p+powL1         # Ensure the high bit is set
            # Ensure that p-1 is a multiple of q
            p = p - (p % (2*obj.q)-1)
            # If p is still the right size, and it's prime, we're done!
            if powL1<=p and isPrime(p):
                break
            # Otherwise, increment the counter and try again
            C, N = C+1, N+n+1
        if C<4096:
            break   # Ended early, so exit the while loop
        if progress_func:
            progress_func('4096 values of p tried\n')
    obj.p = p
    power=(p-1)/obj.q
    # Next parameter: g = h**((p-1)/q) mod p, such that h is any
    # number <p-1, and g>1.  g is kept; h can be discarded.
    if progress_func:
        progress_func('h,g\n')
    while (1):
        h=bytes_to_long(randfunc(bits)) % (p-1)
        g=pow(h, power, p)
        if 1<h<p-1 and g>1:
            break
    obj.g=g
    # x is the private key information, and is
    # just a random number between 0 and q.
    # y=g**x mod p, and is part of the public information.
    if progress_func:
        progress_func('x,y\n')
    while (1):
        x=bytes_to_long(randfunc(20))
        if 0 < x < obj.q:
            break
    obj.x, obj.y=x, pow(g, x, p)
    return obj
# Construct a qNEW object
def construct(tuple):
    """construct(tuple:(long,long,long,long)|(long,long,long,long,long)
    Construct a qNEW object from a 4- or 5-tuple of numbers.

    Component order follows qNEWobj.keydata: (p, q, g, y) for a public
    key, plus the private exponent x as a fifth element.
    """
    obj=qNEWobj()
    if len(tuple) not in [4,5]:
        raise error, 'argument for construct() wrong length'
    # Assign each tuple element to the attribute named at the same
    # position in keydata.
    for i in range(len(tuple)):
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    return obj
class qNEWobj(pubkey.pubkey):
    # Order in which key components appear in construct()/publickey() tuples;
    # 'x' (the private exponent) is last and optional.
    keydata=['p', 'q', 'g', 'y', 'x']
    def _sign(self, M, K=''):
        # M is the (already hashed) message as a number; K is a per-signature
        # random nonce that must be less than q.
        if (self.q<=K):
            raise error, 'K is greater than q'
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M>=pow(2,161L):
            raise error, 'Illegal value of M (too large)'
        r=pow(self.g, K, self.p) % self.q
        s=(K- (r*M*self.x % self.q)) % self.q
        return (r,s)
    def _verify(self, M, sig):
        r, s = sig
        # Both signature components must lie strictly between 0 and q.
        if r<=0 or r>=self.q or s<=0 or s>=self.q:
            return 0
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M<=0 or M>=pow(2,161L):
            return 0
        # The signature is valid iff (g**s * y**(M*r) mod p) mod q == r.
        v1 = pow(self.g, s, self.p)
        v2 = pow(self.y, M*r, self.p)
        v = ((v1*v2) % self.p)
        v = v % self.q
        if v==r:
            return 1
        return 0
    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return 160
    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        return hasattr(self, 'x')
    def can_sign(self):
        """Return a Boolean value recording whether this algorithm can generate signatures."""
        return 1
    def can_encrypt(self):
        """Return a Boolean value recording whether this algorithm can encrypt data."""
        return 0
    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.p, self.q, self.g, self.y))

# Backwards-compatible alias used by the pubkey framework.
object = qNEWobj
| gpl-3.0 |
clstl/servo | tests/wpt/css-tests/tools/html5lib/doc/conf.py | 436 | 9028 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# html5lib documentation build configuration file, created by
# sphinx-quickstart on Wed May 8 00:04:49 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'html5lib'
copyright = '2006 - 2013, James Graham, Geoffrey Sneddon, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): 'version' is hardcoded here while 'release' below is pulled
# from the installed package -- these can drift apart; confirm intended.
version = '1.0'
# The full version, including alpha/beta/rc tags.
# The parent directory is added to sys.path so the in-tree html5lib package
# (rather than an installed copy) supplies __version__.
sys.path.append(os.path.abspath('..'))
from html5lib import __version__
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'theme']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'html5libdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'html5lib.tex', 'html5lib Documentation',
'James Graham, Geoffrey Sneddon, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'html5lib', 'html5lib Documentation',
['James Graham, Geoffrey Sneddon, and contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'html5lib', 'html5lib Documentation',
'James Graham, Geoffrey Sneddon, and contributors', 'html5lib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
class CExtMock(object):
    """Stand-in object for C extension modules so autodoc can import the
    package on readthedocs.org, where C extensions cannot be built."""

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        # Calling a mock (e.g. a mocked factory function) yields another mock.
        return CExtMock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes __file__/__path__ when treating the mock as a module;
        # every other attribute access produces a further mock.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        return CExtMock()
# Replace unbuildable C-extension dependencies with CExtMock instances in
# sys.modules so that autodoc can still import modules that depend on them.
try:
    import lxml # flake8: noqa
except ImportError:
    sys.modules['lxml'] = CExtMock()
    sys.modules['lxml.etree'] = CExtMock()
    print("warning: lxml modules mocked.")
try:
    import genshi # flake8: noqa
except ImportError:
    sys.modules['genshi'] = CExtMock()
    sys.modules['genshi.core'] = CExtMock()
    print("warning: genshi modules mocked.")
| mpl-2.0 |
MatthewWilkes/django | tests/schema/fields.py | 141 | 2818 | from django.db import models
from django.db.models.fields.related import (
RECURSIVE_RELATIONSHIP_CONSTANT, ManyToManyDescriptor, ManyToManyField,
ManyToManyRel, RelatedField, create_many_to_many_intermediary_model,
)
from django.utils.functional import curry
class CustomManyToManyField(RelatedField):
    """
    Ticket #24104 - Need to have a custom ManyToManyField,
    which is not an inheritor of ManyToManyField.
    """
    many_to_many = True
    def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
        # Accept either a model class or a lazy string reference for 'to'.
        try:
            to._meta
        except AttributeError:
            to = str(to)
        kwargs['rel'] = ManyToManyRel(
            self, to,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
            through=kwargs.pop('through', None),
            through_fields=kwargs.pop('through_fields', None),
            db_constraint=db_constraint,
        )
        self.swappable = swappable
        self.db_table = kwargs.pop('db_table', None)
        # An explicit through model already defines its own table name.
        if kwargs['rel'].through is not None:
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
        super(CustomManyToManyField, self).__init__(**kwargs)
    def contribute_to_class(self, cls, name, **kwargs):
        # Symmetrical self-referential relations get the special hidden
        # reverse accessor name.
        if self.remote_field.symmetrical and (
                self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
            self.remote_field.related_name = "%s_rel_+" % name
        super(CustomManyToManyField, self).contribute_to_class(cls, name, **kwargs)
        # Auto-create the intermediary model unless one was supplied or the
        # owning model is abstract/swapped out.
        if not self.remote_field.through and not cls._meta.abstract and not cls._meta.swapped:
            self.remote_field.through = create_many_to_many_intermediary_model(self, cls)
        setattr(cls, self.name, ManyToManyDescriptor(self.remote_field))
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
    def get_internal_type(self):
        return 'ManyToManyField'
    # Copy those methods from ManyToManyField because they don't call super() internally
    contribute_to_related_class = ManyToManyField.__dict__['contribute_to_related_class']
    _get_m2m_attr = ManyToManyField.__dict__['_get_m2m_attr']
    _get_m2m_reverse_attr = ManyToManyField.__dict__['_get_m2m_reverse_attr']
    _get_m2m_db_table = ManyToManyField.__dict__['_get_m2m_db_table']
class InheritedManyToManyField(ManyToManyField):
    """A plain subclass of ManyToManyField, with no behavior changes."""
    pass
class MediumBlobField(models.BinaryField):
    """Binary field mapped to MySQL's larger MEDIUMBLOB column type."""

    def db_type(self, connection):
        # Override the default blob column with the medium-sized variant.
        return "MEDIUMBLOB"
| bsd-3-clause |
simbha/mAngE-Gin | lib/Django 1.7/django/core/exceptions.py | 40 | 5079 | """
Global Django exception and warning classes.
"""
from functools import reduce
import operator
from django.utils import six
from django.utils.encoding import force_text
class DjangoRuntimeWarning(RuntimeWarning):
    """Base warning class for runtime warnings emitted by Django."""
    pass


class AppRegistryNotReady(Exception):
    """The django.apps registry is not populated yet"""
    pass


class ObjectDoesNotExist(Exception):
    """The requested object does not exist"""
    # Template engines silently swallow this failure instead of rendering
    # an error when the attribute lookup raises it.
    silent_variable_failure = True


class MultipleObjectsReturned(Exception):
    """The query returned multiple objects when only one was expected."""
    pass


class SuspiciousOperation(Exception):
    """The user did something suspicious"""


class SuspiciousMultipartForm(SuspiciousOperation):
    """Suspect MIME request in multipart form data"""
    pass


class SuspiciousFileOperation(SuspiciousOperation):
    """A Suspicious filesystem operation was attempted"""
    pass


class DisallowedHost(SuspiciousOperation):
    """HTTP_HOST header contains invalid value"""
    pass


class DisallowedRedirect(SuspiciousOperation):
    """Redirect to scheme not in allowed list"""
    pass


class PermissionDenied(Exception):
    """The user did not have permission to do that"""
    pass


class ViewDoesNotExist(Exception):
    """The requested view does not exist"""
    pass


class MiddlewareNotUsed(Exception):
    """This middleware is not used in this server configuration"""
    pass


class ImproperlyConfigured(Exception):
    """Django is somehow improperly configured"""
    pass


class FieldError(Exception):
    """Some kind of problem with a model field."""
    pass


# Key under which non-field (form/model-wide) errors are stored in an
# error dict.
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
    """An error while validating data."""
    def __init__(self, message, code=None, params=None):
        """
        The `message` argument can be a single error, a list of errors, or a
        dictionary that maps field names to lists of errors. What we define as
        an "error" can be either a simple string or an instance of
        ValidationError with its message attribute set, and what we define as
        list or dictionary can be an actual `list` or `dict` or an instance
        of ValidationError with its `error_list` or `error_dict` attribute set.
        """
        # PY2 can't pickle naive exception: http://bugs.python.org/issue1692335.
        super(ValidationError, self).__init__(message, code, params)
        # Unwrap a ValidationError passed as the message into its raw
        # dict / list / (message, code, params) form before normalizing.
        if isinstance(message, ValidationError):
            if hasattr(message, 'error_dict'):
                message = message.error_dict
            # PY2 has a `message` property which is always there so we can't
            # duck-type on it. It was introduced in Python 2.5 and already
            # deprecated in Python 2.6.
            elif not hasattr(message, 'message' if six.PY3 else 'code'):
                message = message.error_list
            else:
                message, code, params = message.message, message.code, message.params
        if isinstance(message, dict):
            # Normalize to {field: [ValidationError, ...]} in error_dict.
            self.error_dict = {}
            for field, messages in message.items():
                if not isinstance(messages, ValidationError):
                    messages = ValidationError(messages)
                self.error_dict[field] = messages.error_list
        elif isinstance(message, list):
            # Flatten nested errors into a single error_list.
            self.error_list = []
            for message in message:
                # Normalize plain strings to instances of ValidationError.
                if not isinstance(message, ValidationError):
                    message = ValidationError(message)
                self.error_list.extend(message.error_list)
        else:
            # Single message: this instance is its own one-element list.
            self.message = message
            self.code = code
            self.params = params
            self.error_list = [self]
    @property
    def message_dict(self):
        # Trigger an AttributeError if this ValidationError
        # doesn't have an error_dict.
        getattr(self, 'error_dict')
        return dict(self)
    @property
    def messages(self):
        # Flat list of message strings, whether errors are per-field or not.
        if hasattr(self, 'error_dict'):
            return reduce(operator.add, dict(self).values())
        return list(self)
    def update_error_dict(self, error_dict):
        # Merge this error's messages into an existing {field: [errors]} dict;
        # non-field errors go under NON_FIELD_ERRORS.
        if hasattr(self, 'error_dict'):
            for field, error_list in self.error_dict.items():
                error_dict.setdefault(field, []).extend(error_list)
        else:
            error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list)
        return error_dict
    def __iter__(self):
        # Yields (field, messages) pairs for dict-style errors, otherwise
        # the interpolated message strings themselves.
        if hasattr(self, 'error_dict'):
            for field, errors in self.error_dict.items():
                yield field, list(ValidationError(errors))
        else:
            for error in self.error_list:
                message = error.message
                if error.params:
                    message %= error.params
                yield force_text(message)
    def __str__(self):
        if hasattr(self, 'error_dict'):
            return repr(dict(self))
        return repr(list(self))
    def __repr__(self):
        return 'ValidationError(%s)' % self
| mit |
nash-x/hws | nova/api/openstack/compute/limits.py | 6 | 16528 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module dedicated functions/classes dealing with rate limiting requests.
This module handles rate liming at a per-user level, so it should not be used
to prevent intentional Denial of Service attacks, as we can assume a DOS can
easily come through multiple user accounts. DOS protection should be done at a
different layer. Instead this module should be used to protect against
unintentional user actions. With that in mind the limits set here should be
high enough as to not rate-limit any intentional actions.
To find good rate-limit values, check how long requests are taking (see logs)
in your environment to assess your capabilities and multiply out to get
figures.
NOTE: As the rate-limiting here is done in memory, this only works per
process (each process will have its own rate limiting counter).
"""
import collections
import copy
import httplib
import math
import re
import time
import webob.dec
import webob.exc
from nova.api.openstack.compute.views import limits as limits_views
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import quota
from nova import utils
from nova import wsgi as base_wsgi
QUOTAS = quota.QUOTAS
LIMITS_PREFIX = "limits."
limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class LimitsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the /limits response body."""
    def construct(self):
        # <limits> root containing a <rates> section of per-URI rate limits
        # and an <absolute> section of quota name/value pairs.
        root = xmlutil.TemplateElement('limits', selector='limits')
        rates = xmlutil.SubTemplateElement(root, 'rates')
        rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
        rate.set('uri', 'uri')
        rate.set('regex', 'regex')
        limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
        limit.set('value', 'value')
        limit.set('verb', 'verb')
        limit.set('remaining', 'remaining')
        limit.set('unit', 'unit')
        limit.set('next-available', 'next-available')
        absolute = xmlutil.SubTemplateElement(root, 'absolute',
                                              selector='absolute')
        limit = xmlutil.SubTemplateElement(absolute, 'limit',
                                           selector=xmlutil.get_items)
        limit.set('name', 0)
        limit.set('value', 1)
        return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
class LimitsController(object):
    """Controller for accessing limits in the OpenStack API."""
    @wsgi.serializers(xml=LimitsTemplate)
    def index(self, req):
        """Return all global and rate limit information."""
        context = req.environ['nova.context']
        # Allow querying another tenant's quotas via ?tenant_id=...
        project_id = req.params.get('tenant_id', context.project_id)
        quotas = QUOTAS.get_project_quotas(context, project_id,
                                           usages=False)
        abs_limits = dict((k, v['limit']) for k, v in quotas.items())
        # Rate limits are injected into the environ by RateLimitingMiddleware.
        rate_limits = req.environ.get("nova.limits", [])
        builder = self._get_view_builder(req)
        return builder.build(rate_limits, abs_limits)
    # Limits are read-only through this API: all mutating verbs and the
    # detail/show routes are unimplemented by design.
    def create(self, req, body):
        """Create a new limit."""
        raise webob.exc.HTTPNotImplemented()
    def delete(self, req, id):
        """Delete the limit."""
        raise webob.exc.HTTPNotImplemented()
    def detail(self, req):
        """Return limit details."""
        raise webob.exc.HTTPNotImplemented()
    def show(self, req, id):
        """Show limit information."""
        raise webob.exc.HTTPNotImplemented()
    def update(self, req, id, body):
        """Update existing limit."""
        raise webob.exc.HTTPNotImplemented()
    def _get_view_builder(self, req):
        # Broken out so subclasses/tests can substitute a different builder.
        return limits_views.ViewBuilder()
def create_resource():
    """Build the WSGI resource wrapping the limits controller."""
    controller = LimitsController()
    return wsgi.Resource(controller)
class Limit(object):
    """Stores information about a limit for HTTP requests.

    Requests are throttled with a leaky-bucket scheme: each matching call
    adds `request_value` to `water_level`, which drains at one unit per
    second; overflowing `capacity` means the request must be delayed.
    """
    # Reverse mapping: seconds-per-unit -> unit name ("MINUTE", "HOUR", ...).
    UNITS = dict([(v, k) for k, v in utils.TIME_UNITS.items()])
    def __init__(self, verb, uri, regex, value, unit):
        """Initialize a new `Limit`.
        @param verb: HTTP verb (POST, PUT, etc.)
        @param uri: Human-readable URI
        @param regex: Regular expression format for this limit
        @param value: Integer number of requests which can be made
        @param unit: Unit of measure for the value parameter
        """
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = int(value)
        self.unit = unit
        self.unit_string = self.display_unit().lower()
        self.remaining = int(value)
        if value <= 0:
            raise ValueError("Limit value must be > 0")
        self.last_request = None
        self.next_request = None
        # Leaky-bucket state: capacity is the unit's length in seconds and
        # each request contributes capacity/value seconds of "water".
        self.water_level = 0
        self.capacity = self.unit
        self.request_value = float(self.capacity) / float(self.value)
        msg = (_("Only %(value)s %(verb)s request(s) can be "
                 "made to %(uri)s every %(unit_string)s.") %
               {'value': self.value, 'verb': self.verb, 'uri': self.uri,
                'unit_string': self.unit_string})
        self.error_message = msg
    def __call__(self, verb, url):
        """Represents a call to this limit from a relevant request.
        @param verb: string http verb (POST, GET, etc.)
        @param url: string URL
        @return: seconds to delay the request, or None if it may proceed.
        """
        # Ignore requests this limit does not apply to.
        if self.verb != verb or not re.match(self.regex, url):
            return
        now = self._get_time()
        if self.last_request is None:
            self.last_request = now
        # Drain the bucket by the time elapsed since the last request.
        leak_value = now - self.last_request
        self.water_level -= leak_value
        self.water_level = max(self.water_level, 0)
        self.water_level += self.request_value
        difference = self.water_level - self.capacity
        self.last_request = now
        if difference > 0:
            # Bucket overflowed: reject this request (undo its contribution)
            # and report how long the caller must wait.
            self.water_level -= self.request_value
            self.next_request = now + difference
            return difference
        cap = self.capacity
        water = self.water_level
        val = self.value
        self.remaining = math.floor(((cap - water) / cap) * val)
        self.next_request = now
    def _get_time(self):
        """Retrieve the current time. Broken out for testability."""
        return time.time()
    def display_unit(self):
        """Display the string name of the unit."""
        return self.UNITS.get(self.unit, "UNKNOWN")
    def display(self):
        """Return a useful representation of this class."""
        return {
            "verb": self.verb,
            "URI": self.uri,
            "regex": self.regex,
            "value": self.value,
            "remaining": int(self.remaining),
            "unit": self.display_unit(),
            "resetTime": int(self.next_request or self._get_time()),
        }
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
DEFAULT_LIMITS = [
Limit("POST", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
Limit("POST", "*/servers", "^/servers", 120, utils.TIME_UNITS['MINUTE']),
Limit("PUT", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
Limit("GET", "*changes-since*", ".*changes-since.*", 120,
utils.TIME_UNITS['MINUTE']),
Limit("DELETE", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
Limit("GET", "*/os-fping", "^/os-fping", 12, utils.TIME_UNITS['MINUTE']),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
    """Rate-limits requests passing through this middleware. All limit
    information is stored in memory for this implementation.
    """
    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """Initialize new `RateLimitingMiddleware`.
        It wraps the given WSGI application and sets up the given limits.
        @param application: WSGI application to wrap
        @param limits: String describing limits
        @param limiter: String identifying class for representing limits
        Other parameters are passed to the constructor for the limiter.
        """
        base_wsgi.Middleware.__init__(self, application)
        # Select the limiter class
        if limiter is None:
            limiter = Limiter
        else:
            limiter = importutils.import_class(limiter)
        # Parse the limits, if any are provided
        if limits is not None:
            limits = limiter.parse_limits(limits)
        self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Represents a single call through this middleware.
        We should record the request if we have a limit relevant to it.
        If no limit is relevant to the request, ignore it.
        If the request should be rate limited, return a fault telling the user
        they are over the limit and need to retry later.
        """
        verb = req.method
        url = req.url
        context = req.environ.get("nova.context")
        # Limits are tracked per user when a request context is available.
        if context:
            username = context.user_id
        else:
            username = None
        delay, error = self._limiter.check_for_delay(verb, url, username)
        if delay:
            msg = _("This request was rate-limited.")
            retry = time.time() + delay
            return wsgi.RateLimitFault(msg, error, retry)
        # Expose current limits so LimitsController.index can report them.
        req.environ["nova.limits"] = self._limiter.get_limits(username)
        return self.application
class Limiter(object):
    """Rate-limit checking class which handles limits in memory."""
    def __init__(self, limits, **kwargs):
        """Initialize the new `Limiter`.
        @param limits: List of `Limit` objects
        """
        self.limits = copy.deepcopy(limits)
        # Each user gets an independent deep copy of the limits, created on
        # first access.
        self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
        # Pick up any per-user limit information
        for key, value in kwargs.items():
            if key.startswith(LIMITS_PREFIX):
                username = key[len(LIMITS_PREFIX):]
                self.levels[username] = self.parse_limits(value)
    def get_limits(self, username=None):
        """Return the limits for a given user."""
        return [limit.display() for limit in self.levels[username]]
    def check_for_delay(self, verb, url, username=None):
        """Check the given verb/user/user triplet for limit.
        @return: Tuple of delay (in seconds) and error message (or None, None)
        """
        delays = []
        for limit in self.levels[username]:
            delay = limit(verb, url)
            if delay:
                delays.append((delay, limit.error_message))
        if delays:
            # Report the shortest applicable delay.
            delays.sort()
            return delays[0]
        return None, None
    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor. We
    # put this in the class so that subclasses can override the
    # default limit parsing.
    @staticmethod
    def parse_limits(limits):
        """Convert a string into a list of Limit instances. This
        implementation expects a semicolon-separated sequence of
        parenthesized groups, where each group contains a
        comma-separated sequence consisting of HTTP method,
        user-readable URI, a URI reg-exp, an integer number of
        requests which can be made, and a unit of measure. Valid
        values for the latter are "SECOND", "MINUTE", "HOUR", and
        "DAY".
        @return: List of Limit instances.
        @raise ValueError: if the string is malformed.
        """
        # Handle empty limit strings
        limits = limits.strip()
        if not limits:
            return []
        # Split up the limits by semicolon
        result = []
        for group in limits.split(';'):
            group = group.strip()
            if group[:1] != '(' or group[-1:] != ')':
                raise ValueError("Limit rules must be surrounded by "
                                 "parentheses")
            group = group[1:-1]
            # Extract the Limit arguments
            args = [a.strip() for a in group.split(',')]
            if len(args) != 5:
                raise ValueError("Limit rules must contain the following "
                                 "arguments: verb, uri, regex, value, unit")
            # Pull out the arguments
            verb, uri, regex, value, unit = args
            # Upper-case the verb
            verb = verb.upper()
            # Convert value--raises ValueError if it's not integer
            value = int(value)
            # Convert unit
            unit = unit.upper()
            if unit not in utils.TIME_UNITS:
                raise ValueError("Invalid units specified")
            unit = utils.TIME_UNITS[unit]
            # Build a limit
            result.append(Limit(verb, uri, regex, value, unit))
        return result
class WsgiLimiter(object):
    """Rate-limit checking from a WSGI application. Uses an in-memory
    `Limiter`.
    To use, POST ``/<username>`` with JSON data such as::
        {
            "verb" : GET,
            "path" : "/servers"
        }
    and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
    header containing the number of seconds to wait before the action would
    succeed.
    """
    def __init__(self, limits=None):
        """Initialize the new `WsgiLimiter`.
        @param limits: List of `Limit` objects
        """
        self._limiter = Limiter(limits or DEFAULT_LIMITS)
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, request):
        """Handles a call to this application.
        Returns 204 if the request is acceptable to the limiter, else a 403
        is returned with a relevant header indicating when the request *will*
        succeed.
        """
        # Only POST is meaningful; the payload describes the request to check.
        if request.method != "POST":
            raise webob.exc.HTTPMethodNotAllowed()
        try:
            info = dict(jsonutils.loads(request.body))
        except ValueError:
            raise webob.exc.HTTPBadRequest()
        # The username (if any) is the first path segment.
        username = request.path_info_pop()
        verb = info.get("verb")
        path = info.get("path")
        delay, error = self._limiter.check_for_delay(verb, path, username)
        if delay:
            headers = {"X-Wait-Seconds": "%.2f" % delay}
            return webob.exc.HTTPForbidden(headers=headers, explanation=error)
        else:
            return webob.exc.HTTPNoContent()
class WsgiLimiterProxy(object):
    """Rate-limit requests based on answers from a remote source."""
    def __init__(self, limiter_address):
        """Initialize the new `WsgiLimiterProxy`.

        @param limiter_address: IP/port combination of where to request limit
        """
        self.limiter_address = limiter_address
    def check_for_delay(self, verb, path, username=None):
        """Ask the remote limiter whether this request must be delayed.

        Returns a ``(delay, error)`` tuple; ``(None, None)`` means the
        request may proceed immediately. On refusal, ``delay`` is the value
        of the remote ``X-Wait-Seconds`` header and ``error`` the response
        body (or None if empty).
        """
        body = jsonutils.dumps({"verb": verb, "path": path})
        headers = {"Content-Type": "application/json"}
        conn = httplib.HTTPConnection(self.limiter_address)
        if username:
            conn.request("POST", "/%s" % (username), body, headers)
        else:
            conn.request("POST", "/", body, headers)
        resp = conn.getresponse()
        # Bug fix: the original test was `200 >= resp.status < 300`, which
        # accepts any status <= 200 (e.g. 1xx) and rejects 201-299. A 2xx
        # success check needs the chained comparison below.
        if 200 <= resp.status < 300:
            return None, None
        return resp.getheader("X-Wait-Seconds"), resp.read() or None
    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor.
    # This implementation returns an empty list, since all limit
    # decisions are made by a remote server.
    @staticmethod
    def parse_limits(limits):
        """Ignore a limits string--simply doesn't apply for the limit
        proxy.

        @return: Empty list.
        """
        return []
| apache-2.0 |
prasunroypr/digit-recognizer | source/defs.py | 1 | 6607 | ################################################################################
"""
Functions for Digit Recognition
Created on Wed Jun 01 00:00:00 2016
@author: Prasun Roy
@e-mail: prasunroy.pr@gmail.com
"""
################################################################################
# import modules
import matplotlib.pyplot as pplt
import numpy as np
import os
import pandas as pd
import skimage.feature as skim
import sklearn.preprocessing as pp
import time
from conf import _config
from conf import _configinfo
################################################################################
def _fscale(data, split=False, load=False, verbose=False):
    """Min-max scale features into the [0, 1] range.

    data    : 2-D array-like, samples x features.
    split   : fit the scaler on the first config['train_d'] rows only and
              apply the same transform to the remaining (validation) rows.
    load    : reuse the minima/ranges previously saved to 'scaled.npy'
              instead of refitting; falls back to fitting if the file is
              missing.
    verbose : print progress messages.

    Returns the scaled data as a float64 ndarray. Unless *load* is set,
    the fitted (min, range) pair is saved for later reuse.
    """
    # initialize scaler
    scaler = pp.MinMaxScaler()
    # initialize variables
    config = _configinfo()
    sdpath = config['root_data_path'] + 'scaled.npy'
    # scale data
    if verbose: print('scaling features............... ', end = '')
    data = np.array(data, dtype='float64')
    if load and os.path.isfile(sdpath):
        # Load the saved (min, range) pair once -- the original re-read the
        # file for each row, doubling the disk I/O.
        params = np.load(sdpath)
        m = params[0]
        r = params[1]
        r[r==0] = 1  # constant features: avoid division by zero
        data = (data - m) / r
    elif split:
        train = data[:config['train_d']]
        valid = data[config['train_d']:]
        scaler.fit(train)
        m = scaler.data_min_
        r = scaler.data_range_
        train = scaler.transform(train)
        valid = scaler.transform(valid)
        data = np.vstack((train, valid))
    else:
        data = scaler.fit_transform(data)
        m = scaler.data_min_
        r = scaler.data_range_
    if verbose: print('done')
    # save scaled config
    if not load: np.save(sdpath, np.vstack((m, r)))
    # return scaled data
    return data
################################################################################
def _haar(data, load=True, save=False, verbose=False):
    """Haar feature extractor stub -- not implemented; returns *data* unchanged."""
    return data
################################################################################
def _hogs(data, load=True, save=False, verbose=False):
    """Extract HOG (histogram of oriented gradients) descriptors.

    When *load* is set and a cached descriptor file exists it is reused;
    otherwise descriptors are computed from the flattened images in *data*.
    When *save* is set the descriptor array is written to the cache path.
    Returns a float64 array of one descriptor row per input image.
    """
    # configuration for paths and HOG geometry
    cfg = _config()
    datapath = cfg['hogs_data_path']
    data_hog = []
    if load and os.path.isfile(datapath):
        # cached descriptors available -- skip recomputation
        if verbose: print('loading descriptors............ ', end = '')
        data_hog = np.load(datapath)
        if verbose: print('done')
    else:
        # image shape and HOG parameters
        rows = cfg['shape_x']
        cols = cfg['shape_y']
        n_bins = cfg['bins_n']
        cell = (cfg['cell_x'], cfg['cell_y'])
        block = (cfg['blok_w'], cfg['blok_h'])
        # compute one descriptor per image, reporting progress
        t_beg = time.time()
        size = data.shape[0]
        for loop, image in enumerate(data):
            if verbose: print('\rextracting descriptors......... %d%%'
                              %(loop*100//size), end = '')
            data_hog.append(skim.hog(image.reshape(rows, cols),
                                     orientations=n_bins,
                                     pixels_per_cell=cell,
                                     cells_per_block=block))
        data_hog = np.array(data_hog, dtype='float64')
        t_end = time.time()
        if verbose: print('\rextracting descriptors......... done @ %8.2f sec'
                          %(t_end - t_beg))
    # save data
    if save:
        if verbose: print('saving descriptors............. ', end = '')
        np.save(datapath, data_hog)
        if verbose: print('done')
    # return hog
    return data_hog
################################################################################
def _sift(data, load=True, save=False, verbose=False):
    """SIFT feature extractor stub -- not implemented; returns *data* unchanged."""
    return data
################################################################################
def _surf(data, load=True, save=False, verbose=False):
    """SURF feature extractor stub -- not implemented; returns *data* unchanged."""
    return data
################################################################################
def _plot(classifier, train, valid, step=None, save=False, verbose=False):
    """Generate and display a learning curve for *classifier*.

    train/valid are 2-D arrays whose first column holds the label and the
    remaining columns the features (see the slicing below). The classifier
    is repeatedly refit on growing random subsets of *train* (step rows at a
    time) and the training/cross-validation error of each fit is plotted.

    NOTE(review): *train* is shuffled IN PLACE each iteration -- callers
    should not rely on its row order afterwards.
    """
    # initialize config
    config = _config()
    # initialize variables
    if step is None: step = config['steps_d']
    plot_figs_head = config['classifier'] + '-' + config['preprocess']
    plot_data_path = config['plot_data_path']
    plot_figs_path = config['plot_figs_path']
    m_train = train.shape[0]
    m_valid = valid.shape[0]
    # column 0 is the label; the rest are features
    X_valid = valid[:, 1:]
    y_valid = valid[:, 0]
    error_train = []
    error_valid = []
    sizes_train = []
    # calculate data for plot
    for i in range(0, m_train, step):
        if verbose: print('\rgenerating plot................ %d%%'
                          %(i*100//m_train), end = '')
        # randomly shuffle training data
        np.random.shuffle(train)
        # select subset of randomized training data
        X_train = train[:i+step, 1:]
        y_train = train[:i+step, 0]
        # train classifier with selected data
        classifier.fit(X_train, y_train)
        # cross-validate classifier
        p_train = classifier.predict(X_train)
        p_valid = classifier.predict(X_valid)
        # estimate errors (fraction of misclassified samples)
        error_train.append(sum(y_train != p_train) / len(y_train))
        error_valid.append(sum(y_valid != p_valid) / m_valid)
        sizes_train.append(i+step)
    error_train = np.array(error_train, dtype='float64')
    error_valid = np.array(error_valid, dtype='float64')
    sizes_train = np.array(sizes_train, dtype='uint32')
    if verbose: print('\rgenerating plot................ done')
    # plot data
    pplt.plot(sizes_train, error_train, 'rs-', label='training error')
    pplt.plot(sizes_train, error_valid, 'gs-', label='cross-validation error')
    pplt.title(plot_figs_head.upper()+' Learning Curve')
    pplt.xlabel('number of training instances')
    pplt.ylabel('classification error')
    pplt.legend()
    # pad the axes a little so markers at the edges stay visible
    xmin,xmax = pplt.xlim()
    ymin,ymax = pplt.ylim()
    pplt.axis([xmin, xmax+step, ymin, ymax+0.01])
    pplt.grid(True)
    # save data
    if save:
        if verbose: print('saving plot.................... ', end = '')
        data = pd.DataFrame({'x1_TrainSizes':sizes_train,
                             'y1_TrainError':error_train,
                             'y2_ValidError':error_valid})
        data.to_csv(plot_data_path, index=False)
        pplt.savefig(plot_figs_path)
        if verbose: print('done')
    # display plot
    pplt.show()
################################################################################
| gpl-3.0 |
UAlbanyArchives/asInventory | archives_tools/migrateCMS.py | 3 | 5481 | import aspace as AS
import datetime
from openpyxl import load_workbook
import os
# Migration script: builds ArchivesSpace resource records from a CMS
# collection spreadsheet and a Drupal subject-guide spreadsheet.
if os.name == "nt":
	#Windows Directory Names
	#Collection and Subject spreadsheets directory
	spreadDir = "\\\\romeo\\Collect\spe\\Tools\\CollectionList"
	cmsDir = "\\\\romeo\\Collect\\spe\\Greg\\CMSdata"
else:
	#Unix directory names
	#Collection and Subject spreadsheets directory
	spreadDir = "/media/bcadmin/Collect/spe/Tools/CollectionList"
#parse Collection List spreadsheet
collectionListFile = os.path.join(spreadDir, "collectionList.xlsx")
collectionWorkbook = load_workbook(filename = collectionListFile, use_iterators=True, read_only=True)
collectionList = collectionWorkbook.get_sheet_by_name('collectionList')
#parse Local Subject Lists spreadsheet
subjectGuidesFile = os.path.join(spreadDir, "subjectGuides.xlsx")
subjectWorkbook = load_workbook(filename = subjectGuidesFile, use_iterators=True, read_only=True)
subjectGuides = subjectWorkbook.get_sheet_by_name('subjectGuides')
#Parse List of Collections to list of lists
# Each row becomes: [rowNumber, id, restriction, format, name, type, date,
# extentType, extentValue, abstract, ...] -- indexed numerically below.
rowIndex = 0
collections = []
for row in collectionList.rows:
	rowIndex = rowIndex + 1
	if rowIndex > 1:
		collection = [str(rowIndex), row[0].value, row[1].value, row[2].value, row[3].value, row[4].value, row[5].value, row[6].value, row[7].value, row[8].value, row[9].value, row[10].value, row[11].value]
		collections.append(collection)
# Connect to ArchivesSpace (repository "2") and gather existing EAD ids so
# collections already migrated are skipped.
session = AS.getSession()
repo = "2"
print ("Getting List of IDs from ASpace...")
resourceIDs = []
for resource in AS.getResources(session, repo, "all"):
	resourceID = resource.ead_id
	if "_" in resourceID:
		resourceID = resourceID.split("_")[1]
	resourceIDs.append(resourceID)
print ("Getting a list of subjects from ASpace...")
subjectData = AS.getSubjects(session, "all")
for collection in collections:
	# collection[6] is the date field, collection[8] the extent value;
	# rows lacking either are reported and skipped.
	if str(collection[6]).lower().strip() == "null" or str(collection[6]).lower().strip() == "undated":
		print ("No Date for " + collection[1] + " " + collection[4])
	elif collection[8].lower().strip() == "null":
		print ("No Extent for " + collection[1] + " " + collection[4])
	else:
		if not collection[1] in resourceIDs:
			print ("making resouce for " + collection[4])
			newRes = AS.makeResource()
			resType = collection[5]
			collectionName = collection[4]
			# Personal-papers and office names arrive as "Last, First" /
			# "Office of X, Dept" -- swap the comma halves for display.
			if "," in collectionName and collection[5].lower().strip() == "papers":
				fixedName = collectionName.split(",")[1].strip() + " " + collectionName.split(",")[0].strip()
			elif collectionName.lower().strip().startswith("office of"):
				fixedName = collectionName.split(",")[1].strip() + " " + collectionName.split(",")[0].strip()
			else:
				fixedName = collectionName
			newRes.title = fixedName + " " + resType
			newRes.level = "collection"
			colID = collection[1].strip()
			typeList = ["collection", "papers", "publications", "records"]
			if resType.lower() in typeList:
				newRes.resource_type = resType.lower()
			else:
				newRes.resource_type = "collection"
			# Restricted collections are unpublished but their access note
			# (and subnotes) are published.
			restrict = collection[2]
			if not restrict is None:
				newRes.publish = False
				newRes = AS.makeMultiNote(newRes, "accessrestrict", restrict)
				for note in newRes.notes:
					if note["type"] == "accessrestrict":
						note["publish"] = True
						for subnote in note["subnotes"]:
							subnote["publish"] = True
			else:
				newRes.publish = True
			newRes.id_0 = colID
			newRes.ead_id = colID
			newRes.ead_location = "http://meg.library.albany.edu:8080/archive/view?docId=" + colID + ".xml"
			if collection[3].lower().strip() == "html":
				newRes.finding_aid_note = "HTML Container List"
			newRes.finding_aid_author = "Migrated from CMS and Drupal Abstracts"
			newRes.finding_aid_date = datetime.datetime.now().isoformat().split("T")[0]
			#newRes = newRes.AS.addContainerLocation(newRes, "Collection", locationList, locationNote)
			# Dates may be "ca. "-prefixed, comma-separated lists, and/or
			# hyphenated ranges; each piece becomes a date record.
			dateField = str(collection[6])
			if "ca. " in dateField.lower().strip():
				dateField = dateField.replace("ca. ", "")
			if not dateField.lower().strip() == "null":
				if "," in dateField:
					for dateText in dateField.split(","):
						date = dateText.strip()
						if "-" in date:
							newRes = AS.makeDate(newRes, date.split("-")[0], date.split("-")[1])
						else:
							newRes = AS.makeDate(newRes, date.split("-")[0], "")
				else:
					if "-" in dateField:
						newRes = AS.makeDate(newRes, dateField.split("-")[0], dateField.split("-")[1])
					else:
						newRes = AS.makeDate(newRes, dateField.split("-")[0], "")
			newRes = AS.makeExtent(newRes, collection[7], collection[8])
			abstractText = collection[9]
			newRes = AS.makeSingleNote(newRes, "abstract", abstractText)
			print ("	adding subjects")
			# Collect the subject headings whose spreadsheet column lists
			# this collection id (header row holds the heading names).
			megList = []
			subjectSet = []
			subjectRowNumber = 0
			for subjectRow in subjectGuides.rows:
				subjectRowNumber = subjectRowNumber + 1
				if subjectRowNumber == 1:
					subjectSet = subjectRow
				else:
					colCount = 0
					for subID in subjectRow:
						colCount = colCount + 1
						if not subID.value is None:
							if subID.value.lower().strip() == colID:
								megList.append(subjectSet[colCount - 1])
			# Link each matched heading to an existing ASpace subject term.
			for megSubject in megList:
				subjectFound = False
				for subject in subjectData:
					for entry in subject.terms:
						try:
							if entry.term == megSubject:
								subjectFound = True
								subjectRef = subject.uri
						except:
							pass
					if subjectFound == True:
						newRes = AS.addSubject(session, newRes, subjectRef)
			AS.postResource(session, repo, newRes) #2035
endolith/scikit-image | skimage/feature/tests/test_util.py | 35 | 2818 | import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
from numpy.testing import assert_equal, assert_raises
from skimage.feature.util import (FeatureDetector, DescriptorExtractor,
_prepare_grayscale_input_2D,
_mask_border_keypoints, plot_matches)
def test_feature_detector():
    """The abstract FeatureDetector must refuse to run detect()."""
    detector = FeatureDetector()
    assert_raises(NotImplementedError, detector.detect, None)
def test_descriptor_extractor():
    """The abstract DescriptorExtractor must refuse to run extract()."""
    extractor = DescriptorExtractor()
    assert_raises(NotImplementedError, extractor.extract, None, None)
def test_prepare_grayscale_input_2D():
    """Only images squeezable to 2-D are accepted; other shapes raise."""
    # shapes that cannot be interpreted as a single 2-D grayscale image
    for bad_shape in ((3, 3, 3), (3, 1), (3, 1, 1)):
        assert_raises(ValueError, _prepare_grayscale_input_2D,
                      np.zeros(bad_shape))
    # shapes with at most one singleton axis are accepted
    for ok_shape in ((3, 3), (3, 3, 1), (1, 3, 3)):
        img = _prepare_grayscale_input_2D(np.zeros(ok_shape))
def test_mask_border_keypoints():
    """Keypoints closer than the given distance to the border are masked."""
    keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
    # (image shape, border distance, expected keep-mask)
    cases = (((10, 10), 0, [1, 1, 1, 1, 1]),
             ((10, 10), 2, [0, 0, 1, 1, 1]),
             ((4, 4), 2, [0, 0, 1, 0, 0]),
             ((10, 10), 5, [0, 0, 0, 0, 0]),
             ((10, 10), 4, [0, 0, 0, 0, 1]))
    for shape, dist, expected in cases:
        assert_equal(_mask_border_keypoints(shape, keypoints, dist), expected)
@np.testing.decorators.skipif(plt is None)
def test_plot_matches():
    """plot_matches must cope with equal and unequal image shapes."""
    fig, ax = plt.subplots(nrows=1, ncols=1)
    # every combination of equal/taller/wider image pairs
    shape_pairs = (((10, 10), (10, 10)),
                   ((10, 10), (12, 10)),
                   ((10, 10), (10, 12)),
                   ((10, 10), (12, 12)),
                   ((12, 10), (10, 10)),
                   ((10, 12), (10, 10)),
                   ((12, 12), (10, 10)))
    src_keypoints = 10 * np.random.rand(10, 2)
    dst_keypoints = 10 * np.random.rand(10, 2)
    src_idxs = np.random.randint(10, size=10)
    dst_idxs = np.random.randint(10, size=10)
    matches = np.column_stack((src_idxs, dst_idxs))
    for shape1, shape2 in shape_pairs:
        img1 = np.zeros(shape1)
        img2 = np.zeros(shape2)
        # exercise the default call plus each keyword option
        plot_matches(ax, img1, img2, src_keypoints, dst_keypoints, matches)
        plot_matches(ax, img1, img2, src_keypoints, dst_keypoints, matches,
                     only_matches=True)
        plot_matches(ax, img1, img2, src_keypoints, dst_keypoints, matches,
                     keypoints_color='r')
        plot_matches(ax, img1, img2, src_keypoints, dst_keypoints, matches,
                     matches_color='r')
if __name__ == '__main__':
    # Run this module's tests directly when executed as a script.
    from numpy import testing
    testing.run_module_suite()
| bsd-3-clause |
alexsavio/scikit-learn | examples/model_selection/plot_roc_crossval.py | 21 | 3477 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle

from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold

###############################################################################
# Data IO and generation

# import some data to play with; keep only two classes for a binary ROC
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape

# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

###############################################################################
# Classification and ROC analysis

# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
                     random_state=random_state)

mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)

colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
lw = 2

i = 0
for (train, test), color in zip(cv.split(X, y), colors):
    probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area the curve
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
    # Fix: `scipy.interp` was a deprecated alias of `numpy.interp` and has
    # been removed from modern SciPy; use np.interp directly (same result).
    mean_tpr += np.interp(mean_fpr, fpr, tpr)
    mean_tpr[0] = 0.0
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, lw=lw, color=color,
             label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
    i += 1

# Diagonal = performance of a random classifier
plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k',
         label='Luck')

# Average the interpolated TPRs over all folds
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)

plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
partofthething/home-assistant | homeassistant/components/pushsafer/notify.py | 1 | 6128 | """Pushsafer platform for notify component."""
import base64
import logging
import mimetypes
import requests
from requests.auth import HTTPBasicAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import ATTR_ICON, HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Pushsafer REST endpoint and the image MIME types it accepts inline.
_RESOURCE = "https://www.pushsafer.com/api"
_ALLOWED_IMAGES = ["image/gif", "image/jpeg", "image/png"]
# Configuration key for the account's private key and the HTTP timeout (s).
CONF_DEVICE_KEY = "private_key"
CONF_TIMEOUT = 15
# Top level attributes in 'data'
ATTR_SOUND = "sound"
ATTR_VIBRATION = "vibration"
ATTR_ICONCOLOR = "iconcolor"
ATTR_URL = "url"
ATTR_URLTITLE = "urltitle"
ATTR_TIME2LIVE = "time2live"
ATTR_PRIORITY = "priority"
ATTR_RETRY = "retry"
ATTR_EXPIRE = "expire"
ATTR_ANSWER = "answer"
ATTR_PICTURE1 = "picture1"
# Attributes contained in picture1
ATTR_PICTURE1_URL = "url"
ATTR_PICTURE1_PATH = "path"
ATTR_PICTURE1_USERNAME = "username"
ATTR_PICTURE1_PASSWORD = "password"
ATTR_PICTURE1_AUTH = "auth"
# The platform schema only adds the mandatory private key.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_DEVICE_KEY): cv.string})
def get_service(hass, config, discovery_info=None):
    """Get the Pushsafer.com notification service."""
    private_key = config.get(CONF_DEVICE_KEY)
    return PushsaferNotificationService(private_key, hass.config.is_allowed_path)
class PushsaferNotificationService(BaseNotificationService):
    """Implementation of the notification service for Pushsafer.com."""

    def __init__(self, private_key, is_allowed_path):
        """Initialize the service.

        private_key: Pushsafer account key used to authenticate API calls.
        is_allowed_path: callable validating local file paths
        (hass.config.is_allowed_path).
        """
        self._private_key = private_key
        self.is_allowed_path = is_allowed_path

    def send_message(self, message="", **kwargs):
        """Send a message to specified target(s); "a" means all devices."""
        if kwargs.get(ATTR_TARGET) is None:
            targets = ["a"]
            _LOGGER.debug("No target specified. Sending push to all")
        else:
            targets = kwargs.get(ATTR_TARGET)
            _LOGGER.debug("%s target(s) specified", len(targets))

        title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
        data = kwargs.get(ATTR_DATA, {})

        # Converting the specified image to base64
        picture1 = data.get(ATTR_PICTURE1)
        picture1_encoded = ""
        if picture1 is not None:
            _LOGGER.debug("picture1 is available")
            url = picture1.get(ATTR_PICTURE1_URL, None)
            local_path = picture1.get(ATTR_PICTURE1_PATH, None)
            username = picture1.get(ATTR_PICTURE1_USERNAME)
            password = picture1.get(ATTR_PICTURE1_PASSWORD)
            auth = picture1.get(ATTR_PICTURE1_AUTH)

            if url is not None:
                _LOGGER.debug("Loading image from url %s", url)
                picture1_encoded = self.load_from_url(url, username, password, auth)
            elif local_path is not None:
                _LOGGER.debug("Loading image from file %s", local_path)
                picture1_encoded = self.load_from_file(local_path)
            else:
                _LOGGER.warning("missing url or local_path for picture1")
        else:
            _LOGGER.debug("picture1 is not specified")

        payload = {
            "k": self._private_key,
            "t": title,
            "m": message,
            "s": data.get(ATTR_SOUND, ""),
            "v": data.get(ATTR_VIBRATION, ""),
            "i": data.get(ATTR_ICON, ""),
            "c": data.get(ATTR_ICONCOLOR, ""),
            "u": data.get(ATTR_URL, ""),
            "ut": data.get(ATTR_URLTITLE, ""),
            "l": data.get(ATTR_TIME2LIVE, ""),
            "pr": data.get(ATTR_PRIORITY, ""),
            "re": data.get(ATTR_RETRY, ""),
            "ex": data.get(ATTR_EXPIRE, ""),
            "a": data.get(ATTR_ANSWER, ""),
            "p": picture1_encoded,
        }

        # One POST per target; failures are logged and do not abort the rest.
        for target in targets:
            payload["d"] = target
            response = requests.post(_RESOURCE, data=payload, timeout=CONF_TIMEOUT)
            if response.status_code != HTTP_OK:
                _LOGGER.error("Pushsafer failed with: %s", response.text)
            else:
                _LOGGER.debug("Push send: %s", response.json())

    @classmethod
    def get_base64(cls, filebyte, mimetype):
        """Convert the image to the expected base64 string of pushsafer.

        Returns None (and warns) for MIME types Pushsafer does not accept.
        """
        if mimetype not in _ALLOWED_IMAGES:
            _LOGGER.warning("%s is a not supported mimetype for images", mimetype)
            return None

        base64_image = base64.b64encode(filebyte).decode("utf8")
        return f"data:{mimetype};base64,{base64_image}"

    def load_from_url(self, url=None, username=None, password=None, auth=None):
        """Load image/document/etc from URL.

        When username and password are given, HTTP basic auth is used
        (the *auth* argument is currently unused).
        """
        if url is not None:
            _LOGGER.debug("Downloading image from %s", url)
            if username is not None and password is not None:
                auth_ = HTTPBasicAuth(username, password)
                response = requests.get(url, auth=auth_, timeout=CONF_TIMEOUT)
            else:
                response = requests.get(url, timeout=CONF_TIMEOUT)
            return self.get_base64(response.content, response.headers["content-type"])
        _LOGGER.warning("url not found in param")
        return None

    def load_from_file(self, local_path=None):
        """Load image/document/etc from a local path.

        Returns the base64 data URI, or None on any failure. Restructured
        with guard clauses; previously a path rejected by is_allowed_path
        returned None silently -- now it is logged.
        """
        if local_path is None:
            _LOGGER.warning("Local path not found in params!")
            return None
        if not self.is_allowed_path(local_path):
            # Fix: this failure used to be completely silent.
            _LOGGER.warning("Path %s is not allowed (allowlist_external_dirs)", local_path)
            return None
        try:
            _LOGGER.debug("Loading image from local path")
            file_mimetype = mimetypes.guess_type(local_path)
            _LOGGER.debug("Detected mimetype %s", file_mimetype)
            with open(local_path, "rb") as binary_file:
                data = binary_file.read()
            return self.get_base64(data, file_mimetype[0])
        except OSError as error:
            _LOGGER.error("Can't load from local path: %s", error)
            return None
| mit |
themadinventor/gerbmerge | gerbmerge/specs.py | 8 | 10158 | #!/usr/bin/env python
"""
Regular expression, SimpleParse, ane message constants.
Requires:
- SimpleParse 2.1 or higher
http://simpleparse.sourceforge.net
--------------------------------------------------------------------
This program is licensed under the GNU General Public License (GPL)
Version 3. See http://www.fsf.org for details of the license.
Rugged Circuits LLC
http://ruggedcircuits.com/gerbmerge
"""
import re
from simpleparse.parser import Parser
# User-facing warning printed by gerbmerge; the [1:-1] slice strips the
# leading and trailing newline from the triple-quoted literal.
DISCLAIMER = """
****************************************************
* R E A D C A R E F U L L Y *
* *
* This program comes with no warranty. You use *
* this program at your own risk. Do not submit *
* board files for manufacture until you have *
* thoroughly inspected the output of this program *
* using a previewing program such as: *
* *
* Windows: *
* - GC-Prevue <http://www.graphicode.com> *
* - ViewMate <http://www.pentalogix.com> *
* *
* Linux: *
* - gerbv <http://gerbv.sourceforge.net> *
* *
* By using this program you agree to take full *
* responsibility for the correctness of the data *
* that is generated by this program. *
****************************************************
"""[1:-1]
# [Options] section defaults. Each value is a tuple:
#   (default, data type, config-file key, GUI help text)
# Data types: "L"  = layers (will show layer selection)
#             "D"  = decimal
#             "DP" = positive decimal
#             "I"  = integer
#             "IP" = positive integer
#             "PI" = path input (will show open dialog)
#             "PO" = path output (will show save dialog)
#             "S"  = string
#             "B"  = boolean
#             "BI" = boolean as integer
#
# THESE DATA TYPES ARE FIXED - CODE MUST CHANGE IF TYPES ARE ADDED/MODIFIED
DEFAULT_OPTIONS = {
    # Spacing in horizontal direction
    'xspacing': ('0.125', "DP", "XSpacing", "1 XSPACING_HELP"),
    # Spacing in vertical direction
    'yspacing': ('0.125', "DP", "YSpacing", "2 YSPACING_HELP"),
    # X-Dimension maximum panel size (Olimex)
    'panelwidth': ('12.6', "DP", "PanelWidth", "3 PANEL_WIDTH"),
    # Y-Dimension maximum panel size (Olimex)
    'panelheight': ('7.8', "DP", "PanelHeight", "4 PanelHeight"),
    # e.g., *toplayer,*bottomlayer
    'cropmarklayers': (None, "L", "CropMarkLayers", "5 CropMarkLayers"),
    # Width (inches) of crop lines
    'cropmarkwidth': ('0.01', "DP", "CropMarkWidth", "6 CropMarkWidth"),
    # as for cropmarklayers
    'cutlinelayers': (None, "L", "CutLineLayers", "7 CutLineLayers"),
    # Width (inches) of cut lines
    'cutlinewidth': ('0.01', "DP", "CutLineWidth", "8 CutLineWidth"),
    # Minimum dimension for selected layers
    'minimumfeaturesize': (None, "S", "MinimumFeatureSize", "Use this option to automatically thicken features on particular layers.\nThis is intended for thickening silkscreen to some minimum width.\nThe value of this option must be a comma-separated list\nof layer names followed by minimum feature sizes (in inches) for that layer.\nComment this out to disable thickening. Example usage is:\n\nMinimumFeatureSize = *topsilkscreen,0.008,*bottomsilkscreen,0.008"),
    # Name of file containing default tool list
    'toollist': (None, "PI", "ToolList", "10 ToolList"),
    # Tolerance for clustering drill sizes
    'drillclustertolerance': ('.002', "DP", "DrillClusterTolerance", "11 DrillClusterTolerance"),
    # Set to 1 to allow multiple jobs to have non-matching layers
    'allowmissinglayers': (0, "BI", "AllowMissingLayers", "12 AllowMissingLayers"),
    # Name of file to which to write fabrication drawing, or None
    'fabricationdrawingfile': (None, "PO", "FabricationDrawingFile", "13 FabricationDrawingFile"),
    # Name of file containing text to write to fab drawing
    'fabricationdrawingtext': (None, "PI", "FabricationDrawingText", "14 FabricationDrawingText"),
    # Number of digits after the decimal point in input Excellon files
    'excellondecimals': (4, "IP", "ExcellonDecimals", "15 ExcellonDecimals"),
    # Generate leading zeros in merged Excellon output file
    'excellonleadingzeros': (0, "IP", "ExcellonLeadingZeros", "16 ExcellonLeadingZeros"),
    # Name of file to which to write simple box outline, or None
    'outlinelayerfile': (None, "PO", "OutlineLayerFile", "17 OutlineLayerFile"),
    # Name of file to which to write scoring data, or None
    'scoringfile': (None, "PO", "ScoringFile", "18 ScoringFile"),
    # Inches of extra room to leave on left side of panel for tooling
    'leftmargin': (0.0, "DP", "LeftMargin", "19 LeftMargin"),
    # Inches of extra room to leave on top side of panel for tooling
    'topmargin': (0.0, "DP", "TopMargin", "20 TopMargin"),
    # Inches of extra room to leave on right side of panel for tooling
    'rightmargin': (0.0, "DP", "RightMargin", "21 RightMargin"),
    # Inches of extra room to leave on bottom side of panel for tooling
    'bottommargin': (0.0, "DP", "BottomMargin", "22 BottomMargin"),
    # List of X,Y points at which to draw fiducials
    'fiducialpoints': (None, "S", "FiducialPoints", "23 FiducialPoints"),
    }
DEFAULT_OPTIONS_TYPES = ["IP", "I", "DP", "D", "B", "BI", "S", "PI", "PO", "L"] # List of option types in display order
# [GerbMergeGUI] section defaults
DEFAULT_GERBMERGEGUI = {
    'unit': "IN", # Unit inidicator: IN, MIL, MM
    'layout': "AUTOMATIC", # Indicates layout: GRID, AUTOMATIC, MANUAL, GRID_FILE, MANUAL_FILE
    'runtime': 10, # Seconds to run automatic placement
    'rows': 1, # Number of rows in grid layout
    'columns': 1, # Number of columns in grid layout
    'mergedoutput': False, # Path of output directory
    'mergedname': False, # Prefix of merged output files
    'layoutfilepath': "", # Path of layout file
    'placementfilepath': "", # Path of placement file
    'configurationfilepath': "", # Path of configuration file
    'configurationcomplete': False, # Indicates that run dialog may be skipped to upon load
    }
# Job names
# NOTE(review): the "Vaild" typo below is preserved -- these strings are shown
# to the user at runtime and must stay byte-identical here.
RE_VALID_JOB = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$')
RE_VALID_JOB_MESSAGE = "Vaild Characters: a-z, A-Z, 0-9, underscores, hyphens\nFirst Character must be: a-z, A-Z, 0-9"
RESERVED_JOB_NAMES = ("Options", "MergeOutputFiles", "GerbMergeGUI") ##not implemented yet
# Layer names
RE_VALID_LAYER = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$')
RE_VALID_LAYER_MESSAGE = "Vaild Characters: a-z, A-Z, 0-9, underscores, hyphens\nFirst Character must be: a-z, A-Z, 0-9"
DEFAULT_LAYERS = [ "BoardOutline",
                   "TopCopper",
                   "BottomCopper",
                   "InnerLayer2",
                   "InnerLayer3",
                   "TopSilkscreen",
                   "BottomSilkscreen",
                   "TopSoldermask",
                   "BottomSoldermask",
                   "TopSolderPasteMask",
                   "BottomSolderPasteMask",
                   "Drills" ]
REQUIRED_LAYERS = ["BoardOutline", "Drills"]
RESERVED_LAYER_NAMES = () ##add "mergeout", not implemented yet
#Output names
RE_VALID_OUTPUT_NAME = re.compile(r'^[a-zA-Z0-9_-]+$')
RE_VALID_OUTPUT_NAME_MESSAGE = "Vaild Characters: a-z, A-Z, 0-9, underscores, hyphens"
REQUIRED_LAYERS_OUTPUT = ["BoardOutline", "ToolList", "Placement", "Drills"]
# Default dictionary of layer names to file extensions
FILE_EXTENSIONS = { "boardoutline": "GBO",
                    "topcopper": "GTL",
                    "bottomcopper": "GBL",
                    "innerlayer2": "G2",
                    "innerlayer3": "G3",
                    "topsilkscreen": "GTO",
                    "bottomsilkscreen": "GBO",
                    "topsoldermask": "GTS",
                    "bottomsoldermask": "GBS",
                    "topsolderpastemask": "GTP",
                    "bottomsolderpastemask": "GBP",
                    "drills": "GDD",
                    "placement": "TXT",
                    "toollist": "DRL",
                    }
DEFAULT_EXTENSION = "GER"
#Gerbmerge command-line option strings
PLACE_FILE = "--place-file="
NO_TRIM_GERBER = "--no-trim-gerber"
NO_TRIM_EXCELLON = "--no-trim-excellon"
ROTATED_OCTAGONS = "--octagons=rotate"
SEARCH_TIMEOUT = "--search-timeout="
| gpl-3.0 |
jmesteve/saas3 | openerp/addons/mrp_operations/report/mrp_code_barcode.py | 381 | 1511 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class code_barcode(report_sxw.rml_parse):
    """RML parser for the MRP operation-code barcode report.

    Exposes the ``time`` module to the report template so RML
    expressions can format timestamps.
    """
    def __init__(self, cr, uid, name, context):
        super(code_barcode, self).__init__(cr, uid, name, context=context)
        # make the time module available inside the template
        self.localcontext.update(time=time)
report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rui-castro/Sick-Beard | lib/cherrypy/_cpthreadinglocal.py | 68 | 6619 | # This is a backport of Python-2.4's threading.local() implementation
"""Thread-local objects
(Note that this module provides a Python version of thread
threading.local class. Depending on the version of Python you're
using, there may be a faster one available. You should always import
the local class from threading.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
# Threading import is at end
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if args or kw and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
    """Swap in the calling thread's private ``__dict__`` for *self*.

    On first access from a thread, a fresh dict is created, installed in
    that thread's ``__dict__`` under the instance key, and the subclass
    ``__init__`` (if any) is re-run with the originally supplied
    arguments so each thread starts from the same initial state.
    """
    key = object.__getattribute__(self, '_local__key')
    d = currentThread().__dict__.get(key)
    if d is None:
        # First access from this thread: create and register a new dict
        # before running __init__, so __init__'s own attribute writes
        # land in the thread-local dict.
        d = {}
        currentThread().__dict__[key] = d
        object.__setattr__(self, '__dict__', d)
        # we have a new instance dict, so call out __init__ if we have
        # one
        cls = type(self)
        if cls.__init__ is not object.__init__:
            args, kw = object.__getattribute__(self, '_local__args')
            cls.__init__(self, *args, **kw)
    else:
        object.__setattr__(self, '__dict__', d)
class local(_localbase):
    """Thread-local storage object (backport of Python 2.4 threading.local).

    Every attribute access first swaps in the calling thread's private
    ``__dict__`` (via ``_patch``) while holding the instance lock, so
    each thread sees its own independent attribute set.
    """
    def __getattribute__(self, name):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__getattribute__(self, name)
        finally:
            lock.release()

    def __setattr__(self, name, value):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__setattr__(self, name, value)
        finally:
            lock.release()

    def __delattr__(self, name):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__delattr__(self, name)
        finally:
            lock.release()

    # __del__ is produced by an immediately-invoked factory so the cleanup
    # code keeps local references to `enumerate` and
    # `object.__getattribute__`; module globals may already be cleared
    # during interpreter shutdown when __del__ runs.
    def __del__():
        threading_enumerate = enumerate
        __getattribute__ = object.__getattribute__

        def __del__(self):
            key = __getattribute__(self, '_local__key')
            try:
                threads = list(threading_enumerate())
            except:
                # if enumerate fails, as it seems to do during
                # shutdown, we'll skip cleanup under the assumption
                # that there is nothing to clean up
                return
            # Drop this instance's per-thread dict from every live thread.
            for thread in threads:
                try:
                    __dict__ = thread.__dict__
                except AttributeError:
                    # Thread is dying, rest in peace
                    continue
                if key in __dict__:
                    try:
                        del __dict__[key]
                    except KeyError:
                        pass # didn't have anything in this thread
        return __del__
    __del__ = __del__()
from threading import currentThread, enumerate, RLock
| gpl-3.0 |
maryklayne/Funcao | sympy/physics/quantum/density.py | 10 | 9893 | from __future__ import print_function, division
from itertools import product
from sympy import Tuple, Add, Mul, Matrix, log, expand, sqrt, Rational
from sympy.core.trace import Tr
from sympy.core.compatibility import u
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.operator import HermitianOperator, OuterProduct, Operator
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.matrixutils import numpy_ndarray, scipy_sparse_matrix, to_numpy
from sympy.physics.quantum.tensorproduct import TensorProduct, tensor_product_simp
class Density(HermitianOperator):
    """Density operator for representing mixed states.

    TODO: Density operator support for Qubits

    Parameters
    ==========

    values : tuples/lists
        Each tuple/list should be of form (state, prob) or [state,prob]

    Examples
    =========

    Create a density operator with 2 states represented by Kets.

    >>> from sympy.physics.quantum.state import Ket
    >>> from sympy.physics.quantum.density import Density
    >>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
    >>> d
    'Density'((|0>, 0.5),(|1>, 0.5))
    """

    @classmethod
    def _eval_args(cls, args):
        # call this to qsympify the args
        args = super(Density, cls)._eval_args(args)

        # Every argument must be a (state, probability) pair.
        for arg in args:
            # Check if arg is a tuple
            if not (isinstance(arg, Tuple) and
                    len(arg) == 2):
                raise ValueError("Each argument should be of form [state,prob]"
                                 " or ( state, prob )")
        return args

    def states(self):
        """Return list of all states.

        Examples
        =========

        >>> from sympy.physics.quantum.state import Ket
        >>> from sympy.physics.quantum.density import Density
        >>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
        >>> d.states()
        (|0>, |1>)
        """
        return Tuple(*[arg[0] for arg in self.args])

    def probs(self):
        """Return list of all probabilities.

        Examples
        =========

        >>> from sympy.physics.quantum.state import Ket
        >>> from sympy.physics.quantum.density import Density
        >>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
        >>> d.probs()
        (0.5, 0.5)
        """
        return Tuple(*[arg[1] for arg in self.args])

    def get_state(self, index):
        """Return specfic state by index.

        Parameters
        ==========

        index : index of state to be returned

        Examples
        =========

        >>> from sympy.physics.quantum.state import Ket
        >>> from sympy.physics.quantum.density import Density
        >>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
        >>> d.states()[1]
        |1>
        """
        state = self.args[index][0]
        return state

    def get_prob(self, index):
        """Return probability of specific state by index.

        Parameters
        ===========

        index : index of states whose probability is returned.

        Examples
        =========

        >>> from sympy.physics.quantum.state import Ket
        >>> from sympy.physics.quantum.density import Density
        >>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
        >>> d.probs()[1]
        0.500000000000000
        """
        prob = self.args[index][1]
        return prob

    def apply_op(self, op):
        """op will operate on each individual state.

        Parameters
        ==========

        op : Operator

        Examples
        =========

        >>> from sympy.physics.quantum.state import Ket
        >>> from sympy.physics.quantum.density import Density
        >>> from sympy.physics.quantum.operator import Operator
        >>> A = Operator('A')
        >>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
        >>> d.apply_op(A)
        'Density'((A*|0>, 0.5),(A*|1>, 0.5))
        """
        # Apply op to every state while keeping its probability unchanged.
        new_args = [(op*state, prob) for (state, prob) in self.args]
        return Density(*new_args)

    def doit(self, **hints):
        """Expand the density operator into an outer product format.

        Examples
        =========

        >>> from sympy.physics.quantum.state import Ket
        >>> from sympy.physics.quantum.density import Density
        >>> from sympy.physics.quantum.operator import Operator
        >>> A = Operator('A')
        >>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
        >>> d.doit()
        0.5*|0><0| + 0.5*|1><1|
        """
        terms = []
        for (state, prob) in self.args:
            state = state.expand()  # needed to break up (a+b)*c
            if (isinstance(state, Add)):
                # Superposition: cross terms |a><b| for every pair of
                # summands contribute to the outer product.
                for arg in product(state.args, repeat=2):
                    terms.append(prob *
                                 self._generate_outer_prod(arg[0], arg[1]))
            else:
                terms.append(prob *
                             self._generate_outer_prod(state, state))
        return Add(*terms)

    def _generate_outer_prod(self, arg1, arg2):
        # Build c1*c2 * |nc1><nc2| from the commutative/non-commutative
        # factorizations of the two states.
        c_part1, nc_part1 = arg1.args_cnc()
        c_part2, nc_part2 = arg2.args_cnc()

        if ( len(nc_part1) == 0 or
             len(nc_part2) == 0 ):
            raise ValueError('Atleast one-pair of'
                             ' Non-commutative instance required'
                             ' for outer product.')

        # Muls of Tensor Products should be expanded
        # before this function is called
        if (isinstance(nc_part1[0], TensorProduct) and
                len(nc_part1) == 1 and len(nc_part2) == 1):
            op = tensor_product_simp(nc_part1[0] * Dagger(nc_part2[0]))
        else:
            op = Mul(*nc_part1) * Dagger(Mul(*nc_part2))

        return Mul(*c_part1)*Mul(*c_part2)*op

    def _represent(self, **options):
        # Represent the expanded (outer-product) form of the operator.
        return represent(self.doit(), **options)

    def _print_operator_name_latex(self, printer, *args):
        return printer._print(r'\rho', *args)

    def _print_operator_name_pretty(self, printer, *args):
        # NOTE(review): `unichr` is a Python-2 builtin and is not imported
        # here; on Python 3 this would raise NameError — confirm the
        # intended compatibility shim.
        return prettyForm(unichr('\u03C1'))

    def _eval_trace(self, **kwargs):
        indices = kwargs.get('indices', [])
        return Tr(self.doit(), indices).doit()

    def entropy(self):
        """ Compute the entropy of a density matrix.

        Refer to density.entropy() method for examples.
        """
        return entropy(self)
def entropy(density):
    """Compute the entropy of a matrix/density object.

    This computes -Tr(density*ln(density)) using the eigenvalue decomposition
    of density, which is given as either a Density instance or a matrix
    (numpy.ndarray, sympy.Matrix or scipy.sparse).

    Parameters
    ==========

    density : density matrix of type Density, sympy matrix,
              scipy.sparse or numpy.ndarray

    Examples:
    ========

    >>> from sympy.physics.quantum.density import Density, entropy
    >>> from sympy.physics.quantum.represent import represent
    >>> from sympy.physics.quantum.matrixutils import scipy_sparse_matrix
    >>> from sympy.physics.quantum.spin import JzKet, Jz
    >>> from sympy import S, log
    >>> up = JzKet(S(1)/2,S(1)/2)
    >>> down = JzKet(S(1)/2,-S(1)/2)
    >>> d = Density((up,0.5),(down,0.5))
    >>> entropy(d)
    log(2)/2
    """
    # Normalize the input to either a sympy Matrix or a numpy array.
    if isinstance(density, Density):
        density = represent(density)  # represent in Matrix

    if isinstance(density, scipy_sparse_matrix):
        density = to_numpy(density)

    if isinstance(density, Matrix):
        # Symbolic path: entropy from the (symbolic) eigenvalues.
        eigvals = density.eigenvals().keys()
        return expand(-sum(e*log(e) for e in eigvals))
    elif isinstance(density, numpy_ndarray):
        # Numeric path: use numpy's eigenvalue solver.
        import numpy as np
        eigvals = np.linalg.eigvals(density)
        return -np.sum(eigvals*np.log(eigvals))
    else:
        raise ValueError(
            "numpy.ndarray, scipy.sparse or sympy matrix expected")
def fidelity(state1, state2):
    """ Computes the fidelity [1]_ between two quantum states

    The arguments provided to this function should be a square matrix or a
    Density object. If it is a square matrix, it is assumed to be diagonalizable.

    Parameters:
    ==========

    state1, state2 : a density matrix or Matrix

    Examples:
    =========

    >>> from sympy import S, sqrt
    >>> from sympy.physics.quantum.dagger import Dagger
    >>> from sympy.physics.quantum.spin import JzKet
    >>> from sympy.physics.quantum.density import Density, fidelity
    >>> from sympy.physics.quantum.represent import represent
    >>>
    >>> up = JzKet(S(1)/2,S(1)/2)
    >>> down = JzKet(S(1)/2,-S(1)/2)
    >>> amp = 1/sqrt(2)
    >>> updown = (amp * up) + (amp * down)
    >>>
    >>> # represent turns Kets into matrices
    >>> up_dm = represent(up * Dagger(up))
    >>> down_dm = represent(down * Dagger(down))
    >>> updown_dm = represent(updown * Dagger(updown))
    >>>
    >>> fidelity(up_dm, up_dm)
    1
    >>> fidelity(up_dm, down_dm) #orthogonal states
    0
    >>> fidelity(up_dm, updown_dm).evalf().round(3)
    0.707

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Fidelity_of_quantum_states
    """
    # Convert Density objects to their matrix representation.
    state1 = represent(state1) if isinstance(state1, Density) else state1
    state2 = represent(state2) if isinstance(state2, Density) else state2

    if (not isinstance(state1, Matrix) or
            not isinstance(state2, Matrix)):
        raise ValueError("state1 and state2 must be of type Density or Matrix "
                         "received type=%s for state1 and type=%s for state2" %
                         (type(state1), type(state2)))

    # Both matrices must have the same shape AND be square.  The original
    # condition (`shape mismatch AND is_square`) only raised when the shapes
    # differed and state1 happened to be square, letting equal-shaped
    # non-square inputs (and mismatched non-square state1) slip through.
    if state1.shape != state2.shape or not state1.is_square:
        raise ValueError("The dimensions of both args should be equal and the "
                         "matrix obtained should be a square matrix")

    sqrt_state1 = state1**Rational(1, 2)
    return Tr((sqrt_state1 * state2 * sqrt_state1)**Rational(1, 2)).doit()
| bsd-3-clause |
thandang/TComponent | TComponents/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/jar.py | 34 | 3877 | """SCons.Tool.jar
Tool-specific initialization for jar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/jar.py 5023 2010/06/14 22:05:46 scons"
import SCons.Subst
import SCons.Util
def jarSources(target, source, env, for_signature):
    """Only include sources that are not a manifest file.

    When $JARCHDIR is set (or a source carries a java_classdir
    attribute), each source is emitted as `-C <dir> <relative-path>` so
    jar changes directory before adding it.
    """
    try:
        env['JARCHDIR']
    except KeyError:
        jarchdir_set = False
    else:
        jarchdir_set = True
        jarchdir = env.subst('$JARCHDIR', target=target, source=source)
        if jarchdir:
            jarchdir = env.fs.Dir(jarchdir)
    result = []
    for src in source:
        contents = src.get_text_contents()
        # Skip manifest files; they are handled by $_JARMANIFEST.
        if contents[:16] != "Manifest-Version":
            if jarchdir_set:
                _chdir = jarchdir
            else:
                try:
                    _chdir = src.attributes.java_classdir
                except AttributeError:
                    _chdir = None
            if _chdir:
                # If we are changing the dir with -C, then sources should
                # be relative to that directory.
                src = SCons.Subst.Literal(src.get_path(_chdir))
                result.append('-C')
                result.append(_chdir)
            result.append(src)
    return result
def jarManifest(target, source, env, for_signature):
    """Return the first manifest file found among the sources, or ''."""
    for candidate in source:
        if candidate.get_text_contents().startswith("Manifest-Version"):
            return candidate
    return ''
def jarFlags(target, source, env, for_signature):
    """Return $JARFLAGS, appending 'm' when a manifest source is present."""
    flags = env.subst('$JARFLAGS', target=target, source=source)
    has_manifest = any(
        src.get_text_contents().startswith("Manifest-Version")
        for src in source)
    if has_manifest and 'm' not in flags:
        return flags + 'm'
    return flags
def generate(env):
    """Add Builders and construction variables for jar to an Environment."""
    SCons.Tool.CreateJarBuilder(env)

    env['JAR'] = 'jar'
    env['JARFLAGS'] = SCons.Util.CLVar('cf')
    # Generator functions: computed lazily from the actual sources.
    env['_JARFLAGS'] = jarFlags
    env['_JARMANIFEST'] = jarManifest
    env['_JARSOURCES'] = jarSources
    # Long command lines go through a tempfile on platforms that need it.
    env['_JARCOM'] = '$JAR $_JARFLAGS $TARGET $_JARMANIFEST $_JARSOURCES'
    env['JARCOM'] = "${TEMPFILE('$_JARCOM')}"
    env['JARSUFFIX'] = '.jar'
def exists(env):
    # The tool is usable iff a `jar` executable can be located.
    return env.Detect('jar')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/dateutil/zoneinfo/__init__.py | 48 | 6810 | # -*- coding: utf-8 -*-
import logging
import os
import warnings
import tempfile
import shutil
import json
from tarfile import TarFile
from pkgutil import get_data
from io import BytesIO
from contextlib import closing
from dateutil.tz import tzfile
__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata", "rebuild"]
ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
METADATA_FN = 'METADATA'
# python2.6 compatability. Note that TarFile.__exit__ != TarFile.close, but
# it's close enough for python2.6
tar_open = TarFile.open
if not hasattr(TarFile, '__exit__'):
    # Pre-2.7 TarFile is not a context manager; wrap it in closing() so
    # `with tar_open(...)` still works.
    def tar_open(*args, **kwargs):
        return closing(TarFile.open(*args, **kwargs))
class tzfile(tzfile):
    # Shadow dateutil.tz.tzfile only to make instances picklable:
    # unpickling re-resolves the zone by filename via gettz().
    def __reduce__(self):
        return (gettz, (self._filename,))
def getzoneinfofile_stream():
    """Return the bundled zoneinfo tarball as a BytesIO, or None if absent."""
    try:
        return BytesIO(get_data(__name__, ZONEFILENAME))
    except IOError as e:  # TODO  switch to FileNotFoundError?
        # Missing/unreadable tarball is non-fatal: warn and signal "no data".
        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
        return None
class ZoneInfoFile(object):
    """In-memory index of the time zones contained in a zoneinfo tarball.

    Populates ``self.zones`` (name -> tzfile) and ``self.metadata``
    (parsed METADATA json, or None).
    """
    def __init__(self, zonefile_stream=None):
        if zonefile_stream is not None:
            with tar_open(fileobj=zonefile_stream, mode='r') as tf:
                # dict comprehension does not work on python2.6
                # TODO: get back to the nicer syntax when we ditch python2.6
                # self.zones = {zf.name: tzfile(tf.extractfile(zf),
                #               filename = zf.name)
                #               for zf in tf.getmembers() if zf.isfile()}
                self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
                                                   filename=zf.name))
                                  for zf in tf.getmembers()
                                  if zf.isfile() and zf.name != METADATA_FN)
                # deal with links: They'll point to their parent object. Less
                # waste of memory
                # links = {zl.name: self.zones[zl.linkname]
                #          for zl in tf.getmembers() if
                #          zl.islnk() or zl.issym()}
                links = dict((zl.name, self.zones[zl.linkname])
                             for zl in tf.getmembers() if
                             zl.islnk() or zl.issym())
                self.zones.update(links)
                try:
                    metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
                    metadata_str = metadata_json.read().decode('UTF-8')
                    self.metadata = json.loads(metadata_str)
                except KeyError:
                    # no metadata in tar file
                    self.metadata = None
        else:
            # No stream supplied: empty database.
            self.zones = dict()
            self.metadata = None

    def get(self, name, default=None):
        """
        Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
        for retrieving zones from the zone dictionary.

        :param name:
            The name of the zone to retrieve. (Generally IANA zone names)

        :param default:
            The value to return in the event of a missing key.

        .. versionadded:: 2.6.0
        """
        return self.zones.get(name, default)
# The current API has gettz as a module function, although in fact it taps into
# a stateful class. So as a workaround for now, without changing the API, we
# will create a new "global" class instance the first time a user requests a
# timezone. Ugly, but adheres to the api.
#
# TODO: Remove after deprecation period.
_CLASS_ZONE_INSTANCE = list()
def get_zonefile_instance(new_instance=False):
    """
    Convenience accessor for a :class:`ZoneInfoFile` built from the data
    shipped with ``dateutil``.  A single instance is cached on the
    function object and reused across calls.

    :param new_instance:
        If ``True``, a fresh :class:`ZoneInfoFile` is created and becomes
        the cached instance for subsequent calls.

    :return:
        Returns a :class:`ZoneInfoFile` object.

    .. versionadded:: 2.6
    """
    cached = None if new_instance else getattr(
        get_zonefile_instance, '_cached_instance', None)

    if cached is None:
        cached = ZoneInfoFile(getzoneinfofile_stream())
        get_zonefile_instance._cached_instance = cached

    return cached
def gettz(name):
    """
    This retrieves a time zone from the local zoneinfo tarball that is packaged
    with dateutil.

    :param name:
        An IANA-style time zone name, as found in the zoneinfo file.

    :return:
        Returns a :class:`dateutil.tz.tzfile` time zone object.

    .. warning::
        It is generally inadvisable to use this function, and it is only
        provided for API compatibility with earlier versions. This is *not*
        equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
        time zone based on the inputs, favoring system zoneinfo. This is ONLY
        for accessing the dateutil-specific zoneinfo (which may be out of
        date compared to the system zoneinfo).

    .. deprecated:: 2.6
        If you need to use a specific zoneinfofile over the system zoneinfo,
        instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
        :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.

        Use :func:`get_zonefile_instance` to retrieve an instance of the
        dateutil-provided zoneinfo.
    """
    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
                  "to use the dateutil-provided zoneinfo files, instantiate a "
                  "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Lazily create the shared module-level ZoneInfoFile on first use.
    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].zones.get(name)
def gettz_db_metadata():
    """ Get the zonefile metadata

    See `zonefile_metadata`_

    :returns:
        A dictionary with the database metadata

    .. deprecated:: 2.6
        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
    """
    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
                  "versions, to use the dateutil-provided zoneinfo files, "
                  "ZoneInfoFile object and query the 'metadata' attribute "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Same lazily-created singleton as used by gettz().
    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].metadata
| mit |
eamuntz/Django-Tut | env/lib/python2.7/site-packages/setuptools/command/alias.py | 285 | 2486 | import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
    """Quote an argument for later parsing by shlex.split()"""
    special = ('"', "'", "\\", "#")
    if any(ch in arg for ch in special):
        return repr(arg)
    # Whitespace (or emptiness) also forces quoting: splitting must give
    # back exactly the original argument for it to be safe unquoted.
    if arg.split() != [arg]:
        return repr(arg)
    return arg
class alias(option_base):
    """Define a shortcut that invokes one or more commands"""

    description = "define a shortcut to invoke one or more commands"
    # Everything after the alias name on the command line becomes the
    # aliased command, unparsed.
    command_consumes_arguments = True

    user_options = [
        ('remove', 'r', 'remove (unset) the alias'),
    ] + option_base.user_options
    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        self.args = None
        self.remove = None

    def finalize_options(self):
        option_base.finalize_options(self)
        # --remove takes exactly one argument: the alias name to unset.
        if self.remove and len(self.args) != 1:
            raise DistutilsOptionError(
                "Must specify exactly one argument (the alias name) when "
                "using --remove"
            )

    def run(self):
        aliases = self.distribution.get_option_dict('aliases')

        if not self.args:
            # No arguments: list all currently defined aliases.
            print("Command Aliases")
            print("---------------")
            for alias in aliases:
                print("setup.py alias", format_alias(alias, aliases))
            return

        elif len(self.args) == 1:
            alias, = self.args
            if self.remove:
                # Writing None as the value removes the alias entry.
                command = None
            elif alias in aliases:
                # Single existing name: just show its definition.
                print("setup.py alias", format_alias(alias, aliases))
                return
            else:
                print("No alias definition found for %r" % alias)
                return
        else:
            # First argument is the alias name; the rest form the command.
            alias = self.args[0]
            command = ' '.join(map(shquote, self.args[1:]))

        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
    """Render one alias as the `setup.py alias ...` command that defines it."""
    source, command = aliases[name]

    # Translate the config-file path back into the option that targets it.
    if source == config_file('global'):
        source = '--global-config '
    elif source == config_file('user'):
        source = '--user-config '
    elif source == config_file('local'):
        source = ''
    else:
        # NOTE(review): unlike the branches above, there is no trailing
        # space here, so the filename option runs into the alias name in
        # the output -- confirm whether that is intentional.
        source = '--filename=%r' % source
    return source + name + ' ' + command
| mit |
thaumos/ansible | lib/ansible/modules/network/f5/bigip_management_route.py | 38 | 13077 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_management_route
short_description: Manage system management routes on a BIG-IP
description:
- Configures route settings for the management interface of a BIG-IP.
version_added: 2.6
options:
name:
description:
- Specifies the name of the management route.
type: str
required: True
description:
description:
- Description of the management route.
type: str
gateway:
description:
- Specifies that the system forwards packets to the destination through the
gateway with the specified IP address.
type: str
network:
description:
- The subnet and netmask to be used for the route.
- To specify that the route is the default route for the system, provide the
value C(default).
- Only one C(default) entry is allowed.
- This parameter cannot be changed after it is set. Therefore, if you do need to change
it, it is required that you delete and create a new route.
type: str
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a management route
bigip_management_route:
name: tacacs
description: Route to TACACS
gateway: 10.10.10.10
network: 11.11.11.0/24
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the management route.
returned: changed
type: str
sample: Route to TACACS
gateway:
description: The new gateway of the management route.
returned: changed
type: str
sample: 10.10.10.10
network:
description: The new network to use for the management route.
returned: changed
type: str
sample: default
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.compat.ipaddress import ip_network
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.compat.ipaddress import ip_network
class Parameters(AnsibleF5Parameters):
    # Mapping of BIG-IP REST attribute names to module parameter names
    # (empty: the names already match one-to-one).
    api_map = {
    }

    # Attributes sent to the BIG-IP REST API on create/update.
    api_attributes = [
        'description',
        'gateway',
        'network',
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'description',
        'gateway',
        'network',
    ]

    # Parameters compared when deciding whether an update is needed.
    updatables = [
        'description',
        'gateway',
        'network',
    ]
class ApiParameters(Parameters):
    # Parameters as read back from the device; no extra translation needed.
    pass
class ModuleParameters(Parameters):
    """Parameters supplied by the user, normalized and validated."""

    @property
    def network(self):
        """Return the route network as a canonical CIDR string.

        ``default`` is passed through verbatim; any other value must
        parse as a network address (with CIDR), otherwise F5ModuleError
        is raised.
        """
        if self._values['network'] is None:
            return None
        if self._values['network'] == 'default':
            return 'default'
        try:
            addr = ip_network(u"{0}".format(str(self._values['network'])))
            return str(addr)
        except ValueError:
            raise F5ModuleError(
                "The 'network' must either be a network address (with CIDR) or the word 'default'."
            )

    @property
    def gateway(self):
        """Return the gateway IP address, raising F5ModuleError if invalid."""
        if self._values['gateway'] is None:
            return None
        if is_valid_ip(self._values['gateway']):
            return self._values['gateway']
        else:
            # Fixed grammar in the user-facing error message
            # ("must an" -> "must be an").
            raise F5ModuleError(
                "The 'gateway' must be an IP address."
            )
class Changes(Parameters):
    def to_return(self):
        """Return the returnable parameters as a plain, filtered dict."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Best-effort: a partially built result is acceptable for
            # reporting purposes.
            pass
        return result
class UsableChanges(Changes):
    # Changes in the shape expected by the BIG-IP REST API.
    pass
class ReportableChanges(Changes):
    # Changes in the shape reported back to the Ansible user.
    pass
class Difference(object):
    """Compute per-parameter differences between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # Prefer a specialized comparison property when one exists;
        # otherwise fall back to simple inequality.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # Attribute absent on the device: desired value wins.
            return attr1

    @property
    def network(self):
        if self.want.network is None:
            return None
        # '0.0.0.0/0' and 'default' describe the same route; not a change.
        if self.want.network == '0.0.0.0/0' and self.have.network == 'default':
            return None
        # The REST API cannot modify 'network' in place; the route must be
        # deleted and re-created instead.
        if self.want.network != self.have.network:
            raise F5ModuleError(
                "'network' cannot be changed after it is set."
            )
class ModuleManager(object):
    """Drives create/update/delete of a BIG-IP management route via the iControl REST API."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # 'want' holds the requested configuration; 'have' is populated from
        # the device only on the update path (see update()).
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # Creation path: seed 'changes' with every user-supplied returnable value.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Update path: diff desired vs. device state; True when anything differs.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point: apply the requested state and return the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface deprecation warnings that parameter processing stashed in the result.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the resource exists; returns True when a change was (or would be) made."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        """Ensure the resource is removed; returns True when a change was (or would be) made."""
        if self.exists():
            return self.remove()
        return False

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Check mode: report the pending change without applying it.
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def exists(self):
        """Return True when the named management route exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/sys/management-route/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.name
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as "not found" rather than erroring out.
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def create_on_device(self):
        """POST the new resource; raises F5ModuleError on a 400/403 reply."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/sys/management-route/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']

    def update_on_device(self):
        """PATCH only the changed parameters; raises F5ModuleError on a 400 reply."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/sys/management-route/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.name
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        """DELETE the resource; raises F5ModuleError unless the device returns 200."""
        uri = "https://{0}:{1}/mgmt/tm/sys/management-route/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.name
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """GET the current resource state and wrap it in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/sys/management-route/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.name
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Describes the arguments accepted by this module."""

    def __init__(self):
        self.supports_check_mode = True
        module_args = dict(
            name=dict(required=True),
            gateway=dict(),
            network=dict(),
            description=dict(),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            )
        )
        # Start from the shared F5 arguments and layer module-specific ones on top.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the spec, run the manager, report the result."""
    arg_spec = ArgumentSpec()
    ansible_module = AnsibleModule(
        argument_spec=arg_spec.argument_spec,
        supports_check_mode=arg_spec.supports_check_mode
    )
    try:
        manager = ModuleManager(module=ansible_module)
        outcome = manager.exec_module()
        ansible_module.exit_json(**outcome)
    except F5ModuleError as err:
        ansible_module.fail_json(msg=str(err))
if __name__ == '__main__':
main()
| gpl-3.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/ptpython/validator.py | 3 | 1664 | from __future__ import unicode_literals
from prompt_toolkit.validation import Validator, ValidationError
__all__ = (
'PythonValidator',
)
class PythonValidator(Validator):
    """
    Validator that rejects input containing Python syntax errors.

    :param get_compiler_flags: Callable that returns the currently
        active compiler flags.
    """
    def __init__(self, get_compiler_flags=None):
        self.get_compiler_flags = get_compiler_flags

    def validate(self, document):
        """
        Raise ValidationError when the document is not valid Python.
        """
        text = document.text

        # A leading Ctrl-Z means EOF in a Python REPL; always accept it.
        if text.startswith('\x1a'):
            return

        try:
            flags = self.get_compiler_flags() if self.get_compiler_flags else 0
            compile(text, '<input>', 'exec', flags=flags, dont_inherit=True)
        except SyntaxError as e:
            # Python 2.7 reports offset None for some inputs (e.g. '4=4'),
            # hence the 'or 1' fallback. (Fixed in Python 3.)
            index = document.translate_row_col_to_index(e.lineno - 1, (e.offset or 1) - 1)
            raise ValidationError(index, 'Syntax Error')
        except TypeError as e:
            # e.g. "compile() expected string without null bytes"
            raise ValidationError(0, str(e))
        except ValueError as e:
            # Python 2 raises ValueError (not SyntaxError) for some invalid
            # escape sequences such as "\x9".
            raise ValidationError(0, 'Syntax Error: %s' % e)
| gpl-3.0 |
omprakasha/odoo | addons/fleet/__openerp__.py | 267 | 2245 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest describing the Fleet Management module.
{
    'name' : 'Fleet Management',
    'version' : '0.1',
    'author' : 'OpenERP S.A.',
    'sequence': 110,  # ordering weight in the apps list
    'category': 'Managing vehicles and contracts',
    'website' : 'https://www.odoo.com/page/fleet',
    'summary' : 'Vehicle, leasing, insurances, costs',
    'description' : """
Vehicle, leasing, insurances, cost
==================================
With this module, Odoo helps you managing all your vehicles, the
contracts associated to those vehicle as well as services, fuel log
entries, costs and many other features necessary to the management
of your fleet of vehicle(s)
Main Features
-------------
* Add vehicles to your fleet
* Manage contracts for vehicles
* Reminder when a contract reach its expiration date
* Add services, fuel log entry, odometer values for all vehicles
* Show all costs associated to a vehicle or to a type of service
* Analysis graph for costs
""",
    'depends' : [
        'base',
        'mail',
        'board'
    ],
    # Data files loaded at module installation (security rules first).
    'data' : [
        'security/fleet_security.xml',
        'security/ir.model.access.csv',
        'fleet_view.xml',
        'fleet_cars.xml',
        'fleet_data.xml',
        'fleet_board_view.xml',
    ],
    'demo': ['fleet_demo.xml'],
    'installable' : True,
    'application' : True,
}
| agpl-3.0 |
GeoODK/onadata | onadata/libs/serializers/share_xform_serializer.py | 10 | 1537 | from django.core.validators import ValidationError
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from rest_framework import serializers
from onadata.libs.models.share_xform import ShareXForm
from onadata.libs.permissions import ROLES
from onadata.libs.serializers.fields.xform_field import XFormField
class ShareXFormSerializer(serializers.Serializer):
    """Serializer that grants a named user a role on a single XForm."""
    xform = XFormField()
    username = serializers.CharField(max_length=255)
    role = serializers.CharField(max_length=50)

    def restore_object(self, attrs, instance=None):
        """Update *instance* in place when given, else build a new ShareXForm."""
        if instance is None:
            return ShareXForm(**attrs)
        instance.xform = attrs.get('xform', instance.xform)
        instance.username = attrs.get('username', instance.username)
        instance.role = attrs.get('role', instance.role)
        return instance

    def validate_username(self, attrs, source):
        """Ensure the supplied username refers to an existing user."""
        username = attrs[source]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            raise ValidationError(_(u"User '%(value)s' does not exist."
                                    % {"value": username}))
        return attrs

    def validate_role(self, attrs, source):
        """Reject roles that are not part of the known ROLES mapping."""
        role = attrs[source]
        if role not in ROLES:
            raise ValidationError(_(u"Unknown role '%(role)s'."
                                    % {"role": role}))
        return attrs
| bsd-2-clause |
MLAB-project/gerbmerge3 | gerbmerge/drillcluster.py | 2 | 5506 | #!/usr/bin/env python
"""
Drill clustering routines to reduce total number of drills and remap
drilling commands to the new reduced drill set.
--------------------------------------------------------------------
This program is licensed under the GNU General Public License (GPL)
Version 3. See http://www.fsf.org for details of the license.
Rugged Circuits LLC
http://ruggedcircuits.com/gerbmerge
"""
global _STATUS
_STATUS = True # indicates status messages should be shown
global _DEBUG
_DEBUG = False # indicates debug and status messages should be shown
def cluster(drills, tolerance, debug=_DEBUG):
    """
    Group drill sizes so that every member of a group lies within the
    shared tolerance band, then return the reduced drill dictionary.
    A tolerance of 0 will effectively disable clustering.
    """
    global _DEBUG
    _DEBUG = debug

    debug_print("\n " + str(len(drills)) + " Original drills:")
    debug_print(drillsToString(drills))
    debug_print("Clustering drill sizes ...", True)

    groups = []
    # Walk sizes in ascending order, appending each to the first compatible group.
    for size in sorted(drills.keys()):
        placed = False
        for position, group in enumerate(groups):
            if not group:
                break
            low, high = min(group), max(group)
            # Compatible when the whole group (including this size) stays
            # within a 2*tolerance window.
            if (size >= high - 2 * tolerance) and (size <= low + 2 * tolerance):
                debug_print(str_d(size) + " belongs with " + str_d(group))
                groups[position].append(size)
                placed = True
                break
        if not placed:
            debug_print(str_d(size) + " belongs in a new cluster")
            groups.append([size])

    debug_print("\n Creating new drill dictionary ...")
    new_drills = {}
    # Each group collapses to one tool whose size is the midpoint of its extremes.
    for tool_num, group in enumerate(groups, 1):
        tool_name = "T{:02d}".format(tool_num)
        group.sort()
        merged_size = (min(group) + max(group)) / 2.0
        new_drills[merged_size] = tool_name
        debug_print("{:s} will be represented by {:s} ({:s})".format(str_d(group), tool_name, str_d(merged_size)))

    debug_print("\n {:d} Clustered Drills:".format(len(new_drills)))
    debug_print(drillsToString(new_drills))
    debug_print("Drill count reduced from {:d} to {:d}".format(len(drills), len(new_drills)), True)
    return new_drills
def remap(jobs, globalToolMap, debug=_DEBUG):
    """
    Rewrite every job's tools and drill commands in terms of the global
    tool map, choosing the closest-diameter global tool for each old tool.
    Returns None.
    """
    global _DEBUG
    _DEBUG = debug
    debug_print("Remapping tools and commands ...", True)

    for layout in jobs:
        job = layout.job  # Access job inside job layout
        debug_print("\n Job name: {:s}".format(job.name))
        debug_print("\n Original job tools:")
        debug_print(str(job.xdiam))
        debug_print("\n Original commands:")
        debug_print(str(job.xcommands))

        remapped_tools = {}
        remapped_commands = {}
        for tool, diam in job.xdiam.items():
            # Pick the global tool whose diameter is closest to this one.
            best_diam, best_tool = globalToolMap[0]
            for cand_diam, cand_tool in globalToolMap:
                if abs(cand_diam - diam) < abs(best_diam - diam):
                    best_tool = cand_tool
                    best_diam = cand_diam
            remapped_tools[best_tool] = best_diam
            # Merge command lists when several old tools map onto one new tool.
            if best_tool in remapped_commands:
                merged = remapped_commands[best_tool]
                merged.extend(job.xcommands[tool])
                remapped_commands[best_tool] = merged
            else:
                remapped_commands[best_tool] = job.xcommands[tool]

        debug_print("\n New job tools:")
        debug_print(str(remapped_tools))
        debug_print("\n New commands:")
        debug_print(str(remapped_commands))
        job.xdiam = remapped_tools
        job.xcommands = remapped_commands
def debug_print(text, status=False, newLine=True):
    """
    Print a debug or status message.

    Messages are shown when debugging is enabled, or when *status* is True
    and status output is enabled.  When *newLine* is False the trailing
    newline is suppressed so follow-up text continues on the same line.
    Returns None.
    """
    if _DEBUG or (status and _STATUS):
        if newLine:
            print(" ", text)
        else:
            # Fix: the old code printed identically in both branches, making
            # the newLine parameter a no-op; suppress the newline here.
            print(" ", text, end="")
def str_d(drills):
    """
    Format a drill size, or a sequence of sizes, for debug/status messages.

    A scalar renders as '0.0200'; a sequence renders as '[0.0200, 0.0300]'.
    Returns the formatted string.
    """
    try:
        sizes = list(drills)
    except TypeError:
        # Bare scalars (floats) are not iterable.  The old code used a bare
        # 'except:', which also swallowed KeyboardInterrupt/SystemExit.
        return "{:.4f}".format(drills)
    # join() handles the empty sequence correctly ('[]'); the old
    # trim-two-chars approach produced ']' for an empty input.
    return "[" + ", ".join("{:.4f}".format(size) for size in sizes) + "]"
def drillsToString(drills):
    """
    Render a drill dictionary as one 'Txx = size' line per drill, sorted
    by size.  Returns the formatted string.
    """
    parts = []
    for size, name in sorted(drills.items()):
        parts.append("{:s} = {:s}\n ".format(name, str_d(size)))
    return "".join(parts)
"""
The following code runs test drill clusterings with random drill sets.
"""
if __name__ == "__main__":
    # Ad-hoc smoke test: cluster a randomly generated drill set.
    import random
    print(" Clustering random drills...")
    old = {}
    tool_num = 0
    # Build 99 unique random drill sizes between 0.02" and 0.04".
    while len(old) < 99:
        rand_size = round(random.uniform(.02, .04), 4)
        if rand_size in old:
            continue
        tool_num += 1
        old[rand_size] = "T{:02d}".format(tool_num)
    new = cluster(old, .0003, True)
| gpl-3.0 |
icloudrnd/automation_tools | openstack_dashboard/dashboards/project/overview/tests.py | 9 | 13372 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from django.core.urlresolvers import reverse
from django import http
from django.utils import timezone
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
INDEX_URL = reverse('horizon:project:overview:index')
class UsageViewTests(test.TestCase):
    """Tests for the project overview (usage) page.

    Each test stubs the nova/neutron/cinder API calls it expects with mox
    and then asserts on the rendered usage view.  Stub order matters: the
    mox replay verifies calls in the order they are recorded.
    """

    @test.create_stubs({api.nova: ('usage_get',
                                   'tenant_absolute_limits',
                                   'extension_supported')})
    def _stub_nova_api_calls(self, nova_stu_enabled=True,
                             tenant_limits_exception=False,
                             stu_exception=False):
        # SimpleTenantUsage support is probed twice by the view.
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
        api.nova.extension_supported(
            'SimpleTenantUsage', IsA(http.HttpRequest)) \
            .AndReturn(nova_stu_enabled)
        if tenant_limits_exception:
            api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
                .AndRaise(tenant_limits_exception)
        else:
            api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
                .AndReturn(self.limits['absolute'])
        if nova_stu_enabled:
            self._nova_stu_enabled(stu_exception)

    @test.create_stubs({api.cinder: ('tenant_absolute_limits',)})
    def _stub_cinder_api_calls(self):
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.cinder_limits['absolute'])

    @test.create_stubs({api.neutron: ('is_extension_supported',),
                        api.network: ('floating_ip_supported',
                                      'tenant_floating_ip_list',
                                      'security_group_list')})
    def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndReturn(neutron_sg_enabled)
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        if neutron_sg_enabled:
            api.network.security_group_list(IsA(http.HttpRequest)) \
                .AndReturn(self.q_secgroups.list())

    def _nova_stu_enabled(self, exception=False):
        # Usage is queried for the current month (1st through today).
        now = timezone.now()
        start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
        end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
        if exception:
            api.nova.usage_get(IsA(http.HttpRequest), self.tenant.id,
                               start, end) \
                .AndRaise(exception)
        else:
            api.nova.usage_get(IsA(http.HttpRequest), self.tenant.id,
                               start, end) \
                .AndReturn(api.nova.NovaUsage(self.usages.first()))

    def _common_assertions(self, nova_stu_enabled,
                           maxTotalFloatingIps=float("inf")):
        # Shared assertions for the rendered usage page.
        res = self.client.get(reverse('horizon:project:overview:index'))
        usages = res.context['usage']
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertTrue(isinstance(usages, usage.ProjectUsage))
        self.assertEqual(nova_stu_enabled,
                         res.context['simple_tenant_usage_enabled'])
        if nova_stu_enabled:
            self.assertContains(res, 'form-inline')
        else:
            self.assertNotContains(res, 'form-inline')
        self.assertEqual(usages.limits['maxTotalFloatingIps'],
                         maxTotalFloatingIps)

    def test_usage(self):
        self._test_usage(nova_stu_enabled=True)

    def test_usage_disabled(self):
        self._test_usage(nova_stu_enabled=False)

    def _test_usage(self, nova_stu_enabled):
        self._stub_nova_api_calls(nova_stu_enabled)
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        self._common_assertions(nova_stu_enabled)

    def test_usage_nova_network(self):
        self._test_usage_nova_network(nova_stu_enabled=True)

    def test_usage_nova_network_disabled(self):
        self._test_usage_nova_network(nova_stu_enabled=False)

    @test.create_stubs({api.base: ('is_service_enabled',)})
    def _test_usage_nova_network(self, nova_stu_enabled):
        # With neutron and cinder disabled the floating IP cap comes from nova.
        self._stub_nova_api_calls(nova_stu_enabled)
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
            .MultipleTimes().AndReturn(False)
        self.mox.ReplayAll()
        self._common_assertions(nova_stu_enabled, maxTotalFloatingIps=10)

    def test_unauthorized(self):
        self._stub_nova_api_calls(
            stu_exception=self.exceptions.nova_unauthorized)
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        url = reverse('horizon:project:overview:index')
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertMessageCount(res, error=1)
        self.assertContains(res, 'Unauthorized:')

    def test_usage_csv(self):
        self._test_usage_csv(nova_stu_enabled=True)

    def test_usage_csv_disabled(self):
        self._test_usage_csv(nova_stu_enabled=False)

    def _test_usage_csv(self, nova_stu_enabled=True):
        self._stub_nova_api_calls(nova_stu_enabled)
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index') +
                              "?format=csv")
        self.assertTemplateUsed(res, 'project/overview/usage.csv')
        self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))

    def test_usage_exception_usage(self):
        # A nova usage error must not break the page; usage list is empty.
        self._stub_nova_api_calls(stu_exception=self.exceptions.nova)
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertEqual(res.context['usage'].usage_list, [])

    def test_usage_exception_quota(self):
        # A nova limits error must not break the page; quotas are empty.
        self._stub_nova_api_calls(tenant_limits_exception=self.exceptions.nova)
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertEqual(res.context['usage'].quotas, {})

    def test_usage_default_tenant(self):
        self._stub_nova_api_calls()
        self._stub_neutron_api_calls()
        self._stub_cinder_api_calls()
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))

    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron(self):
        self._test_usage_with_neutron(neutron_sg_enabled=True)

    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron_nova_security_group(self):
        self._test_usage_with_neutron(neutron_sg_enabled=False)

    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron_floating_ip_disabled(self):
        self._test_usage_with_neutron(neutron_fip_enabled=False)

    @test.create_stubs({api.neutron: ('tenant_quota_get',
                                      'is_extension_supported'),
                        api.network: ('floating_ip_supported',
                                      'tenant_floating_ip_list',
                                      'security_group_list')})
    def _test_usage_with_neutron_prepare(self):
        self._stub_nova_api_calls()
        self._stub_cinder_api_calls()

    def _test_usage_with_neutron(self, neutron_sg_enabled=True,
                                 neutron_fip_enabled=True):
        self._test_usage_with_neutron_prepare()
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'quotas').AndReturn(True)
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndReturn(neutron_sg_enabled)
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(neutron_fip_enabled)
        if neutron_fip_enabled:
            api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
                .AndReturn(self.floating_ips.list())
        if neutron_sg_enabled:
            api.network.security_group_list(IsA(http.HttpRequest)) \
                .AndReturn(self.q_secgroups.list())
        api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndReturn(self.neutron_quotas.first())
        self.mox.ReplayAll()
        self._test_usage_with_neutron_check(neutron_sg_enabled)

    def _test_usage_with_neutron_check(self, neutron_sg_enabled=True,
                                       max_fip_expected=50,
                                       max_sg_expected=20):
        res = self.client.get(reverse('horizon:project:overview:index'))
        self.assertContains(res, 'Floating IPs')
        self.assertContains(res, 'Security Groups')
        res_limits = res.context['usage'].limits
        # Make sure the floating IPs comes from Neutron (50 vs. 10)
        max_floating_ips = res_limits['maxTotalFloatingIps']
        self.assertEqual(max_floating_ips, max_fip_expected)
        if neutron_sg_enabled:
            # Make sure the security group limit comes from Neutron (20 vs. 10)
            max_security_groups = res_limits['maxSecurityGroups']
            self.assertEqual(max_security_groups, max_sg_expected)

    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron_quotas_ext_error(self):
        # Quota extension probe failure falls back to unlimited values.
        self._test_usage_with_neutron_prepare()
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'quotas').AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        self._test_usage_with_neutron_check(max_fip_expected=float("inf"),
                                            max_sg_expected=float("inf"))

    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_usage_with_neutron_sg_ext_error(self):
        # Security-group extension probe failure falls back to unlimited values.
        self._test_usage_with_neutron_prepare()
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'quotas').AndReturn(True)
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'security-group').AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        self._test_usage_with_neutron_check(max_fip_expected=float("inf"),
                                            max_sg_expected=float("inf"))

    def test_usage_with_cinder(self):
        self._test_usage_cinder(cinder_enabled=True)

    def test_usage_without_cinder(self):
        self._test_usage_cinder(cinder_enabled=False)

    @test.create_stubs({api.base: ('is_service_enabled',)})
    def _test_usage_cinder(self, cinder_enabled):
        self._stub_nova_api_calls(True)
        if cinder_enabled:
            self._stub_cinder_api_calls()
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
            .MultipleTimes().AndReturn(cinder_enabled)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:overview:index'))
        usages = res.context['usage']
        self.assertTemplateUsed(res, 'project/overview/usage.html')
        self.assertTrue(isinstance(usages, usage.ProjectUsage))
        if cinder_enabled:
            # Volume limits should come straight from the cinder stub data.
            self.assertEqual(usages.limits['totalVolumesUsed'], 1)
            self.assertEqual(usages.limits['maxTotalVolumes'], 10)
            self.assertEqual(usages.limits['totalGigabytesUsed'], 5)
            self.assertEqual(usages.limits['maxTotalVolumeGigabytes'], 1000)
        else:
            self.assertNotIn('totalVolumesUsed', usages.limits)
stosdev/zebra-supervisor | judge/migrations/0002_initial_data.py | 2 | 1109 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.core import serializers
from django.conf import settings
import os
FIXTURE_DIR = getattr(settings, 'FIXTURE_DIR', os.path.dirname(__file__))
def get_fixture_file_paths():
    """Return the paths of all fixture files (xml/json/yaml) in FIXTURE_DIR."""
    candidates = (os.path.join(FIXTURE_DIR, entry)
                  for entry in os.listdir(FIXTURE_DIR))
    return [path for path in candidates
            if os.path.isfile(path) and path.endswith(('xml', 'json', 'yaml'))]
def initial_data(apps, schema_editor):
    """Deserialize and save every fixture file found in FIXTURE_DIR."""
    for fixture_path in get_fixture_file_paths():
        # The serializer format is the file extension without the dot.
        fmt = os.path.splitext(fixture_path)[1][1:]
        with open(fixture_path, 'r') as fixture_file:
            deserialized = serializers.deserialize(
                fmt, fixture_file, ignorenonexistent=True)
            for deserialized_obj in deserialized:
                deserialized_obj.save()
class Migration(migrations.Migration):
    """Data migration: loads the bundled fixture files via initial_data."""
    dependencies = [
        ('judge', '0001_initial'),
        ('questions', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(initial_data),
    ]
| gpl-3.0 |
gitgitcode/myflask | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jpcntx.py | 1777 | 19348 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
NUM_OF_CATEGORY = 6  # number of frequency categories in jp2CharContext (values 0-5)
DONT_KNOW = -1  # sentinel returned by get_confidence() when data is insufficient
ENOUGH_REL_THRESHOLD = 100  # sequences needed before got_enough_data() is True
MAX_REL_THRESHOLD = 1000  # stop analysing (set _mDone) after this many sequences
MINIMUM_DATA_THRESHOLD = 4  # below this total, confidence is DONT_KNOW
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
    """Base class scoring 2-character (hiragana) context for Japanese encodings.

    Subclasses implement get_order() to map raw bytes to a hiragana index;
    this class accumulates frequency-category counts from jp2CharContext
    and converts them into a confidence value.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Reset all accumulated statistics for a fresh input stream."""
        self._mTotalRel = 0  # total number of 2-char sequences received
        # category counters; each integer counts sequences in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1  # hiragana order of the previous char (-1 = none)
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False
    def feed(self, aBuf, aLen):
        """Consume aLen bytes of aBuf and update the context statistics."""
        if self._mDone:
            return
        # The buffer we got is byte oriented, and a character may span more
        # than one buffer.  If the last one or two bytes of the previous
        # buffer were not a complete character, we recorded how many bytes
        # are needed to complete it and skip those bytes here.  We could
        # instead keep those bytes and analyse the character once complete,
        # but one character makes little difference, so simply skipping it
        # simplifies the logic and improves performance.
        i = self._mNeedToSkipCharNum
        while i < aLen:
            order, charLen = self.get_order(aBuf[i:i + 2])
            i += charLen
            if i > aLen:
                # character straddles the buffer boundary: remember how many
                # of its bytes to skip at the start of the next feed() call
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        # enough data seen; freeze the statistics
                        self._mDone = True
                        break
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order
    def got_enough_data(self):
        # True once enough 2-char sequences were seen to trust the score.
        return self._mTotalRel > ENOUGH_REL_THRESHOLD
    def get_confidence(self):
        # This is just one way to calculate confidence. It works well for me.
        # NOTE(review): under Python 2 this is integer division unless callers
        # guarantee float inputs — confirm whether the truncation is intended.
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
        else:
            return DONT_KNOW
    def get_order(self, aBuf):
        # Overridden by encoding-specific subclasses; the base class cannot
        # map bytes to a hiragana order, so report "unknown, length 1".
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    """Hiragana context scorer for Shift_JIS (refined to CP932 when seen)."""
    def __init__(self):
        # Fix: initialise the base-class statistics too.  The original only
        # set charset_name, leaving _mDone/_mTotalRel/_mRelSample undefined
        # until an external reset() call, so feed()/get_confidence() on a
        # freshly constructed instance raised AttributeError.
        JapaneseContextAnalysis.__init__(self)
        self.charset_name = "SHIFT_JIS"
    def get_charset_name(self):
        """Return the detected charset name ("SHIFT_JIS" or "CP932")."""
        return self.charset_name
    def get_order(self, aBuf):
        """Return (hiragana order, byte length) for the character at aBuf[0].

        The order is -1 when the character is not hiragana or aBuf is empty.
        """
        if not aBuf:
            return -1, 1
        # find out current char's byte length
        first_char = wrap_ord(aBuf[0])
        if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
            charLen = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                # these lead bytes only occur in Microsoft's CP932 extension
                self.charset_name = "CP932"
        else:
            charLen = 1
        # return its order if it is hiragana
        if len(aBuf) > 1:
            second_char = wrap_ord(aBuf[1])
            # NOTE(review): 202 (0xCA) is what upstream chardet checks here;
            # Shift_JIS hiragana normally uses lead byte 0x82 — confirm
            # against the Mozilla original before relying on this branch.
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, charLen
        return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """Hiragana context scorer for the EUC-JP encoding."""
    def get_order(self, aBuf):
        """Return (hiragana order, byte length) for the character at aBuf[0].

        The order is -1 for anything that is not a hiragana character.
        """
        if not aBuf:
            return -1, 1
        # Determine the character's byte length from its lead byte.
        lead = wrap_ord(aBuf[0])
        if lead == 0x8F:
            charLen = 3
        elif lead == 0x8E or 0xA1 <= lead <= 0xFE:
            charLen = 2
        else:
            charLen = 1
        # Hiragana lives in the 0xA4 row; map the trail byte to a
        # zero-based order within that row.
        if len(aBuf) > 1 and lead == 0xA4:
            trail = wrap_ord(aBuf[1])
            if 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, charLen
        return -1, charLen
# flake8: noqa
| mit |
Peddle/hue | desktop/core/ext-py/lxml-3.3.6/src/lxml/tests/test_elementtree.py | 11 | 129094 | # -*- coding: utf-8 -*-
"""
Tests for the ElementTree API
Only test cases that apply equally well to etree and ElementTree
belong here. Note that there is a second test module called test_io.py
for IO related test cases.
"""
import unittest
import os, re, tempfile, copy, operator, gc, sys
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import StringIO, BytesIO, etree
from common_imports import ElementTree, cElementTree, ET_VERSION, CET_VERSION
from common_imports import filter_by_version, fileInTestDir, canonicalize, HelperTestCase
from common_imports import _str, _bytes, unicode, next
# Skip cElementTree comparison for versions that are too old, and on
# Python 3.3+ where cElementTree is presumably just an alias of
# ElementTree — TODO confirm that rationale.
if cElementTree is not None and (CET_VERSION <= (1,0,7) or sys.version_info >= (3,3)):
    cElementTree = None
if ElementTree is not None:
    print("Comparing with ElementTree %s" % getattr(ElementTree, "VERSION", "?"))
if cElementTree is not None:
    print("Comparing with cElementTree %s" % getattr(cElementTree, "VERSION", "?"))
try:
    reversed
except NameError:
    # Python 2.3 has no builtin reversed(); provide a list-slicing fallback.
    def reversed(seq):
        seq = list(seq)[::-1]
        return seq
class _ETreeTestCaseBase(HelperTestCase):
etree = None
required_versions_ET = {}
required_versions_cET = {}
def XMLParser(self, **kwargs):
try:
XMLParser = self.etree.XMLParser
except AttributeError:
assert 'ElementTree' in self.etree.__name__
XMLParser = self.etree.TreeBuilder
return XMLParser(**kwargs)
def test_element(self):
for i in range(10):
e = self.etree.Element('foo')
self.assertEqual(e.tag, 'foo')
self.assertEqual(e.text, None)
self.assertEqual(e.tail, None)
def test_simple(self):
Element = self.etree.Element
root = Element('root')
root.append(Element('one'))
root.append(Element('two'))
root.append(Element('three'))
self.assertEqual(3, len(root))
self.assertEqual('one', root[0].tag)
self.assertEqual('two', root[1].tag)
self.assertEqual('three', root[2].tag)
self.assertRaises(IndexError, operator.getitem, root, 3)
# test weird dictionary interaction leading to segfault previously
def test_weird_dict_interaction(self):
root = self.etree.Element('root')
self.assertEqual(root.tag, "root")
add = self.etree.ElementTree(file=BytesIO('<foo>Foo</foo>'))
self.assertEqual(add.getroot().tag, "foo")
self.assertEqual(add.getroot().text, "Foo")
root.append(self.etree.Element('baz'))
self.assertEqual(root.tag, "root")
self.assertEqual(root[0].tag, "baz")
def test_subelement(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
SubElement(root, 'one')
SubElement(root, 'two')
SubElement(root, 'three')
self.assertEqual(3, len(root))
self.assertEqual('one', root[0].tag)
self.assertEqual('two', root[1].tag)
self.assertEqual('three', root[2].tag)
def test_element_contains(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root1 = Element('root')
SubElement(root1, 'one')
self.assertTrue(root1[0] in root1)
root2 = Element('root')
SubElement(root2, 'two')
SubElement(root2, 'three')
self.assertTrue(root2[0] in root2)
self.assertTrue(root2[1] in root2)
self.assertFalse(root1[0] in root2)
self.assertFalse(root2[0] in root1)
self.assertFalse(None in root2)
def test_element_indexing_with_text(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc>Test<one>One</one></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(1, len(root))
self.assertEqual('one', root[0].tag)
self.assertRaises(IndexError, operator.getitem, root, 1)
def test_element_indexing_with_text2(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc><one>One</one><two>Two</two>hm<three>Three</three></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(3, len(root))
self.assertEqual('one', root[0].tag)
self.assertEqual('two', root[1].tag)
self.assertEqual('three', root[2].tag)
def test_element_indexing_only_text(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc>Test</doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(0, len(root))
def test_element_indexing_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
self.assertEqual(d, a[-1])
self.assertEqual(c, a[-2])
self.assertEqual(b, a[-3])
self.assertRaises(IndexError, operator.getitem, a, -4)
a[-1] = e = Element('e')
self.assertEqual(e, a[-1])
del a[-1]
self.assertEqual(2, len(a))
def test_elementtree(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc><one>One</one><two>Two</two></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(2, len(root))
self.assertEqual('one', root[0].tag)
self.assertEqual('two', root[1].tag)
def test_text(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc>This is a text</doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('This is a text', root.text)
def test_text_empty(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(None, root.text)
def test_text_other(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc><one>One</one></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(None, root.text)
self.assertEqual('One', root[0].text)
def test_text_escape_in(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc>This is > than a text</doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('This is > than a text', root.text)
def test_text_escape_out(self):
Element = self.etree.Element
a = Element("a")
a.text = "<>&"
self.assertXML(_bytes('<a><>&</a>'),
a)
def test_text_escape_tostring(self):
tostring = self.etree.tostring
Element = self.etree.Element
a = Element("a")
a.text = "<>&"
self.assertEqual(_bytes('<a><>&</a>'),
tostring(a))
def test_text_str_subclass(self):
Element = self.etree.Element
class strTest(str):
pass
a = Element("a")
a.text = strTest("text")
self.assertXML(_bytes('<a>text</a>'),
a)
def test_tail(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc>This is <i>mixed</i> content.</doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(1, len(root))
self.assertEqual('This is ', root.text)
self.assertEqual(None, root.tail)
self.assertEqual('mixed', root[0].text)
self.assertEqual(' content.', root[0].tail)
def test_tail_str_subclass(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
class strTest(str):
pass
a = Element("a")
SubElement(a, "t").tail = strTest("tail")
self.assertXML(_bytes('<a><t></t>tail</a>'),
a)
def _test_del_tail(self):
# this is discouraged for ET compat, should not be tested...
XML = self.etree.XML
root = XML(_bytes('<doc>This is <i>mixed</i> content.</doc>'))
self.assertEqual(1, len(root))
self.assertEqual('This is ', root.text)
self.assertEqual(None, root.tail)
self.assertEqual('mixed', root[0].text)
self.assertEqual(' content.', root[0].tail)
del root[0].tail
self.assertEqual(1, len(root))
self.assertEqual('This is ', root.text)
self.assertEqual(None, root.tail)
self.assertEqual('mixed', root[0].text)
self.assertEqual(None, root[0].tail)
root[0].tail = "TAIL"
self.assertEqual(1, len(root))
self.assertEqual('This is ', root.text)
self.assertEqual(None, root.tail)
self.assertEqual('mixed', root[0].text)
self.assertEqual('TAIL', root[0].tail)
def test_ElementTree(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
el = Element('hoi')
doc = ElementTree(el)
root = doc.getroot()
self.assertEqual(None, root.text)
self.assertEqual('hoi', root.tag)
def test_attrib(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('One', root.attrib['one'])
self.assertEqual('Two', root.attrib['two'])
self.assertRaises(KeyError, operator.getitem, root.attrib, 'three')
def test_attrib_get(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('One', root.attrib.get('one'))
self.assertEqual('Two', root.attrib.get('two'))
self.assertEqual(None, root.attrib.get('three'))
self.assertEqual('foo', root.attrib.get('three', 'foo'))
def test_attrib_dict(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
attrib = dict(root.attrib)
self.assertEqual('One', attrib['one'])
self.assertEqual('Two', attrib['two'])
self.assertRaises(KeyError, operator.getitem, attrib, 'three')
def test_attrib_copy(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
attrib = copy.copy(root.attrib)
self.assertEqual('One', attrib['one'])
self.assertEqual('Two', attrib['two'])
self.assertRaises(KeyError, operator.getitem, attrib, 'three')
def test_attrib_deepcopy(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
attrib = copy.deepcopy(root.attrib)
self.assertEqual('One', attrib['one'])
self.assertEqual('Two', attrib['two'])
self.assertRaises(KeyError, operator.getitem, attrib, 'three')
def test_attributes_get(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('One', root.get('one'))
self.assertEqual('Two', root.get('two'))
self.assertEqual(None, root.get('three'))
self.assertEqual('foo', root.get('three', 'foo'))
def test_attrib_clear(self):
XML = self.etree.XML
root = XML(_bytes('<doc one="One" two="Two"/>'))
self.assertEqual('One', root.get('one'))
self.assertEqual('Two', root.get('two'))
root.attrib.clear()
self.assertEqual(None, root.get('one'))
self.assertEqual(None, root.get('two'))
def test_attrib_set_clear(self):
Element = self.etree.Element
root = Element("root", one="One")
root.set("two", "Two")
self.assertEqual('One', root.get('one'))
self.assertEqual('Two', root.get('two'))
root.attrib.clear()
self.assertEqual(None, root.get('one'))
self.assertEqual(None, root.get('two'))
def test_attrib_ns_clear(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
attribNS = '{http://foo/bar}x'
parent = Element('parent')
parent.set(attribNS, 'a')
child = SubElement(parent, 'child')
child.set(attribNS, 'b')
self.assertEqual('a', parent.get(attribNS))
self.assertEqual('b', child.get(attribNS))
parent.clear()
self.assertEqual(None, parent.get(attribNS))
self.assertEqual('b', child.get(attribNS))
def test_attrib_pop(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('One', root.attrib['one'])
self.assertEqual('Two', root.attrib['two'])
self.assertEqual('One', root.attrib.pop('one'))
self.assertEqual(None, root.attrib.get('one'))
self.assertEqual('Two', root.attrib['two'])
def test_attrib_pop_unknown(self):
root = self.etree.XML(_bytes('<doc one="One" two="Two"/>'))
self.assertRaises(KeyError, root.attrib.pop, 'NONE')
self.assertEqual('One', root.attrib['one'])
self.assertEqual('Two', root.attrib['two'])
def test_attrib_pop_default(self):
root = self.etree.XML(_bytes('<doc one="One" two="Two"/>'))
self.assertEqual('Three', root.attrib.pop('three', 'Three'))
def test_attrib_pop_empty_default(self):
root = self.etree.XML(_bytes('<doc/>'))
self.assertEqual('Three', root.attrib.pop('three', 'Three'))
def test_attrib_pop_invalid_args(self):
root = self.etree.XML(_bytes('<doc one="One" two="Two"/>'))
self.assertRaises(TypeError, root.attrib.pop, 'One', None, None)
def test_attribute_update_dict(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta"/>'))
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'Alpha'), ('beta', 'Beta')],
items)
root.attrib.update({'alpha' : 'test', 'gamma' : 'Gamma'})
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'test'), ('beta', 'Beta'), ('gamma', 'Gamma')],
items)
def test_attribute_update_sequence(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta"/>'))
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'Alpha'), ('beta', 'Beta')],
items)
root.attrib.update({'alpha' : 'test', 'gamma' : 'Gamma'}.items())
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'test'), ('beta', 'Beta'), ('gamma', 'Gamma')],
items)
def test_attribute_update_iter(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta"/>'))
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'Alpha'), ('beta', 'Beta')],
items)
root.attrib.update(iter({'alpha' : 'test', 'gamma' : 'Gamma'}.items()))
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'test'), ('beta', 'Beta'), ('gamma', 'Gamma')],
items)
def test_attribute_update_attrib(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta"/>'))
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'Alpha'), ('beta', 'Beta')],
items)
other = XML(_bytes('<doc alpha="test" gamma="Gamma"/>'))
root.attrib.update(other.attrib)
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'test'), ('beta', 'Beta'), ('gamma', 'Gamma')],
items)
def test_attribute_keys(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
keys = list(root.attrib.keys())
keys.sort()
self.assertEqual(['alpha', 'beta', 'gamma'], keys)
def test_attribute_keys2(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
keys = list(root.keys())
keys.sort()
self.assertEqual(['alpha', 'beta', 'gamma'], keys)
def test_attribute_items2(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
items = list(root.items())
items.sort()
self.assertEqual(
[('alpha','Alpha'), ('beta','Beta'), ('gamma','Gamma')],
items)
def test_attribute_keys_ns(self):
XML = self.etree.XML
root = XML(_bytes('<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />'))
keys = list(root.keys())
keys.sort()
self.assertEqual(['bar', '{http://ns.codespeak.net/test}baz'],
keys)
def test_attribute_values(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
values = list(root.attrib.values())
values.sort()
self.assertEqual(['Alpha', 'Beta', 'Gamma'], values)
def test_attribute_values_ns(self):
XML = self.etree.XML
root = XML(_bytes('<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />'))
values = list(root.attrib.values())
values.sort()
self.assertEqual(
['Bar', 'Baz'], values)
def test_attribute_items(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
items = list(root.attrib.items())
items.sort()
self.assertEqual([
('alpha', 'Alpha'),
('beta', 'Beta'),
('gamma', 'Gamma'),
],
items)
def test_attribute_items_ns(self):
XML = self.etree.XML
root = XML(_bytes('<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />'))
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('bar', 'Bar'), ('{http://ns.codespeak.net/test}baz', 'Baz')],
items)
    def test_attribute_str(self):
        """str() of the attrib mapping renders like a dict.

        The ordering of the two keys is implementation-dependent, so both
        possible renderings are accepted via the try/except fallback.
        """
        XML = self.etree.XML
        expected = "{'{http://ns.codespeak.net/test}baz': 'Baz', 'bar': 'Bar'}"
        alternative = "{'bar': 'Bar', '{http://ns.codespeak.net/test}baz': 'Baz'}"
        root = XML(_bytes('<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />'))
        try:
            self.assertEqual(expected, str(root.attrib))
        except AssertionError:
            # other key order: equally valid dict rendering
            self.assertEqual(alternative, str(root.attrib))
def test_attribute_contains(self):
XML = self.etree.XML
root = XML(_bytes('<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />'))
self.assertEqual(
True, 'bar' in root.attrib)
self.assertEqual(
False, 'baz' in root.attrib)
self.assertEqual(
False, 'hah' in root.attrib)
self.assertEqual(
True,
'{http://ns.codespeak.net/test}baz' in root.attrib)
def test_attribute_set(self):
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.get("attr"))
def test_attrib_as_attrib(self):
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.attrib["attr"])
root2 = Element("root2", root.attrib)
self.assertEqual("TEST", root2.attrib["attr"])
def test_attribute_iterator(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma" />'))
result = []
for key in root.attrib:
result.append(key)
result.sort()
self.assertEqual(['alpha', 'beta', 'gamma'], result)
def test_attribute_manipulation(self):
Element = self.etree.Element
a = Element('a')
a.attrib['foo'] = 'Foo'
a.attrib['bar'] = 'Bar'
self.assertEqual('Foo', a.attrib['foo'])
del a.attrib['foo']
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
def test_del_attribute_ns(self):
Element = self.etree.Element
a = Element('a')
a.attrib['{http://a/}foo'] = 'Foo'
a.attrib['{http://a/}bar'] = 'Bar'
self.assertEqual(None, a.get('foo'))
self.assertEqual('Foo', a.get('{http://a/}foo'))
self.assertEqual('Foo', a.attrib['{http://a/}foo'])
self.assertRaises(KeyError, operator.delitem, a.attrib, 'foo')
self.assertEqual('Foo', a.attrib['{http://a/}foo'])
del a.attrib['{http://a/}foo']
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
def test_del_attribute_ns_parsed(self):
XML = self.etree.XML
a = XML(_bytes('<a xmlns:nsa="http://a/" nsa:foo="FooNS" foo="Foo" />'))
self.assertEqual('Foo', a.attrib['foo'])
self.assertEqual('FooNS', a.attrib['{http://a/}foo'])
del a.attrib['foo']
self.assertEqual('FooNS', a.attrib['{http://a/}foo'])
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
self.assertRaises(KeyError, operator.delitem, a.attrib, 'foo')
del a.attrib['{http://a/}foo']
self.assertRaises(KeyError, operator.getitem, a.attrib, '{http://a/}foo')
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
a = XML(_bytes('<a xmlns:nsa="http://a/" foo="Foo" nsa:foo="FooNS" />'))
self.assertEqual('Foo', a.attrib['foo'])
self.assertEqual('FooNS', a.attrib['{http://a/}foo'])
del a.attrib['foo']
self.assertEqual('FooNS', a.attrib['{http://a/}foo'])
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
del a.attrib['{http://a/}foo']
self.assertRaises(KeyError, operator.getitem, a.attrib, '{http://a/}foo')
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
    def test_XML(self):
        """XML() parses a bytes document; text content appears as .text."""
        XML = self.etree.XML
        root = XML(_bytes('<doc>This is a text.</doc>'))
        self.assertEqual(0, len(root))  # text-only element has no children
        self.assertEqual('This is a text.', root.text)
def test_XMLID(self):
XMLID = self.etree.XMLID
XML = self.etree.XML
xml_text = _bytes('''
<document>
<h1 id="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p xml:id="xmlid">XML:ID paragraph.</p>
<p id="warn1" class="warning">...</p>
</document>
''')
root, dic = XMLID(xml_text)
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
expected = {
"chapter1" : root[0],
"note1" : root[1],
"warn1" : root[4]
}
self.assertEqual(dic, expected)
def test_fromstring(self):
fromstring = self.etree.fromstring
root = fromstring('<doc>This is a text.</doc>')
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
required_versions_ET['test_fromstringlist'] = (1,3)
def test_fromstringlist(self):
fromstringlist = self.etree.fromstringlist
root = fromstringlist(["<do", "c>T", "hi", "s is",
" a text.<", "/doc", ">"])
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
required_versions_ET['test_fromstringlist_characters'] = (1,3)
def test_fromstringlist_characters(self):
fromstringlist = self.etree.fromstringlist
root = fromstringlist(list('<doc>This is a text.</doc>'))
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
required_versions_ET['test_fromstringlist_single'] = (1,3)
def test_fromstringlist_single(self):
fromstringlist = self.etree.fromstringlist
root = fromstringlist(['<doc>This is a text.</doc>'])
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
def test_iselement(self):
iselement = self.etree.iselement
Element = self.etree.Element
ElementTree = self.etree.ElementTree
XML = self.etree.XML
Comment = self.etree.Comment
ProcessingInstruction = self.etree.ProcessingInstruction
el = Element('hoi')
self.assertTrue(iselement(el))
el2 = XML(_bytes('<foo/>'))
self.assertTrue(iselement(el2))
tree = ElementTree(element=Element('dag'))
self.assertTrue(not iselement(tree))
self.assertTrue(iselement(tree.getroot()))
c = Comment('test')
self.assertTrue(iselement(c))
p = ProcessingInstruction("test", "some text")
self.assertTrue(iselement(p))
def test_iteration(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root:
result.append(el.tag)
self.assertEqual(['one', 'two', 'three'], result)
def test_iteration_empty(self):
XML = self.etree.XML
root = XML(_bytes('<doc></doc>'))
result = []
for el in root:
result.append(el.tag)
self.assertEqual([], result)
def test_iteration_text_only(self):
XML = self.etree.XML
root = XML(_bytes('<doc>Text</doc>'))
result = []
for el in root:
result.append(el.tag)
self.assertEqual([], result)
def test_iteration_crash(self):
# this would cause a crash in the past
fromstring = self.etree.fromstring
root = etree.fromstring('<html><p></p>x</html>')
for elem in root:
elem.tail = ''
def test_iteration_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in reversed(root):
result.append(el.tag)
self.assertEqual(['three', 'two', 'one'], result)
def test_iteration_subelement(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
add = True
for el in root:
result.append(el.tag)
if add:
self.etree.SubElement(root, 'four')
add = False
self.assertEqual(['one', 'two', 'three', 'four'], result)
def test_iteration_del_child(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root:
result.append(el.tag)
del root[-1]
self.assertEqual(['one', 'two'], result)
def test_iteration_double(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two/></doc>'))
result = []
for el0 in root:
result.append(el0.tag)
for el1 in root:
result.append(el1.tag)
self.assertEqual(['one','one', 'two', 'two', 'one', 'two'], result)
required_versions_ET['test_itertext'] = (1,3)
def test_itertext(self):
# ET 1.3+
XML = self.etree.XML
root = XML(_bytes("<root>RTEXT<a></a>ATAIL<b/><c>CTEXT</c>CTAIL</root>"))
text = list(root.itertext())
self.assertEqual(["RTEXT", "ATAIL", "CTEXT", "CTAIL"],
text)
required_versions_ET['test_itertext_child'] = (1,3)
def test_itertext_child(self):
# ET 1.3+
XML = self.etree.XML
root = XML(_bytes("<root>RTEXT<a></a>ATAIL<b/><c>CTEXT</c>CTAIL</root>"))
text = list(root[2].itertext())
self.assertEqual(["CTEXT"],
text)
    def test_findall(self):
        """findall() matches direct children by tag and descendants via './/'."""
        XML = self.etree.XML
        root = XML(_bytes('<a><b><c/></b><b/><c><b/></c></a>'))
        self.assertEqual(len(list(root.findall("c"))), 1)     # direct child only
        self.assertEqual(len(list(root.findall(".//c"))), 2)  # all descendants
        self.assertEqual(len(list(root.findall(".//b"))), 3)
        # child counts of the three matched <b> elements, in document order
        self.assertEqual(len(list(root.findall(".//b"))[0]), 1)
        self.assertEqual(len(list(root.findall(".//b"))[1]), 0)
        self.assertEqual(len(list(root.findall(".//b"))[2]), 0)
def test_findall_ns(self):
XML = self.etree.XML
root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>'))
self.assertEqual(len(list(root.findall(".//{X}b"))), 2)
self.assertEqual(len(list(root.findall(".//b"))), 3)
self.assertEqual(len(list(root.findall("b"))), 2)
def test_element_with_attributes_keywords(self):
Element = self.etree.Element
el = Element('tag', foo='Foo', bar='Bar')
self.assertEqual('Foo', el.attrib['foo'])
self.assertEqual('Bar', el.attrib['bar'])
def test_element_with_attributes(self):
Element = self.etree.Element
el = Element('tag', {'foo':'Foo', 'bar':'Bar'})
self.assertEqual('Foo', el.attrib['foo'])
self.assertEqual('Bar', el.attrib['bar'])
def test_element_with_attributes_ns(self):
Element = self.etree.Element
el = Element('tag', {'{ns1}foo':'Foo', '{ns2}bar':'Bar'})
self.assertEqual('Foo', el.attrib['{ns1}foo'])
self.assertEqual('Bar', el.attrib['{ns2}bar'])
def test_subelement_with_attributes(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('tag')
SubElement(el, 'foo', {'foo':'Foo'}, baz="Baz")
self.assertEqual("Baz", el[0].attrib['baz'])
self.assertEqual('Foo', el[0].attrib['foo'])
def test_subelement_with_attributes_ns(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('tag')
SubElement(el, 'foo', {'{ns1}foo':'Foo', '{ns2}bar':'Bar'})
self.assertEqual('Foo', el[0].attrib['{ns1}foo'])
self.assertEqual('Bar', el[0].attrib['{ns2}bar'])
    def test_write(self):
        """ElementTree.write() serialises the tree to a file-like object.

        Repeats with ten distinct tag names so each round parses and writes
        a fresh document; output is compared after C14N canonicalisation.
        """
        ElementTree = self.etree.ElementTree
        XML = self.etree.XML
        for i in range(10):
            f = BytesIO()
            root = XML(_bytes('<doc%s>This is a test.</doc%s>' % (i, i)))
            tree = ElementTree(element=root)
            tree.write(f)
            data = f.getvalue()
            self.assertEqual(
                _bytes('<doc%s>This is a test.</doc%s>' % (i, i)),
                canonicalize(data))
required_versions_ET['test_write_method_html'] = (1,3)
def test_write_method_html(self):
ElementTree = self.etree.ElementTree
Element = self.etree.Element
SubElement = self.etree.SubElement
html = Element('html')
body = SubElement(html, 'body')
p = SubElement(body, 'p')
p.text = "html"
SubElement(p, 'br').tail = "test"
tree = ElementTree(element=html)
f = BytesIO()
tree.write(f, method="html")
data = f.getvalue().replace(_bytes('\n'),_bytes(''))
self.assertEqual(_bytes('<html><body><p>html<br>test</p></body></html>'),
data)
required_versions_ET['test_write_method_text'] = (1,3)
def test_write_method_text(self):
ElementTree = self.etree.ElementTree
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = "A"
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = "TAIL"
c = SubElement(a, 'c')
c.text = "C"
tree = ElementTree(element=a)
f = BytesIO()
tree.write(f, method="text")
data = f.getvalue()
self.assertEqual(_bytes('ABTAILCtail'),
data)
def test_write_fail(self):
ElementTree = self.etree.ElementTree
XML = self.etree.XML
tree = ElementTree( XML(_bytes('<doc>This is a test.</doc>')) )
self.assertRaises(IOError, tree.write,
"definitely////\\-\\nonexisting\\-\\////FILE")
# this could trigger a crash, apparently because the document
# reference was prematurely garbage collected
def test_crash(self):
Element = self.etree.Element
element = Element('tag')
for i in range(10):
element.attrib['key'] = 'value'
value = element.attrib['key']
self.assertEqual(value, 'value')
# from doctest; for some reason this caused crashes too
def test_write_ElementTreeDoctest(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
f = BytesIO()
for i in range(10):
element = Element('tag%s' % i)
self._check_element(element)
tree = ElementTree(element)
tree.write(f)
self._check_element_tree(tree)
def test_subelement_reference(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('foo')
el2 = SubElement(el, 'bar')
el3 = SubElement(el2, 'baz')
al = Element('foo2')
al2 = SubElement(al, 'bar2')
al3 = SubElement(al2, 'baz2')
# now move al2 into el
el.append(al2)
# now change al3 directly
al3.text = 'baz2-modified'
# it should have changed through this route too
self.assertEqual(
'baz2-modified',
el[1][0].text)
def test_set_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.text = 'hoi'
self.assertEqual(
'hoi',
a.text)
self.assertEqual(
'b',
a[0].tag)
def test_set_text2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = 'hoi'
b = SubElement(a ,'b')
self.assertEqual(
'hoi',
a.text)
self.assertEqual(
'b',
a[0].tag)
def test_set_text_none(self):
Element = self.etree.Element
a = Element('a')
a.text = 'foo'
a.text = None
self.assertEqual(
None,
a.text)
self.assertXML(_bytes('<a></a>'), a)
def test_set_text_empty(self):
Element = self.etree.Element
a = Element('a')
self.assertEqual(None, a.text)
a.text = ''
self.assertEqual('', a.text)
self.assertXML(_bytes('<a></a>'), a)
def test_tail1(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.tail = 'dag'
self.assertEqual('dag',
a.tail)
b = SubElement(a, 'b')
b.tail = 'hoi'
self.assertEqual('hoi',
b.tail)
self.assertEqual('dag',
a.tail)
def test_tail_append(self):
Element = self.etree.Element
a = Element('a')
b = Element('b')
b.tail = 'b_tail'
a.append(b)
self.assertEqual('b_tail',
b.tail)
def test_tail_set_twice(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
b.tail = 'foo'
b.tail = 'bar'
self.assertEqual('bar',
b.tail)
self.assertXML(_bytes('<a><b></b>bar</a>'), a)
def test_tail_set_none(self):
Element = self.etree.Element
a = Element('a')
a.tail = 'foo'
a.tail = None
self.assertEqual(
None,
a.tail)
self.assertXML(_bytes('<a></a>'), a)
required_versions_ET['test_extend'] = (1,3)
def test_extend(self):
root = self.etree.Element('foo')
for i in range(3):
element = self.etree.SubElement(root, 'a%s' % i)
element.text = "text%d" % i
element.tail = "tail%d" % i
elements = []
for i in range(3):
new_element = self.etree.Element("test%s" % i)
new_element.text = "TEXT%s" % i
new_element.tail = "TAIL%s" % i
elements.append(new_element)
root.extend(elements)
self.assertEqual(
["a0", "a1", "a2", "test0", "test1", "test2"],
[ el.tag for el in root ])
self.assertEqual(
["text0", "text1", "text2", "TEXT0", "TEXT1", "TEXT2"],
[ el.text for el in root ])
self.assertEqual(
["tail0", "tail1", "tail2", "TAIL0", "TAIL1", "TAIL2"],
[ el.tail for el in root ])
def test_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
a = Element('a')
a.append(Comment('foo'))
self.assertEqual(a[0].tag, Comment)
self.assertEqual(a[0].text, 'foo')
# ElementTree < 1.3 adds whitespace around comments
required_versions_ET['test_comment_text'] = (1,3)
def test_comment_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
tostring = self.etree.tostring
a = Element('a')
a.append(Comment('foo'))
self.assertEqual(a[0].text, 'foo')
self.assertEqual(
_bytes('<a><!--foo--></a>'),
tostring(a))
a[0].text = "TEST"
self.assertEqual(a[0].text, 'TEST')
self.assertEqual(
_bytes('<a><!--TEST--></a>'),
tostring(a))
# ElementTree < 1.3 adds whitespace around comments
required_versions_ET['test_comment_whitespace'] = (1,3)
def test_comment_whitespace(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
tostring = self.etree.tostring
a = Element('a')
a.append(Comment(' foo '))
self.assertEqual(a[0].text, ' foo ')
self.assertEqual(
_bytes('<a><!-- foo --></a>'),
tostring(a))
    def test_comment_nonsense(self):
        """Comment nodes expose an empty, element-like API: no attributes,
        no children, and iteration yields nothing."""
        Comment = self.etree.Comment
        c = Comment('foo')
        self.assertEqual({}, c.attrib)
        self.assertEqual([], list(c.keys()))
        self.assertEqual([], list(c.items()))
        self.assertEqual(None, c.get('hoi'))
        self.assertEqual(0, len(c))
        # should not iterate
        for i in c:
            pass
def test_pi(self):
# lxml.etree separates target and text
Element = self.etree.Element
SubElement = self.etree.SubElement
ProcessingInstruction = self.etree.ProcessingInstruction
a = Element('a')
a.append(ProcessingInstruction('foo', 'some more text'))
self.assertEqual(a[0].tag, ProcessingInstruction)
self.assertXML(_bytes("<a><?foo some more text?></a>"),
a)
def test_processinginstruction(self):
# lxml.etree separates target and text
Element = self.etree.Element
SubElement = self.etree.SubElement
ProcessingInstruction = self.etree.PI
a = Element('a')
a.append(ProcessingInstruction('foo', 'some more text'))
self.assertEqual(a[0].tag, ProcessingInstruction)
self.assertXML(_bytes("<a><?foo some more text?></a>"),
a)
def test_pi_nonsense(self):
ProcessingInstruction = self.etree.ProcessingInstruction
pi = ProcessingInstruction('foo')
self.assertEqual({}, pi.attrib)
self.assertEqual([], list(pi.keys()))
self.assertEqual([], list(pi.items()))
self.assertEqual(None, pi.get('hoi'))
self.assertEqual(0, len(pi))
# should not iterate
for i in pi:
pass
def test_setitem(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = Element('c')
a[0] = c
self.assertEqual(
c,
a[0])
self.assertXML(_bytes('<a><c></c></a>'),
a)
self.assertXML(_bytes('<b></b>'),
b)
def test_setitem2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
for i in range(5):
b = SubElement(a, 'b%s' % i)
c = SubElement(b, 'c')
for i in range(5):
d = Element('d')
e = SubElement(d, 'e')
a[i] = d
self.assertXML(
_bytes('<a><d><e></e></d><d><e></e></d><d><e></e></d><d><e></e></d><d><e></e></d></a>'),
a)
self.assertXML(_bytes('<c></c>'),
c)
def test_setitem_replace(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
SubElement(a, 'b')
d = Element('d')
a[0] = d
self.assertXML(_bytes('<a><d></d></a>'), a)
def test_setitem_indexerror(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
self.assertRaises(IndexError, operator.setitem, a, 1, Element('c'))
def test_setitem_tail(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
b.tail = 'B2'
c = Element('c')
c.tail = 'C2'
a[0] = c
self.assertXML(
_bytes('<a><c></c>C2</a>'),
a)
    def test_tag_write(self):
        """Reassigning .tag renames the element in place; children are kept."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        a.tag = 'c'
        self.assertEqual(
            'c',
            a.tag)
        self.assertXML(
            _bytes('<c><b></b></c>'),
            a)
def test_tag_reset_ns(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
a = Element('{a}a')
b1 = SubElement(a, '{a}b')
b2 = SubElement(a, '{b}b')
self.assertEqual('{a}b', b1.tag)
b1.tag = 'c'
# can't use C14N here!
self.assertEqual('c', b1.tag)
self.assertEqual(_bytes('<c'), tostring(b1)[:2])
self.assertTrue(_bytes('<c') in tostring(a))
def test_tag_reset_root_ns(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
a = Element('{a}a')
b1 = SubElement(a, '{a}b')
b2 = SubElement(a, '{b}b')
a.tag = 'c'
self.assertEqual(
'c',
a.tag)
# can't use C14N here!
self.assertEqual('c', a.tag)
self.assertEqual(_bytes('<c'), tostring(a)[:2])
def test_tag_str_subclass(self):
Element = self.etree.Element
class strTest(str):
pass
a = Element("a")
a.tag = strTest("TAG")
self.assertXML(_bytes('<TAG></TAG>'),
a)
    def test_delitem(self):
        """del element[i] removes the child; a removed child stays a valid
        standalone element and can be moved into another tree."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        del a[1]
        self.assertXML(
            _bytes('<a><b></b><d></d></a>'),
            a)
        del a[0]
        self.assertXML(
            _bytes('<a><d></d></a>'),
            a)
        del a[0]
        self.assertXML(
            _bytes('<a></a>'),
            a)
        # move deleted element into other tree afterwards
        other = Element('other')
        other.append(c)
        self.assertXML(
            _bytes('<other><c></c></other>'),
            other)
def test_del_insert(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
bs = SubElement(b, 'bs')
c = SubElement(a, 'c')
cs = SubElement(c, 'cs')
el = a[0]
self.assertXML(
_bytes('<a><b><bs></bs></b><c><cs></cs></c></a>'),
a)
self.assertXML(_bytes('<b><bs></bs></b>'), b)
self.assertXML(_bytes('<c><cs></cs></c>'), c)
del a[0]
self.assertXML(
_bytes('<a><c><cs></cs></c></a>'),
a)
self.assertXML(_bytes('<b><bs></bs></b>'), b)
self.assertXML(_bytes('<c><cs></cs></c>'), c)
a.insert(0, el)
self.assertXML(
_bytes('<a><b><bs></bs></b><c><cs></cs></c></a>'),
a)
self.assertXML(_bytes('<b><bs></bs></b>'), b)
self.assertXML(_bytes('<c><cs></cs></c>'), c)
def test_del_setitem(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
bs = SubElement(b, 'bs')
c = SubElement(a, 'c')
cs = SubElement(c, 'cs')
el = a[0]
del a[0]
a[0] = el
self.assertXML(
_bytes('<a><b><bs></bs></b></a>'),
a)
self.assertXML(_bytes('<b><bs></bs></b>'), b)
self.assertXML(_bytes('<c><cs></cs></c>'), c)
def test_del_setslice(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
bs = SubElement(b, 'bs')
c = SubElement(a, 'c')
cs = SubElement(c, 'cs')
el = a[0]
del a[0]
a[0:0] = [el]
self.assertXML(
_bytes('<a><b><bs></bs></b><c><cs></cs></c></a>'),
a)
self.assertXML(_bytes('<b><bs></bs></b>'), b)
self.assertXML(_bytes('<c><cs></cs></c>'), c)
def test_replace_slice_tail(self):
XML = self.etree.XML
a = XML(_bytes('<a><b></b>B2<c></c>C2</a>'))
b, c = a
a[:] = []
self.assertEqual("B2", b.tail)
self.assertEqual("C2", c.tail)
def test_merge_namespaced_subtree_as_slice(self):
XML = self.etree.XML
root = XML(_bytes(
'<foo><bar xmlns:baz="http://huhu"><puh><baz:bump1 /><baz:bump2 /></puh></bar></foo>'))
root[:] = root.findall('.//puh') # delete bar from hierarchy
# previously, this lost a namespace declaration on bump2
result = self.etree.tostring(root)
foo = self.etree.fromstring(result)
self.assertEqual('puh', foo[0].tag)
self.assertEqual('{http://huhu}bump1', foo[0][0].tag)
self.assertEqual('{http://huhu}bump2', foo[0][1].tag)
def test_delitem_tail(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a><b></b>B2<c></c>C2</a>')
doc = ElementTree(file=f)
a = doc.getroot()
del a[0]
self.assertXML(
_bytes('<a><c></c>C2</a>'),
a)
def test_clear(self):
Element = self.etree.Element
a = Element('a')
a.text = 'foo'
a.tail = 'bar'
a.set('hoi', 'dag')
a.clear()
self.assertEqual(None, a.text)
self.assertEqual(None, a.tail)
self.assertEqual(None, a.get('hoi'))
self.assertEqual('a', a.tag)
def test_clear_sub(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = 'foo'
a.tail = 'bar'
a.set('hoi', 'dag')
b = SubElement(a, 'b')
c = SubElement(b, 'c')
a.clear()
self.assertEqual(None, a.text)
self.assertEqual(None, a.tail)
self.assertEqual(None, a.get('hoi'))
self.assertEqual('a', a.tag)
self.assertEqual(0, len(a))
self.assertXML(_bytes('<a></a>'),
a)
self.assertXML(_bytes('<b><c></c></b>'),
b)
def test_clear_tail(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a><b></b>B2<c></c>C2</a>')
doc = ElementTree(file=f)
a = doc.getroot()
a.clear()
self.assertXML(
_bytes('<a></a>'),
a)
    def test_insert(self):
        """insert(i, el) places the new child at index i, shifting siblings."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = Element('d')
        a.insert(0, d)
        self.assertEqual(
            d,
            a[0])
        self.assertXML(
            _bytes('<a><d></d><b></b><c></c></a>'),
            a)
        e = Element('e')
        a.insert(2, e)
        self.assertEqual(
            e,
            a[2])
        self.assertXML(
            _bytes('<a><d></d><b></b><e></e><c></c></a>'),
            a)
def test_insert_beyond_index(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = Element('c')
a.insert(2, c)
self.assertEqual(
c,
a[1])
self.assertXML(
_bytes('<a><b></b><c></c></a>'),
a)
def test_insert_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = Element('d')
a.insert(-1, d)
self.assertEqual(
d,
a[-2])
self.assertXML(
_bytes('<a><b></b><d></d><c></c></a>'),
a)
def test_insert_tail(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = Element('c')
c.tail = 'C2'
a.insert(0, c)
self.assertXML(
_bytes('<a><c></c>C2<b></b></a>'),
a)
    def test_remove(self):
        """remove(el) deletes the child by identity; later siblings shift down."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        a.remove(b)
        self.assertEqual(
            c,
            a[0])
        self.assertXML(
            _bytes('<a><c></c></a>'),
            a)
def test_remove_ns(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('{http://test}a')
b = SubElement(a, '{http://test}b')
c = SubElement(a, '{http://test}c')
a.remove(b)
self.assertXML(
_bytes('<ns0:a xmlns:ns0="http://test"><ns0:c></ns0:c></ns0:a>'),
a)
self.assertXML(
_bytes('<ns0:b xmlns:ns0="http://test"></ns0:b>'),
b)
def test_remove_nonexisting(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = Element('d')
self.assertRaises(
ValueError, a.remove, d)
def test_remove_tail(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
b.tail = 'b2'
a.remove(b)
self.assertXML(
_bytes('<a></a>'),
a)
self.assertEqual('b2', b.tail)
def _test_getchildren(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertXML(
_bytes('<a><b><d></d></b><c><e></e></c></a>'),
a)
self.assertEqual(
[b, c],
a.getchildren())
self.assertEqual(
[d],
b.getchildren())
self.assertEqual(
[],
d.getchildren())
def test_makeelement(self):
Element = self.etree.Element
a = Element('a')
b = a.makeelement('c', {'hoi':'dag'})
self.assertXML(
_bytes('<c hoi="dag"></c>'),
b)
required_versions_ET['test_iter'] = (1,3)
def test_iter(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[a, b, d, c, e],
list(a.iter()))
self.assertEqual(
[d],
list(d.iter()))
def test_getiterator(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[a, b, d, c, e],
list(a.getiterator()))
self.assertEqual(
[d],
list(d.getiterator()))
def test_getiterator_empty(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[],
list(a.getiterator('none')))
self.assertEqual(
[],
list(e.getiterator('none')))
self.assertEqual(
[e],
list(e.getiterator()))
def test_getiterator_filter(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[a],
list(a.getiterator('a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a, a2],
list(a.getiterator('a')))
self.assertEqual(
[a2],
list(c.getiterator('a')))
def test_getiterator_filter_all(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[a, b, d, c, e],
list(a.getiterator('*')))
def test_getiterator_filter_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
comment_b = Comment("TEST-b")
b.append(comment_b)
self.assertEqual(
[comment_b],
list(a.getiterator(Comment)))
comment_a = Comment("TEST-a")
a.append(comment_a)
self.assertEqual(
[comment_b, comment_a],
list(a.getiterator(Comment)))
self.assertEqual(
[comment_b],
list(b.getiterator(Comment)))
def test_getiterator_filter_pi(self):
Element = self.etree.Element
PI = self.etree.ProcessingInstruction
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
pi_b = PI("TEST-b")
b.append(pi_b)
self.assertEqual(
[pi_b],
list(a.getiterator(PI)))
pi_a = PI("TEST-a")
a.append(pi_a)
self.assertEqual(
[pi_b, pi_a],
list(a.getiterator(PI)))
self.assertEqual(
[pi_b],
list(b.getiterator(PI)))
def test_getiterator_with_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = 'a'
b = SubElement(a, 'b')
b.text = 'b'
b.tail = 'b1'
c = SubElement(a, 'c')
c.text = 'c'
c.tail = 'c1'
d = SubElement(b, 'd')
c.text = 'd'
c.tail = 'd1'
e = SubElement(c, 'e')
e.text = 'e'
e.tail = 'e1'
self.assertEqual(
[a, b, d, c, e],
list(a.getiterator()))
#self.assertEqual(
# [d],
# list(d.getiterator()))
def test_getiterator_filter_with_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = 'a'
b = SubElement(a, 'b')
b.text = 'b'
b.tail = 'b1'
c = SubElement(a, 'c')
c.text = 'c'
c.tail = 'c1'
d = SubElement(b, 'd')
c.text = 'd'
c.tail = 'd1'
e = SubElement(c, 'e')
e.text = 'e'
e.tail = 'e1'
self.assertEqual(
[a],
list(a.getiterator('a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a, a2],
list(a.getiterator('a')))
self.assertEqual(
[a2],
list(e.getiterator('a')))
def test_getslice(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
self.assertEqual(
[b, c],
a[0:2])
self.assertEqual(
[b, c, d],
a[:])
self.assertEqual(
[b, c, d],
a[:10])
self.assertEqual(
[b],
a[0:1])
self.assertEqual(
[],
a[10:12])
def test_getslice_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
self.assertEqual(
[d],
a[-1:])
self.assertEqual(
[c, d],
a[-2:])
self.assertEqual(
[c],
a[-2:-1])
self.assertEqual(
[b, c],
a[-3:-1])
self.assertEqual(
[b, c],
a[-3:2])
def test_getslice_step(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
self.assertEqual(
[e,d,c,b],
a[::-1])
self.assertEqual(
[b,d],
a[::2])
self.assertEqual(
[e,c],
a[::-2])
self.assertEqual(
[d,c],
a[-2:0:-1])
self.assertEqual(
[e],
a[:1:-2])
def test_getslice_text(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a><b>B</b>B1<c>C</c>C1</a>')
doc = ElementTree(file=f)
a = doc.getroot()
b = a[0]
c = a[1]
self.assertEqual(
[b, c],
a[:])
self.assertEqual(
[b],
a[0:1])
self.assertEqual(
[c],
a[1:])
def test_comment_getitem_getslice(self):
Element = self.etree.Element
Comment = self.etree.Comment
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
foo = Comment('foo')
a.append(foo)
c = SubElement(a, 'c')
self.assertEqual(
[b, foo, c],
a[:])
self.assertEqual(
foo,
a[1])
a[1] = new = Element('new')
self.assertEqual(
new,
a[1])
self.assertXML(
_bytes('<a><b></b><new></new><c></c></a>'),
a)
def test_delslice(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
del a[1:3]
self.assertEqual(
[b, e],
list(a))
def test_delslice_negative1(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
del a[1:-1]
self.assertEqual(
[b, e],
list(a))
def test_delslice_negative2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
del a[-3:-1]
self.assertEqual(
[b, e],
list(a))
def test_delslice_step(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
del a[1::2]
self.assertEqual(
[b, d],
list(a))
def test_delslice_step_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
del a[::-1]
self.assertEqual(
[],
list(a))
def test_delslice_step_negative2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
del a[::-2]
self.assertEqual(
[b, d],
list(a))
def test_delslice_child_tail(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a><b></b>B2<c></c>C2<d></d>D2<e></e>E2</a>')
doc = ElementTree(file=f)
a = doc.getroot()
del a[1:3]
self.assertXML(
_bytes('<a><b></b>B2<e></e>E2</a>'),
a)
def test_delslice_tail(self):
XML = self.etree.XML
a = XML(_bytes('<a><b></b>B2<c></c>C2</a>'))
b, c = a
del a[:]
self.assertEqual("B2", b.tail)
self.assertEqual("C2", c.tail)
def test_delslice_memory(self):
# this could trigger a crash
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(b, 'c')
del b # no more reference to b
del a[:]
self.assertEqual('c', c.tag)
def test_setslice(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = Element('e')
f = Element('f')
g = Element('g')
s = [e, f, g]
a[1:2] = s
self.assertEqual(
[b, e, f, g, d],
list(a))
def test_setslice_all(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
e = Element('e')
f = Element('f')
g = Element('g')
s = [e, f, g]
a[:] = s
self.assertEqual(
[e, f, g],
list(a))
def test_setslice_all_empty(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
e = Element('e')
f = Element('f')
g = Element('g')
s = [e, f, g]
a[:] = s
self.assertEqual(
[e, f, g],
list(a))
def test_setslice_all_replace(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
s = [b, c, d]
a[:] = s
self.assertEqual(
[b, c, d],
list(a))
def test_setslice_all_replace_reversed(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
s = [d, c, b]
a[:] = s
self.assertEqual(
[d, c, b],
list(a))
def test_setslice_all_replace_reversed_ns1(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('{ns}a')
b = SubElement(a, '{ns}b', {'{ns1}a1': 'test'})
c = SubElement(a, '{ns}c', {'{ns2}a2': 'test'})
d = SubElement(a, '{ns}d', {'{ns3}a3': 'test'})
s = [d, c, b]
a[:] = s
self.assertEqual(
[d, c, b],
list(a))
self.assertEqual(
['{ns}d', '{ns}c', '{ns}b'],
[ child.tag for child in a ])
self.assertEqual(
[['{ns3}a3'], ['{ns2}a2'], ['{ns1}a1']],
[ list(child.attrib.keys()) for child in a ])
def test_setslice_all_replace_reversed_ns2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('{ns}a')
b = SubElement(a, '{ns1}b', {'{ns}a1': 'test'})
c = SubElement(a, '{ns2}c', {'{ns}a2': 'test'})
d = SubElement(a, '{ns3}d', {'{ns}a3': 'test'})
s = [d, c, b]
a[:] = s
self.assertEqual(
[d, c, b],
list(a))
self.assertEqual(
['{ns3}d', '{ns2}c', '{ns1}b'],
[ child.tag for child in a ])
self.assertEqual(
[['{ns}a3'], ['{ns}a2'], ['{ns}a1']],
[ list(child.attrib.keys()) for child in a ])
def test_setslice_end(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
e = Element('e')
f = Element('f')
g = Element('g')
h = Element('h')
s = [e, f]
a[99:] = s
self.assertEqual(
[a, b, e, f],
list(a))
s = [g, h]
a[:0] = s
self.assertEqual(
[g, h, a, b, e, f],
list(a))
def test_setslice_single(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
e = Element('e')
f = Element('f')
s = [e]
a[0:1] = s
self.assertEqual(
[e, c],
list(a))
s = [f]
a[1:2] = s
self.assertEqual(
[e, f],
list(a))
def test_setslice_tail(self):
ElementTree = self.etree.ElementTree
Element = self.etree.Element
f = BytesIO('<a><b></b>B2<c></c>C2<d></d>D2<e></e>E2</a>')
doc = ElementTree(file=f)
a = doc.getroot()
x = Element('x')
y = Element('y')
z = Element('z')
x.tail = 'X2'
y.tail = 'Y2'
z.tail = 'Z2'
a[1:3] = [x, y, z]
self.assertXML(
_bytes('<a><b></b>B2<x></x>X2<y></y>Y2<z></z>Z2<e></e>E2</a>'),
a)
def test_setslice_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
x = Element('x')
y = Element('y')
a[1:-1] = [x, y]
self.assertEqual(
[b, x, y, d],
list(a))
def test_setslice_negative2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
x = Element('x')
y = Element('y')
a[1:-2] = [x, y]
self.assertEqual(
[b, x, y, c, d],
list(a))
def test_setslice_end(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = Element('e')
f = Element('f')
g = Element('g')
s = [e, f, g]
a[3:] = s
self.assertEqual(
[b, c, d, e, f, g],
list(a))
def test_setslice_empty(self):
Element = self.etree.Element
a = Element('a')
b = Element('b')
c = Element('c')
a[:] = [b, c]
self.assertEqual(
[b, c],
list(a))
def test_tail_elementtree_root(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
a = Element('a')
a.tail = 'A2'
t = ElementTree(element=a)
self.assertEqual('A2',
a.tail)
def test_elementtree_getiterator(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
ElementTree = self.etree.ElementTree
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
t = ElementTree(element=a)
self.assertEqual(
[a, b, d, c, e],
list(t.getiterator()))
def test_elementtree_getiterator_filter(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
ElementTree = self.etree.ElementTree
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
t = ElementTree(element=a)
self.assertEqual(
[a],
list(t.getiterator('a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a, a2],
list(t.getiterator('a')))
def test_ns_access(self):
ElementTree = self.etree.ElementTree
ns = 'http://xml.infrae.com/1'
f = BytesIO('<x:a xmlns:x="%s"><x:b></x:b></x:a>' % ns)
t = ElementTree(file=f)
a = t.getroot()
self.assertEqual('{%s}a' % ns,
a.tag)
self.assertEqual('{%s}b' % ns,
a[0].tag)
def test_ns_access2(self):
ElementTree = self.etree.ElementTree
ns = 'http://xml.infrae.com/1'
ns2 = 'http://xml.infrae.com/2'
f = BytesIO('<x:a xmlns:x="%s" xmlns:y="%s"><x:b></x:b><y:b></y:b></x:a>' % (ns, ns2))
t = ElementTree(file=f)
a = t.getroot()
self.assertEqual('{%s}a' % ns,
a.tag)
self.assertEqual('{%s}b' % ns,
a[0].tag)
self.assertEqual('{%s}b' % ns2,
a[1].tag)
def test_ns_setting(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
ns = 'http://xml.infrae.com/1'
ns2 = 'http://xml.infrae.com/2'
a = Element('{%s}a' % ns)
b = SubElement(a, '{%s}b' % ns2)
c = SubElement(a, '{%s}c' % ns)
self.assertEqual('{%s}a' % ns,
a.tag)
self.assertEqual('{%s}b' % ns2,
b.tag)
self.assertEqual('{%s}c' % ns,
c.tag)
self.assertEqual('{%s}a' % ns,
a.tag)
self.assertEqual('{%s}b' % ns2,
b.tag)
self.assertEqual('{%s}c' % ns,
c.tag)
def test_ns_tag_parse(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
ElementTree = self.etree.ElementTree
ns = 'http://xml.infrae.com/1'
ns2 = 'http://xml.infrae.com/2'
f = BytesIO('<a xmlns="%s" xmlns:x="%s"><x:b></x:b><b></b></a>' % (ns, ns2))
t = ElementTree(file=f)
a = t.getroot()
self.assertEqual('{%s}a' % ns,
a.tag)
self.assertEqual('{%s}b' % ns2,
a[0].tag)
self.assertEqual('{%s}b' % ns,
a[1].tag)
    def test_ns_attr(self):
        """Namespaced attributes round-trip through set()/get() and are
        serialised with auto-generated ns0/ns1 prefixes."""
        Element = self.etree.Element
        ns = 'http://xml.infrae.com/1'
        ns2 = 'http://xml.infrae.com/2'
        a = Element('a')
        a.set('{%s}foo' % ns, 'Foo')
        a.set('{%s}bar' % ns2, 'Bar')
        self.assertEqual(
            'Foo',
            a.get('{%s}foo' % ns))
        self.assertEqual(
            'Bar',
            a.get('{%s}bar' % ns2))
        # The ns0/ns1 prefix assignment order is not guaranteed, so accept
        # either of the two possible serialisations.
        try:
            self.assertXML(
                _bytes('<a xmlns:ns0="%s" xmlns:ns1="%s" ns0:foo="Foo" ns1:bar="Bar"></a>' % (ns, ns2)),
                a)
        except AssertionError:
            self.assertXML(
                _bytes('<a xmlns:ns0="%s" xmlns:ns1="%s" ns1:foo="Foo" ns0:bar="Bar"></a>' % (ns2, ns)),
                a)
def test_ns_move(self):
Element = self.etree.Element
one = self.etree.fromstring(
_bytes('<foo><bar xmlns:ns="http://a.b.c"><ns:baz/></bar></foo>'))
baz = one[0][0]
two = Element('root')
two.append(baz)
# removing the originating document could cause a crash/error before
# as namespace is not moved along with it
del one, baz
self.assertEqual('{http://a.b.c}baz', two[0].tag)
def test_ns_decl_tostring(self):
tostring = self.etree.tostring
root = self.etree.XML(
_bytes('<foo><bar xmlns:ns="http://a.b.c"><ns:baz/></bar></foo>'))
baz = root[0][0]
nsdecl = re.findall(_bytes("xmlns(?::[a-z0-9]+)?=[\"']([^\"']+)[\"']"),
tostring(baz))
self.assertEqual([_bytes("http://a.b.c")], nsdecl)
def test_ns_decl_tostring_default(self):
tostring = self.etree.tostring
root = self.etree.XML(
_bytes('<foo><bar xmlns="http://a.b.c"><baz/></bar></foo>'))
baz = root[0][0]
nsdecl = re.findall(_bytes("xmlns(?::[a-z0-9]+)?=[\"']([^\"']+)[\"']"),
tostring(baz))
self.assertEqual([_bytes("http://a.b.c")], nsdecl)
def test_ns_decl_tostring_root(self):
tostring = self.etree.tostring
root = self.etree.XML(
_bytes('<foo xmlns:ns="http://a.b.c"><bar><ns:baz/></bar></foo>'))
baz = root[0][0]
nsdecl = re.findall(_bytes("xmlns(?::[a-z0-9]+)?=[\"']([^\"']+)[\"']"),
tostring(baz))
self.assertEqual([_bytes("http://a.b.c")], nsdecl)
def test_ns_decl_tostring_element(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element("foo")
bar = SubElement(root, "{http://a.b.c}bar")
baz = SubElement(bar, "{http://a.b.c}baz")
nsdecl = re.findall(_bytes("xmlns(?::[a-z0-9]+)?=[\"']([^\"']+)[\"']"),
self.etree.tostring(baz))
self.assertEqual([_bytes("http://a.b.c")], nsdecl)
def test_attribute_xmlns_move(self):
Element = self.etree.Element
root = Element('element')
subelement = Element('subelement',
{"{http://www.w3.org/XML/1998/namespace}id": "foo"})
self.assertEqual(1, len(subelement.attrib))
self.assertEqual(
"foo",
subelement.get("{http://www.w3.org/XML/1998/namespace}id"))
root.append(subelement)
self.assertEqual(1, len(subelement.attrib))
self.assertEqual(
list({"{http://www.w3.org/XML/1998/namespace}id" : "foo"}.items()),
list(subelement.attrib.items()))
self.assertEqual(
"foo",
subelement.get("{http://www.w3.org/XML/1998/namespace}id"))
def test_namespaces_after_serialize(self):
parse = self.etree.parse
tostring = self.etree.tostring
ns_href = "http://a.b.c"
one = parse(
BytesIO('<foo><bar xmlns:ns="%s"><ns:baz/></bar></foo>' % ns_href))
baz = one.getroot()[0][0]
parsed = parse(BytesIO( tostring(baz) )).getroot()
self.assertEqual('{%s}baz' % ns_href, parsed.tag)
def test_attribute_namespace_roundtrip(self):
fromstring = self.etree.fromstring
tostring = self.etree.tostring
ns_href = "http://a.b.c"
xml = _bytes('<root xmlns="%s" xmlns:x="%s"><el x:a="test" /></root>' % (
ns_href,ns_href))
root = fromstring(xml)
self.assertEqual('test', root[0].get('{%s}a' % ns_href))
xml2 = tostring(root)
self.assertTrue(_bytes(':a=') in xml2, xml2)
root2 = fromstring(xml2)
self.assertEqual('test', root2[0].get('{%s}a' % ns_href))
def test_attribute_namespace_roundtrip_replaced(self):
fromstring = self.etree.fromstring
tostring = self.etree.tostring
ns_href = "http://a.b.c"
xml = _bytes('<root xmlns="%s" xmlns:x="%s"><el x:a="test" /></root>' % (
ns_href,ns_href))
root = fromstring(xml)
self.assertEqual('test', root[0].get('{%s}a' % ns_href))
root[0].set('{%s}a' % ns_href, 'TEST')
xml2 = tostring(root)
self.assertTrue(_bytes(':a=') in xml2, xml2)
root2 = fromstring(xml2)
self.assertEqual('TEST', root2[0].get('{%s}a' % ns_href))
required_versions_ET['test_register_namespace'] = (1,3)
def test_register_namespace(self):
# ET 1.3+
Element = self.etree.Element
prefix = 'TESTPREFIX'
namespace = 'http://seriously.unknown/namespace/URI'
el = Element('{%s}test' % namespace)
self.assertEqual(_bytes('<ns0:test xmlns:ns0="%s"></ns0:test>' % namespace),
self._writeElement(el))
self.etree.register_namespace(prefix, namespace)
el = Element('{%s}test' % namespace)
self.assertEqual(_bytes('<%s:test xmlns:%s="%s"></%s:test>' % (
prefix, prefix, namespace, prefix)),
self._writeElement(el))
self.assertRaises(ValueError, self.etree.register_namespace, 'ns25', namespace)
def test_tostring(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(tostring(a)))
def test_tostring_element(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
self.assertEqual(_bytes('<b></b>'),
canonicalize(tostring(b)))
self.assertEqual(_bytes('<c><d></d></c>'),
canonicalize(tostring(c)))
def test_tostring_element_tail(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
b.tail = 'Foo'
self.assertTrue(tostring(b) == _bytes('<b/>Foo') or
tostring(b) == _bytes('<b />Foo'))
required_versions_ET['test_tostring_method_html'] = (1,3)
def test_tostring_method_html(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
html = Element('html')
body = SubElement(html, 'body')
p = SubElement(body, 'p')
p.text = "html"
SubElement(p, 'br').tail = "test"
self.assertEqual(_bytes('<html><body><p>html<br>test</p></body></html>'),
tostring(html, method="html"))
required_versions_ET['test_tostring_method_text'] = (1,3)
def test_tostring_method_text(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = "A"
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = "TAIL"
c = SubElement(a, 'c')
c.text = "C"
self.assertEqual(_bytes('ABTAILCtail'),
tostring(a, method="text"))
def test_iterparse(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b></b><c/></a>')
iterator = iterparse(f)
self.assertEqual(None,
iterator.root)
events = list(iterator)
root = iterator.root
self.assertEqual(
[('end', root[0]), ('end', root[1]), ('end', root)],
events)
def test_iterparse_file(self):
iterparse = self.etree.iterparse
iterator = iterparse(fileInTestDir("test.xml"))
self.assertEqual(None,
iterator.root)
events = list(iterator)
root = iterator.root
self.assertEqual(
[('end', root[0]), ('end', root)],
events)
def test_iterparse_start(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b></b><c/></a>')
iterator = iterparse(f, events=('start',))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root), ('start', root[0]), ('start', root[1])],
events)
def test_iterparse_start_end(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b></b><c/></a>')
iterator = iterparse(f, events=('start','end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root), ('start', root[0]), ('end', root[0]),
('start', root[1]), ('end', root[1]), ('end', root)],
events)
def test_iterparse_clear(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b></b><c/></a>')
iterator = iterparse(f)
for event, elem in iterator:
elem.clear()
root = iterator.root
self.assertEqual(0,
len(root))
def test_iterparse_large(self):
iterparse = self.etree.iterparse
CHILD_COUNT = 12345
f = BytesIO('<a>%s</a>' % ('<b>test</b>'*CHILD_COUNT))
i = 0
for key in iterparse(f):
event, element = key
i += 1
self.assertEqual(i, CHILD_COUNT + 1)
    def test_iterparse_attrib_ns(self):
        """Setting a namespaced attribute on elements during iterparse
        ('start' events) works, and the expected start/end/ns event
        sequence is produced."""
        iterparse = self.etree.iterparse
        f = BytesIO('<a xmlns="http://ns1/"><b><c xmlns="http://ns2/"/></b></a>')
        attr_name = '{http://testns/}bla'
        events = []
        iterator = iterparse(f, events=('start','end','start-ns','end-ns'))
        for event, elem in iterator:
            events.append(event)
            if event == 'start':
                # Tag the non-root elements while they are still open.
                if elem.tag != '{http://ns1/}a':
                    elem.set(attr_name, 'value')
        # ns events bracket the element events of the scope they apply to.
        self.assertEqual(
            ['start-ns', 'start', 'start', 'start-ns', 'start',
             'end', 'end-ns', 'end', 'end', 'end-ns'],
            events)
        root = iterator.root
        # Root was deliberately skipped above; children were tagged.
        self.assertEqual(
            None,
            root.get(attr_name))
        self.assertEqual(
            'value',
            root[0].get(attr_name))
def test_iterparse_getiterator(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
counts = []
for event, elem in iterparse(f):
counts.append(len(list(elem.getiterator())))
self.assertEqual(
[1,2,1,4],
counts)
def test_iterparse_move_elements(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
for event, node in etree.iterparse(f): pass
root = etree.Element('new_root', {})
root[:] = node[:]
self.assertEqual(
['b', 'c'],
[ el.tag for el in root ])
def test_iterparse_cdata(self):
tostring = self.etree.tostring
f = BytesIO('<root><![CDATA[test]]></root>')
context = self.etree.iterparse(f)
content = [ el.text for event,el in context ]
self.assertEqual(['test'], content)
self.assertEqual(_bytes('<root>test</root>'),
tostring(context.root))
def test_parse_file(self):
parse = self.etree.parse
# from file
tree = parse(fileInTestDir('test.xml'))
self.assertXML(
_bytes('<a><b></b></a>'),
tree.getroot())
def test_parse_file_nonexistent(self):
parse = self.etree.parse
self.assertRaises(IOError, parse, fileInTestDir('notthere.xml'))
def test_parse_error_none(self):
parse = self.etree.parse
self.assertRaises(TypeError, parse, None)
required_versions_ET['test_parse_error'] = (1,3)
def test_parse_error(self):
# ET < 1.3 raises ExpatError
parse = self.etree.parse
f = BytesIO('<a><b></c></b></a>')
self.assertRaises(SyntaxError, parse, f)
f.close()
required_versions_ET['test_parse_error_from_file'] = (1,3)
def test_parse_error_from_file(self):
parse = self.etree.parse
# from file
f = open(fileInTestDir('test_broken.xml'), 'rb')
self.assertRaises(SyntaxError, parse, f)
f.close()
def test_parse_file_object(self):
parse = self.etree.parse
# from file object
f = open(fileInTestDir('test.xml'), 'rb')
tree = parse(f)
f.close()
self.assertXML(
_bytes('<a><b></b></a>'),
tree.getroot())
def test_parse_stringio(self):
parse = self.etree.parse
f = BytesIO('<a><b></b></a>')
tree = parse(f)
f.close()
self.assertXML(
_bytes('<a><b></b></a>'),
tree.getroot()
)
def test_parse_cdata(self):
tostring = self.etree.tostring
root = self.etree.XML(_bytes('<root><![CDATA[test]]></root>'))
self.assertEqual('test', root.text)
self.assertEqual(_bytes('<root>test</root>'),
tostring(root))
def test_parse_with_encoding(self):
# this can fail in libxml2 <= 2.6.22
parse = self.etree.parse
tree = parse(BytesIO('<?xml version="1.0" encoding="ascii"?><html/>'))
self.assertXML(_bytes('<html></html>'),
tree.getroot())
def test_encoding(self):
Element = self.etree.Element
a = Element('a')
a.text = _str('Søk på nettet')
self.assertXML(
_str('<a>Søk på nettet</a>').encode('UTF-8'),
a, 'utf-8')
def test_encoding_exact(self):
ElementTree = self.etree.ElementTree
Element = self.etree.Element
a = Element('a')
a.text = _str('Søk på nettet')
f = BytesIO()
tree = ElementTree(element=a)
tree.write(f, encoding='utf-8')
self.assertEqual(_str('<a>Søk på nettet</a>').encode('UTF-8'),
f.getvalue().replace(_bytes('\n'),_bytes('')))
def test_parse_file_encoding(self):
parse = self.etree.parse
# from file
tree = parse(fileInTestDir('test-string.xml'))
self.assertXML(
_str('<a>Søk på nettet</a>').encode('UTF-8'),
tree.getroot(), 'UTF-8')
def test_parse_file_object_encoding(self):
parse = self.etree.parse
# from file object
f = open(fileInTestDir('test-string.xml'), 'rb')
tree = parse(f)
f.close()
self.assertXML(
_str('<a>Søk på nettet</a>').encode('UTF-8'),
tree.getroot(), 'UTF-8')
def test_encoding_8bit_latin1(self):
ElementTree = self.etree.ElementTree
Element = self.etree.Element
a = Element('a')
a.text = _str('Søk på nettet')
f = BytesIO()
tree = ElementTree(element=a)
tree.write(f, encoding='iso-8859-1')
result = f.getvalue()
declaration = _bytes("<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>")
self.assertEncodingDeclaration(result, _bytes('iso-8859-1'))
result = result.split(_bytes('?>'), 1)[-1].replace(_bytes('\n'),_bytes(''))
self.assertEqual(_str('<a>Søk på nettet</a>').encode('iso-8859-1'),
result)
required_versions_ET['test_parse_encoding_8bit_explicit'] = (1,3)
def test_parse_encoding_8bit_explicit(self):
XMLParser = self.XMLParser
text = _str('Søk på nettet')
xml_latin1 = (_str('<a>%s</a>') % text).encode('iso-8859-1')
self.assertRaises(self.etree.ParseError,
self.etree.parse,
BytesIO(xml_latin1))
tree = self.etree.parse(BytesIO(xml_latin1),
XMLParser(encoding="iso-8859-1"))
a = tree.getroot()
self.assertEqual(a.text, text)
required_versions_ET['test_parse_encoding_8bit_override'] = (1,3)
    def test_parse_encoding_8bit_override(self):
        """An explicit parser-level encoding overrides a wrong encoding
        given in the XML declaration (ET 1.3+ behaviour)."""
        XMLParser = self.XMLParser
        text = _str('Søk på nettet')
        wrong_declaration = _str("<?xml version='1.0' encoding='UTF-8'?>")
        xml_latin1 = (_str('%s<a>%s</a>') % (wrong_declaration, text)
                      ).encode('iso-8859-1')
        # Without the override, the latin-1 bytes are not valid UTF-8.
        self.assertRaises(self.etree.ParseError,
                          self.etree.parse,
                          BytesIO(xml_latin1))
        # The parser's declared encoding wins over the (wrong) declaration.
        tree = self.etree.parse(BytesIO(xml_latin1),
                                XMLParser(encoding="iso-8859-1"))
        a = tree.getroot()
        self.assertEqual(a.text, text)
def _test_wrong_unicode_encoding(self):
# raise error on wrong encoding declaration in unicode strings
XML = self.etree.XML
test_utf = (_str('<?xml version="1.0" encoding="iso-8859-1"?>') +
_str('<a>Søk på nettet</a>'))
self.assertRaises(SyntaxError, XML, test_utf)
def test_encoding_write_default_encoding(self):
ElementTree = self.etree.ElementTree
Element = self.etree.Element
a = Element('a')
a.text = _str('Søk på nettet')
f = BytesIO()
tree = ElementTree(element=a)
tree.write(f)
data = f.getvalue().replace(_bytes('\n'),_bytes(''))
self.assertEqual(
_str('<a>Søk på nettet</a>').encode('ASCII', 'xmlcharrefreplace'),
data)
def test_encoding_tostring(self):
Element = self.etree.Element
tostring = self.etree.tostring
a = Element('a')
a.text = _str('Søk på nettet')
self.assertEqual(_str('<a>Søk på nettet</a>').encode('UTF-8'),
tostring(a, encoding='utf-8'))
def test_encoding_tostring_unknown(self):
Element = self.etree.Element
tostring = self.etree.tostring
a = Element('a')
a.text = _str('Søk på nettet')
self.assertRaises(LookupError, tostring, a,
encoding='Invalid Encoding')
def test_encoding_tostring_sub(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
a = Element('a')
b = SubElement(a, 'b')
b.text = _str('Søk på nettet')
self.assertEqual(_str('<b>Søk på nettet</b>').encode('UTF-8'),
tostring(b, encoding='utf-8'))
def test_encoding_tostring_sub_tail(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
a = Element('a')
b = SubElement(a, 'b')
b.text = _str('Søk på nettet')
b.tail = _str('Søk')
self.assertEqual(_str('<b>Søk på nettet</b>Søk').encode('UTF-8'),
tostring(b, encoding='utf-8'))
def test_encoding_tostring_default_encoding(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
a = Element('a')
a.text = _str('Søk på nettet')
expected = _bytes('<a>Søk på nettet</a>')
self.assertEqual(
expected,
tostring(a))
def test_encoding_sub_tostring_default_encoding(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
a = Element('a')
b = SubElement(a, 'b')
b.text = _str('Søk på nettet')
expected = _bytes('<b>Søk på nettet</b>')
self.assertEqual(
expected,
tostring(b))
def test_encoding_8bit_xml(self):
utext = _str('Søk på nettet')
uxml = _str('<p>%s</p>') % utext
prologue = _bytes('<?xml version="1.0" encoding="iso-8859-1" ?>')
isoxml = prologue + uxml.encode('iso-8859-1')
tree = self.etree.XML(isoxml)
self.assertEqual(utext, tree.text)
def test_encoding_utf8_bom(self):
utext = _str('Søk på nettet')
uxml = (_str('<?xml version="1.0" encoding="UTF-8"?>') +
_str('<p>%s</p>') % utext)
bom = _bytes('\\xEF\\xBB\\xBF').decode("unicode_escape").encode("latin1")
xml = bom + uxml.encode("utf-8")
tree = etree.XML(xml)
self.assertEqual(utext, tree.text)
def test_encoding_8bit_parse_stringio(self):
utext = _str('Søk på nettet')
uxml = _str('<p>%s</p>') % utext
prologue = _bytes('<?xml version="1.0" encoding="iso-8859-1" ?>')
isoxml = prologue + uxml.encode('iso-8859-1')
el = self.etree.parse(BytesIO(isoxml)).getroot()
self.assertEqual(utext, el.text)
def test_deepcopy_elementtree(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
a = Element('a')
a.text = "Foo"
atree = ElementTree(a)
btree = copy.deepcopy(atree)
self.assertEqual("Foo", atree.getroot().text)
self.assertEqual("Foo", btree.getroot().text)
self.assertFalse(btree is atree)
self.assertFalse(btree.getroot() is atree.getroot())
def test_deepcopy(self):
Element = self.etree.Element
a = Element('a')
a.text = 'Foo'
b = copy.deepcopy(a)
self.assertEqual('Foo', b.text)
b.text = 'Bar'
self.assertEqual('Bar', b.text)
self.assertEqual('Foo', a.text)
del a
self.assertEqual('Bar', b.text)
def test_deepcopy_tail(self):
Element = self.etree.Element
a = Element('a')
a.tail = 'Foo'
b = copy.deepcopy(a)
self.assertEqual('Foo', b.tail)
b.tail = 'Bar'
self.assertEqual('Bar', b.tail)
self.assertEqual('Foo', a.tail)
del a
self.assertEqual('Bar', b.tail)
def test_deepcopy_subelement(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
a.text = 'FooText'
a.tail = 'FooTail'
b = copy.deepcopy(a)
self.assertEqual('FooText', b.text)
self.assertEqual('FooTail', b.tail)
b.text = 'BarText'
b.tail = 'BarTail'
self.assertEqual('BarTail', b.tail)
self.assertEqual('FooTail', a.tail)
self.assertEqual('BarText', b.text)
self.assertEqual('FooText', a.text)
del a
self.assertEqual('BarTail', b.tail)
self.assertEqual('BarText', b.text)
def test_deepcopy_namespaces(self):
root = self.etree.XML(_bytes('''<doc xmlns="dns" xmlns:t="tns">
<parent><node t:foo="bar" /></parent>
</doc>'''))
self.assertEqual(
root[0][0].get('{tns}foo'),
copy.deepcopy(root[0])[0].get('{tns}foo') )
self.assertEqual(
root[0][0].get('{tns}foo'),
copy.deepcopy(root[0][0]).get('{tns}foo') )
def test_deepcopy_append(self):
# previously caused a crash
Element = self.etree.Element
tostring = self.etree.tostring
a = Element('a')
b = copy.deepcopy(a)
a.append( Element('C') )
b.append( Element('X') )
self.assertEqual(_bytes('<a><C/></a>'),
tostring(a).replace(_bytes(' '), _bytes('')))
self.assertEqual(_bytes('<a><X/></a>'),
tostring(b).replace(_bytes(' '), _bytes('')))
def test_deepcopy_comment(self):
# previously caused a crash
# not supported by ET < 1.3!
Comment = self.etree.Comment
a = Comment("ONE")
b = copy.deepcopy(a)
b.text = "ANOTHER"
self.assertEqual('ONE', a.text)
self.assertEqual('ANOTHER', b.text)
def test_shallowcopy(self):
Element = self.etree.Element
a = Element('a')
a.text = 'Foo'
b = copy.copy(a)
self.assertEqual('Foo', b.text)
b.text = 'Bar'
self.assertEqual('Bar', b.text)
self.assertEqual('Foo', a.text)
# XXX ElementTree will share nodes, but lxml.etree won't..
def test_shallowcopy_elementtree(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
a = Element('a')
a.text = 'Foo'
atree = ElementTree(a)
btree = copy.copy(atree)
self.assertFalse(btree is atree)
self.assertTrue(btree.getroot() is atree.getroot())
self.assertEqual('Foo', atree.getroot().text)
def _test_element_boolean(self):
# deprecated as of ET 1.3/lxml 2.0
etree = self.etree
e = etree.Element('foo')
self.assertEqual(False, bool(e))
etree.SubElement(e, 'bar')
self.assertEqual(True, bool(e))
e = etree.Element('foo')
e.text = 'hey'
self.assertEqual(False, bool(e))
e = etree.Element('foo')
e.tail = 'bar'
self.assertEqual(False, bool(e))
e = etree.Element('foo')
e.set('bar', 'Bar')
self.assertEqual(False, bool(e))
def test_multiple_elementrees(self):
etree = self.etree
a = etree.Element('a')
b = etree.SubElement(a, 'b')
t = etree.ElementTree(a)
self.assertEqual(self._rootstring(t), _bytes('<a><b/></a>'))
t1 = etree.ElementTree(a)
self.assertEqual(self._rootstring(t1), _bytes('<a><b/></a>'))
self.assertEqual(self._rootstring(t), _bytes('<a><b/></a>'))
t2 = etree.ElementTree(b)
self.assertEqual(self._rootstring(t2), _bytes('<b/>'))
self.assertEqual(self._rootstring(t1), _bytes('<a><b/></a>'))
self.assertEqual(self._rootstring(t), _bytes('<a><b/></a>'))
def test_qname(self):
etree = self.etree
qname = etree.QName('myns', 'a')
a1 = etree.Element(qname)
a2 = etree.SubElement(a1, qname)
self.assertEqual(a1.tag, "{myns}a")
self.assertEqual(a2.tag, "{myns}a")
def test_qname_cmp(self):
etree = self.etree
qname1 = etree.QName('myns', 'a')
qname2 = etree.QName('myns', 'a')
self.assertEqual(qname1, "{myns}a")
self.assertEqual("{myns}a", qname2)
self.assertEqual(qname1, qname1)
self.assertEqual(qname1, qname2)
def test_qname_attribute_getset(self):
etree = self.etree
qname = etree.QName('myns', 'a')
a = etree.Element(qname)
a.set(qname, "value")
self.assertEqual(a.get(qname), "value")
self.assertEqual(a.get("{myns}a"), "value")
def test_qname_attrib(self):
etree = self.etree
qname = etree.QName('myns', 'a')
a = etree.Element(qname)
a.attrib[qname] = "value"
self.assertEqual(a.attrib[qname], "value")
self.assertEqual(a.attrib.get(qname), "value")
self.assertEqual(a.attrib["{myns}a"], "value")
self.assertEqual(a.attrib.get("{myns}a"), "value")
def test_qname_attribute_resolve(self):
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element(qname)
a.set(qname, qname)
self.assertXML(
_bytes('<ns0:a xmlns:ns0="http://myns" ns0:a="ns0:a"></ns0:a>'),
a)
def test_qname_attribute_resolve_new(self):
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element('a')
a.set('a', qname)
self.assertXML(
_bytes('<a xmlns:ns0="http://myns" a="ns0:a"></a>'),
a)
def test_qname_attrib_resolve(self):
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element(qname)
a.attrib[qname] = qname
self.assertXML(
_bytes('<ns0:a xmlns:ns0="http://myns" ns0:a="ns0:a"></ns0:a>'),
a)
def test_parser_version(self):
etree = self.etree
parser = etree.XMLParser()
if hasattr(parser, "version"):
# ElementTree 1.3+, cET
self.assertTrue(re.match("[^ ]+ [0-9.]+", parser.version))
# feed parser interface
def test_feed_parser_bytes(self):
parser = self.XMLParser()
parser.feed(_bytes('<?xml version='))
parser.feed(_bytes('"1.0"?><ro'))
parser.feed(_bytes('ot><'))
parser.feed(_bytes('a test="works"/'))
parser.feed(_bytes('></root'))
parser.feed(_bytes('>'))
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(root[0].tag, "a")
self.assertEqual(root[0].get("test"), "works")
def test_feed_parser_unicode(self):
parser = self.XMLParser()
parser.feed(_str('<ro'))
parser.feed(_str('ot><'))
parser.feed(_str('a test="works"/'))
parser.feed(_str('></root'))
parser.feed(_str('>'))
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(root[0].tag, "a")
self.assertEqual(root[0].get("test"), "works")
required_versions_ET['test_feed_parser_error_close_empty'] = (1,3)
def test_feed_parser_error_close_empty(self):
ParseError = self.etree.ParseError
parser = self.XMLParser()
self.assertRaises(ParseError, parser.close)
required_versions_ET['test_feed_parser_error_close_incomplete'] = (1,3)
def test_feed_parser_error_close_incomplete(self):
ParseError = self.etree.ParseError
parser = self.XMLParser()
parser.feed('<?xml version=')
parser.feed('"1.0"?><ro')
self.assertRaises(ParseError, parser.close)
required_versions_ET['test_feed_parser_error_broken'] = (1,3)
def test_feed_parser_error_broken(self):
ParseError = self.etree.ParseError
parser = self.XMLParser()
parser.feed('<?xml version=')
parser.feed('"1.0"?><ro')
try:
parser.feed('<><><><><><><')
except ParseError:
# can raise, but not required before close()
pass
self.assertRaises(ParseError, parser.close)
required_versions_ET['test_feed_parser_error_position'] = (1,3)
def test_feed_parser_error_position(self):
ParseError = self.etree.ParseError
parser = self.XMLParser()
try:
parser.close()
except ParseError:
e = sys.exc_info()[1]
self.assertNotEqual(None, e.code)
self.assertNotEqual(0, e.code)
self.assertTrue(isinstance(e.position, tuple))
self.assertTrue(e.position >= (0, 0))
# parser target interface
required_versions_ET['test_parser_target_property'] = (1,3)
def test_parser_target_property(self):
class Target(object):
pass
target = Target()
parser = self.XMLParser(target=target)
self.assertEqual(target, parser.target)
def test_parser_target_tag(self):
assertEqual = self.assertEqual
assertFalse = self.assertFalse
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertFalse(attrib)
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
parser.feed("<TAG/>")
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start", "end"], events)
def test_parser_target_error_in_start(self):
assertEqual = self.assertEqual
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertEqual("TAG", tag)
raise ValueError("TEST")
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
try:
parser.feed("<TAG/>")
except ValueError:
self.assertTrue('TEST' in str(sys.exc_info()[1]))
else:
self.assertTrue(False)
if 'lxml' in self.etree.__name__:
self.assertEqual(["start"], events)
else:
# cElementTree calls end() as well
self.assertTrue("start" in events)
def test_parser_target_error_in_end(self):
assertEqual = self.assertEqual
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
raise ValueError("TEST")
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
try:
parser.feed("<TAG/>")
except ValueError:
self.assertTrue('TEST' in str(sys.exc_info()[1]))
else:
self.assertTrue(False)
self.assertEqual(["start", "end"], events)
def test_parser_target_error_in_close(self):
assertEqual = self.assertEqual
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
raise ValueError("TEST")
parser = self.XMLParser(target=Target())
try:
parser.feed("<TAG/>")
parser.close()
except ValueError:
self.assertTrue('TEST' in str(sys.exc_info()[1]))
else:
self.assertTrue(False)
self.assertEqual(["start", "end"], events)
def test_parser_target_error_in_start_and_close(self):
assertEqual = self.assertEqual
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertEqual("TAG", tag)
raise IndexError("TEST-IE")
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
raise ValueError("TEST-VE")
parser = self.XMLParser(target=Target())
try:
parser.feed("<TAG/>")
parser.close()
except IndexError:
if 'lxml' in self.etree.__name__:
# we try not to swallow the initial exception in Py2
self.assertTrue(sys.version_info[0] < 3)
self.assertTrue('TEST-IE' in str(sys.exc_info()[1]))
except ValueError:
if 'lxml' in self.etree.__name__:
self.assertTrue(sys.version_info[0] >= 3)
self.assertTrue('TEST-VE' in str(sys.exc_info()[1]))
else:
self.assertTrue(False)
if 'lxml' in self.etree.__name__:
self.assertEqual(["start"], events)
else:
# cElementTree calls end() as well
self.assertTrue("start" in events)
def test_elementtree_parser_target(self):
assertEqual = self.assertEqual
assertFalse = self.assertFalse
Element = self.etree.Element
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertFalse(attrib)
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return Element("DONE")
parser = self.XMLParser(target=Target())
tree = self.etree.ElementTree()
tree.parse(BytesIO("<TAG/>"), parser=parser)
self.assertEqual("DONE", tree.getroot().tag)
self.assertEqual(["start", "end"], events)
def test_parser_target_attrib(self):
assertEqual = self.assertEqual
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
for name, value in attrib.items():
assertEqual(tag + name, value)
def end(self, tag):
events.append("end-" + tag)
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
parser.feed('<root a="roota" b="rootb"><sub c="subc"/></root>')
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "start-sub", "end-sub", "end-root"],
events)
def test_parser_target_data(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
parser.feed('<root>A<sub/>B</root>')
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "data-A", "start-sub",
"end-sub", "data-B", "end-root"],
events)
def test_parser_target_entity(self):
events = []
class Target(object):
def __init__(self):
self._data = []
def _flush_data(self):
if self._data:
events.append("data-" + ''.join(self._data))
del self._data[:]
def start(self, tag, attrib):
self._flush_data()
events.append("start-" + tag)
def end(self, tag):
self._flush_data()
events.append("end-" + tag)
def data(self, data):
self._data.append(data)
def close(self):
self._flush_data()
return "DONE"
parser = self.XMLParser(target=Target())
dtd = '''
<!DOCTYPE root [
<!ELEMENT root (sub*)>
<!ELEMENT sub (#PCDATA)>
<!ENTITY ent "an entity">
]>
'''
parser.feed(dtd+'<root><sub/><sub>this is &ent;</sub><sub/></root>')
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "start-sub", "end-sub", "start-sub",
"data-this is an entity",
"end-sub", "start-sub", "end-sub", "end-root"],
events)
required_versions_ET['test_parser_target_entity_unknown'] = (1,3)
def test_parser_target_entity_unknown(self):
events = []
class Target(object):
def __init__(self):
self._data = []
def _flush_data(self):
if self._data:
events.append("data-" + ''.join(self._data))
del self._data[:]
def start(self, tag, attrib):
self._flush_data()
events.append("start-" + tag)
def end(self, tag):
self._flush_data()
events.append("end-" + tag)
def data(self, data):
self._data.append(data)
def close(self):
self._flush_data()
return "DONE"
parser = self.XMLParser(target=Target())
def feed():
parser.feed('<root><sub/><sub>some &ent;</sub><sub/></root>')
parser.close()
self.assertRaises(self.etree.ParseError, feed)
def test_treebuilder(self):
builder = self.etree.TreeBuilder()
el = builder.start("root", {'a':'A', 'b':'B'})
self.assertEqual("root", el.tag)
self.assertEqual({'a':'A', 'b':'B'}, el.attrib)
builder.data("ROOTTEXT")
el = builder.start("child", {'x':'X', 'y':'Y'})
self.assertEqual("child", el.tag)
self.assertEqual({'x':'X', 'y':'Y'}, el.attrib)
builder.data("CHILDTEXT")
el = builder.end("child")
self.assertEqual("child", el.tag)
self.assertEqual({'x':'X', 'y':'Y'}, el.attrib)
self.assertEqual("CHILDTEXT", el.text)
self.assertEqual(None, el.tail)
builder.data("CHILDTAIL")
root = builder.end("root")
self.assertEqual("root", root.tag)
self.assertEqual("ROOTTEXT", root.text)
self.assertEqual("CHILDTEXT", root[0].text)
self.assertEqual("CHILDTAIL", root[0].tail)
def test_treebuilder_target(self):
parser = self.XMLParser(target=self.etree.TreeBuilder())
parser.feed('<root>ROOTTEXT<child>CHILDTEXT</child>CHILDTAIL</root>')
root = parser.close()
self.assertEqual("root", root.tag)
self.assertEqual("ROOTTEXT", root.text)
self.assertEqual("CHILDTEXT", root[0].text)
self.assertEqual("CHILDTAIL", root[0].tail)
# helper methods
def _writeElement(self, element, encoding='us-ascii'):
"""Write out element for comparison.
"""
data = self.etree.tostring(element, encoding=encoding)
return canonicalize(data)
def _writeElementFile(self, element, encoding='us-ascii'):
    """Write out element for comparison, using a real file.

    Serializes *element* through a named temporary file and returns the
    canonicalized content.  Unlike the previous version, the write and
    read file objects are closed even when serialization raises, so no
    file descriptors leak on failure.
    """
    ElementTree = self.etree.ElementTree
    handle, filename = tempfile.mkstemp()
    try:
        # The low-level mkstemp() handle is never used for I/O here; close
        # it immediately so the exchange happens purely via the named file.
        os.close(handle)
        f = open(filename, 'wb')
        try:
            tree = ElementTree(element=element)
            tree.write(f, encoding=encoding)
        finally:
            f.close()
        f = open(filename, 'rb')
        try:
            data = f.read()
        finally:
            f.close()
    finally:
        os.remove(filename)
    return canonicalize(data)
def assertXML(self, expected, element, encoding='us-ascii'):
"""Writes element out and checks whether it is expected.
Does this two ways; once using BytesIO, once using a real file.
"""
if isinstance(expected, unicode):
expected = expected.encode(encoding)
self.assertEqual(expected, self._writeElement(element, encoding))
self.assertEqual(expected, self._writeElementFile(element, encoding))
def assertEncodingDeclaration(self, result, encoding):
    """Checks if the result XML (str or bytes) declares *encoding*.

    The previous version ran the regex match twice on the same input;
    the match object is now computed once and reused.
    """
    enc_re = r"<\?xml[^>]+ encoding=[\"']([^\"']+)[\"']"
    if isinstance(result, str):
        match = re.compile(enc_re).match(result)
    else:
        # Byte-string results need a byte-string pattern.
        match = re.compile(_bytes(enc_re)).match(result)
    self.assertTrue(match)
    result_encoding = match.group(1)
    self.assertEqual(result_encoding.upper(), encoding.upper())
def _rootstring(self, tree):
return self.etree.tostring(tree.getroot()).replace(
_bytes(' '), _bytes('')).replace(_bytes('\n'), _bytes(''))
def _check_element_tree(self, tree):
self._check_element(tree.getroot())
def _check_element(self, element):
self.assertTrue(hasattr(element, 'tag'))
self.assertTrue(hasattr(element, 'attrib'))
self.assertTrue(hasattr(element, 'text'))
self.assertTrue(hasattr(element, 'tail'))
self._check_string(element.tag)
self._check_mapping(element.attrib)
if element.text != None:
self._check_string(element.text)
if element.tail != None:
self._check_string(element.tail)
def _check_string(self, string):
    """Exercise the basic string protocol on *string*.

    The results are mostly discarded on purpose: this only verifies that
    the value supports len(), iteration (yielding 1-char items),
    concatenation and slicing without raising.
    """
    len(string)
    for char in string:
        self.assertEqual(1, len(char))
    new_string = string + ""
    new_string = string + " "
    string[:0]
def _check_mapping(self, mapping):
    """Exercise the basic mapping protocol on *mapping*.

    Checks len(), keys()/values()/items(), item lookup, and that
    assignment round-trips.  Note: this deliberately mutates the mapping
    by inserting the "key" entry.
    """
    len(mapping)
    keys = mapping.keys()
    values = mapping.values()
    items = mapping.items()
    for key in keys:
        item = mapping[key]
    mapping["key"] = "value"
    self.assertEqual("value", mapping["key"])
class _XMLPullParserTest(unittest.TestCase):
    """Tests for the incremental (pull) parsing API, XMLPullParser.

    ``etree`` is injected by concrete subclasses (lxml.etree,
    xml.etree.ElementTree, ...) so the same tests run against each
    implementation.
    """
    # Set by subclasses to the etree implementation under test.
    etree = None

    def _feed(self, parser, data, chunk_size=None):
        # Feed the document at once, or in fixed-size chunks to exercise
        # incremental parsing across arbitrary token boundaries.
        if chunk_size is None:
            parser.feed(data)
        else:
            for i in range(0, len(data), chunk_size):
                parser.feed(data[i:i+chunk_size])

    def _close_and_return_root(self, parser):
        # Close the parser and return the root element, papering over the
        # API difference between implementations.
        if 'ElementTree' in self.etree.__name__:
            # ElementTree's API is a bit unwieldy in Py3.4
            root = parser._close_and_return_root()
        else:
            root = parser.close()
        return root

    def assert_event_tags(self, parser, expected):
        # Drain all pending events and compare their (action, tag) pairs.
        events = parser.read_events()
        self.assertEqual([(action, elem.tag) for action, elem in events],
                         expected)

    def test_simple_xml(self):
        # End events must appear exactly when each element is completed,
        # regardless of how the input is chunked.
        for chunk_size in (None, 1, 5):
            #with self.subTest(chunk_size=chunk_size):
            parser = self.etree.XMLPullParser()
            self.assert_event_tags(parser, [])
            self._feed(parser, "<!-- comment -->\n", chunk_size)
            self.assert_event_tags(parser, [])
            self._feed(parser,
                       "<root>\n <element key='value'>text</element",
                       chunk_size)
            self.assert_event_tags(parser, [])
            self._feed(parser, ">\n", chunk_size)
            self.assert_event_tags(parser, [('end', 'element')])
            self._feed(parser, "<element>text</element>tail\n", chunk_size)
            self._feed(parser, "<empty-element/>\n", chunk_size)
            self.assert_event_tags(parser, [
                ('end', 'element'),
                ('end', 'empty-element'),
                ])
            self._feed(parser, "</root>\n", chunk_size)
            self.assert_event_tags(parser, [('end', 'root')])
            root = self._close_and_return_root(parser)
            self.assertEqual(root.tag, 'root')

    def test_feed_while_iterating(self):
        # read_events() returns a live iterator: feeding more data between
        # next() calls must surface the new events; exhaustion raises
        # StopIteration.
        parser = self.etree.XMLPullParser()
        it = parser.read_events()
        self._feed(parser, "<root>\n <element key='value'>text</element>\n")
        action, elem = next(it)
        self.assertEqual((action, elem.tag), ('end', 'element'))
        self._feed(parser, "</root>\n")
        action, elem = next(it)
        self.assertEqual((action, elem.tag), ('end', 'root'))
        try:
            next(it)
        except StopIteration:
            self.assertTrue(True)
        else:
            self.assertTrue(False)

    def test_simple_xml_with_ns(self):
        # Namespaced documents report fully-qualified '{uri}tag' names.
        parser = self.etree.XMLPullParser()
        self.assert_event_tags(parser, [])
        self._feed(parser, "<!-- comment -->\n")
        self.assert_event_tags(parser, [])
        self._feed(parser, "<root xmlns='namespace'>\n")
        self.assert_event_tags(parser, [])
        self._feed(parser, "<element key='value'>text</element")
        self.assert_event_tags(parser, [])
        self._feed(parser, ">\n")
        self.assert_event_tags(parser, [('end', '{namespace}element')])
        self._feed(parser, "<element>text</element>tail\n")
        self._feed(parser, "<empty-element/>\n")
        self.assert_event_tags(parser, [
            ('end', '{namespace}element'),
            ('end', '{namespace}empty-element'),
            ])
        self._feed(parser, "</root>\n")
        self.assert_event_tags(parser, [('end', '{namespace}root')])
        root = self._close_and_return_root(parser)
        self.assertEqual(root.tag, '{namespace}root')

    def test_ns_events(self):
        # 'start-ns' carries the (prefix, uri) pair; 'end-ns' carries None.
        parser = self.etree.XMLPullParser(events=('start-ns', 'end-ns'))
        self._feed(parser, "<!-- comment -->\n")
        self._feed(parser, "<root xmlns='namespace'>\n")
        self.assertEqual(
            list(parser.read_events()),
            [('start-ns', ('', 'namespace'))])
        self._feed(parser, "<element key='value'>text</element")
        self._feed(parser, ">\n")
        self._feed(parser, "<element>text</element>tail\n")
        self._feed(parser, "<empty-element/>\n")
        self._feed(parser, "</root>\n")
        self.assertEqual(list(parser.read_events()), [('end-ns', None)])
        parser.close()

    def test_events(self):
        # events=() suppresses all events.
        parser = self.etree.XMLPullParser(events=())
        self._feed(parser, "<root/>\n")
        self.assert_event_tags(parser, [])

        # events=('start', 'end') reports both element boundaries.
        parser = self.etree.XMLPullParser(events=('start', 'end'))
        self._feed(parser, "<!-- comment -->\n")
        self.assert_event_tags(parser, [])
        self._feed(parser, "<root>\n")
        self.assert_event_tags(parser, [('start', 'root')])
        self._feed(parser, "<element key='value'>text</element")
        self.assert_event_tags(parser, [('start', 'element')])
        self._feed(parser, ">\n")
        self.assert_event_tags(parser, [('end', 'element')])
        self._feed(parser,
                   "<element xmlns='foo'>text<empty-element/></element>tail\n")
        self.assert_event_tags(parser, [
            ('start', '{foo}element'),
            ('start', '{foo}empty-element'),
            ('end', '{foo}empty-element'),
            ('end', '{foo}element'),
            ])
        self._feed(parser, "</root>")
        root = self._close_and_return_root(parser)
        self.assert_event_tags(parser, [('end', 'root')])
        self.assertEqual(root.tag, 'root')

        # events=('start',) reports only element starts.
        parser = self.etree.XMLPullParser(events=('start',))
        self._feed(parser, "<!-- comment -->\n")
        self.assert_event_tags(parser, [])
        self._feed(parser, "<root>\n")
        self.assert_event_tags(parser, [('start', 'root')])
        self._feed(parser, "<element key='value'>text</element")
        self.assert_event_tags(parser, [('start', 'element')])
        self._feed(parser, ">\n")
        self.assert_event_tags(parser, [])
        self._feed(parser,
                   "<element xmlns='foo'>text<empty-element/></element>tail\n")
        self.assert_event_tags(parser, [
            ('start', '{foo}element'),
            ('start', '{foo}empty-element'),
            ])
        self._feed(parser, "</root>")
        root = self._close_and_return_root(parser)
        self.assertEqual(root.tag, 'root')

    def test_events_sequence(self):
        # Test that events can be some sequence that's not just a tuple or list
        eventset = set(['end', 'start'])
        parser = self.etree.XMLPullParser(events=eventset)
        self._feed(parser, "<foo>bar</foo>")
        self.assert_event_tags(parser, [('start', 'foo'), ('end', 'foo')])

        class DummyIter:
            # One-shot iterator; also defines `next` for Python 2.
            def __init__(self):
                self.events = iter(['start', 'end', 'start-ns'])
            def __iter__(self):
                return self
            def __next__(self):
                return next(self.events)
            next = __next__

        parser = self.etree.XMLPullParser(events=DummyIter())
        self._feed(parser, "<foo>bar</foo>")
        self.assert_event_tags(parser, [('start', 'foo'), ('end', 'foo')])

    def test_unknown_event(self):
        # Unrecognized event names must be rejected with ValueError.
        try:
            self.etree.XMLPullParser(events=('start', 'end', 'bogus'))
        except ValueError:
            self.assertTrue(True)
        else:
            self.assertTrue(False)
if etree:
class ETreeTestCase(_ETreeTestCaseBase):
etree = etree
class ETreePullTestCase(_XMLPullParserTest):
etree = etree
if ElementTree:
class ElementTreeTestCase(_ETreeTestCaseBase):
etree = ElementTree
@classmethod
def setUpClass(cls):
import warnings
# ElementTree warns about getiterator() in recent Pythons
warnings.filterwarnings(
'ignore',
'This method will be removed.*\.iter\(\).*instead',
PendingDeprecationWarning)
filter_by_version(
ElementTreeTestCase,
ElementTreeTestCase.required_versions_ET, ET_VERSION)
if hasattr(ElementTree, 'XMLPullParser'):
class ElementTreePullTestCase(_XMLPullParserTest):
etree = ElementTree
else:
ElementTreePullTestCase = None
if cElementTree:
class CElementTreeTestCase(_ETreeTestCaseBase):
etree = cElementTree
filter_by_version(
CElementTreeTestCase,
CElementTreeTestCase.required_versions_cET, CET_VERSION)
def test_suite():
    """Build the unittest suite for whichever etree implementations were
    successfully imported at module load time."""
    suite = unittest.TestSuite()
    if etree:
        suite.addTests([unittest.makeSuite(ETreeTestCase)])
        suite.addTests([unittest.makeSuite(ETreePullTestCase)])
    if ElementTree:
        suite.addTests([unittest.makeSuite(ElementTreeTestCase)])
        # ElementTreePullTestCase is None when the stdlib lacks XMLPullParser.
        if ElementTreePullTestCase:
            suite.addTests([unittest.makeSuite(ElementTreePullTestCase)])
    if cElementTree:
        suite.addTests([unittest.makeSuite(CElementTreeTestCase)])
    return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| apache-2.0 |
isnnn/Sick-Beard-TPB | lib/hachoir_parser/file_system/fat.py | 90 | 16185 | from lib.hachoir_core.compatibility import sorted
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, StaticFieldSet,
RawBytes, PaddingBytes, createPaddingField, Link, Fragment,
Bit, Bits, UInt8, UInt16, UInt32,
String, Bytes, NullBytes)
from lib.hachoir_core.field.integer import GenericInteger
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.error import error
from lib.hachoir_core.tools import humanFilesize, makePrintable
import datetime
import re
strip_index = re.compile(r'\[[^]]+]$')
class Boot(FieldSet):
    """FAT boot sector (BIOS Parameter Block plus boot code).

    The layout is common to FAT12/16/32 except for a FAT32-only extension
    block, selected via the owning parser's ``version`` attribute.
    """
    # A boot sector is always one 512-byte sector.
    static_size = 512*8
    def createFields(self):
        yield Bytes(self, "jmp", 3, "Jump instruction (to skip over header on boot)")
        yield Bytes(self, "oem_name", 8, "OEM Name (padded with spaces)")
        yield UInt16(self, "sector_size", "Bytes per sector")
        yield UInt8 (self, "cluster_size", "Sectors per cluster")
        yield UInt16(self, "reserved_sectors", "Reserved sector count (including boot sector)")
        yield UInt8 (self, "fat_nb", "Number of file allocation tables")
        yield UInt16(self, "max_root", "Maximum number of root directory entries")
        yield UInt16(self, "sectors1", "Total sectors (if zero, use 'sectors2')")
        yield UInt8 (self, "media_desc", "Media descriptor")
        yield UInt16(self, "fat_size", "Sectors per FAT")
        yield UInt16(self, "track_size", "Sectors per track")
        yield UInt16(self, "head_nb", "Number of heads")
        yield UInt32(self, "hidden", "Hidden sectors")
        yield UInt32(self, "sectors2", "Total sectors (if greater than 65535)")
        if self.parent.version == 32:
            # FAT32-only extension of the BPB.
            yield UInt32(self, "fat32_size", "Sectors per FAT")
            yield UInt16(self, "fat_flags", "FAT Flags")
            yield UInt16(self, "version", "Version")
            yield UInt32(self, "root_start", "Cluster number of root directory start")
            yield UInt16(self, "inf_sector", "Sector number of FS Information Sector")
            yield UInt16(self, "boot_copy", "Sector number of a copy of this boot sector")
            yield NullBytes(self, "reserved[]", 12, "Reserved")
        yield UInt8(self, "phys_drv", "Physical drive number")
        yield NullBytes(self, "reserved[]", 1, 'Reserved ("current head")')
        yield UInt8(self, "sign", "Signature")
        yield textHandler(UInt32(self, "serial", "ID (serial number)"), hexadecimal)
        yield String(self, "label", 11, "Volume Label", strip=' ', charset="ASCII")
        yield String(self, "fs_type", 8, "FAT file system type", strip=' ', charset="ASCII")
        # Boot code fills the remainder of the sector up to the trailing
        # 2-byte signature.
        yield Bytes(self, "code", 510-self.current_size/8, "Operating system boot code")
        yield Bytes(self, "trail_sig", 2, "Signature (0x55 0xAA)")
class FSInfo(StaticFieldSet):
    """FAT32 FS Information Sector (free-cluster hints for the driver)."""
    format = (
        (String, "lead_sig", 4, 'Signature ("RRaA")'),
        (NullBytes, "reserved[]", 480),
        (String, "struct_sig", 4, 'Signature ("rrAa")'),
        (UInt32, "free_count", "Last known free cluster count on the volume"),
        (UInt32, "nxt_free",),
        (NullBytes, "reserved[]", 12),
        (Bytes, "trail_sig", 4, "Signature (0x00 0x00 0x55 0xAA)")
    )
class FAT(FieldSet):
class FAT(FieldSet):
def createFields(self):
parent = self.parent
version = parent.parent.version
text_handler = parent.text_handler
while self.current_size < self._size:
yield textHandler(GenericInteger(self, 'entry[]', False, version), text_handler)
def createFields(self):
version = self.parent.version
max_entry = 1 << min(28, version)
def FatEntry(chunk):
i = chunk.value
j = (1 - i) % max_entry
if j == 0:
return "reserved cluster"
elif j == 1:
return "free cluster"
elif j < 10:
return "end of a chain"
elif j == 10:
return "bad cluster"
elif j < 18:
return "reserved value"
else:
return str(i)
self.text_handler = FatEntry
while self.current_size < self._size:
yield FAT.FAT(self, 'group[]', size=min(1000*version,self._size-self.current_size))
class Date(FieldSet):
    """A FAT timestamp field.

    The on-disk size depends on which timestamp it is: "create" stores
    date + time + 10ms units (5 bytes), "modify" stores date + time
    (4 bytes), and "access" stores only the date (2 bytes).
    """
    def __init__(self, parent, name):
        FieldSet.__init__(self, parent, name, size={
            "create": 5,
            "access": 2,
            "modify": 4,
        }[name] * 8)
    def createFields(self):
        size = self.size / 8
        if size > 2:
            if size > 4:
                # Extra creation-time resolution below the 2-second field.
                yield UInt8(self, "cs", "10ms units, values from 0 to 199")
            yield Bits(self, "2sec", 5, "seconds/2")
            yield Bits(self, "min", 6, "minutes")
            yield Bits(self, "hour", 5, "hours")
        yield Bits(self, "day", 5, "(1-31)")
        yield Bits(self, "month", 4, "(1-12)")
        yield Bits(self, "year", 7, "(0 = 1980, 127 = 2107)")
    def createDescription(self):
        date = [ self["year"].value, self["month"].value, self["day"].value ]
        size = self.size / 8
        if size > 2:
            mkdate = datetime.datetime
            # "2sec" counts 2-second units; work in centiseconds here.
            cs = 200 * self["2sec"].value
            if size > 4:
                cs += self["cs"].value
            date += [ self["hour"].value, self["min"].value, cs / 100, cs % 100 * 10000 ]
        else:
            mkdate = datetime.date
        if date == [ 0 for i in date ]:
            # All-zero timestamp means "not set".
            date = None
        else:
            # FAT years are relative to 1980.
            date[0] += 1980
            try:
                date = mkdate(*tuple(date))
            except ValueError:
                return "invalid"
        return str(date)
class InodeLink(Link):
def __init__(self, parent, name, target=None):
Link.__init__(self, parent, name)
self.target = target
self.first = None
def _getTargetPath(self):
if not self.target:
parent = self.parent
self.target = strip_index.sub(r"\\", parent.parent._name) + parent.getFilename().rstrip("/")
return self.target
def createValue(self):
field = InodeGen(self["/"], self.parent, self._getTargetPath())(self)
if field:
self._display = field.path
return Link.createValue(self)
def createDisplay(self):
return "/%s[0]" % self._getTargetPath()
class FileEntry(FieldSet):
static_size = 32*8
process = False
LFN = False
def __init__(self, *args):
FieldSet.__init__(self, *args)
self.status = self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN)
if self.status in (0, 0xE5):
return
magic = self.stream.readBits(self.absolute_address+11*8, 8, LITTLE_ENDIAN)
if magic & 0x3F == 0x0F:
self.LFN = True
elif self.getFilename() not in (".", ".."):
self.process = True
def getFilename(self):
name = self["name"].value
if isinstance(name, str):
name = makePrintable(name, "ASCII", to_unicode=True)
ext = self["ext"].value
if ext:
name += "." + ext
if name[0] == 5:
name = "\xE5" + name[1:]
if not self.LFN and self["directory"].value:
name += "/"
return name
def createDescription(self):
if self.status == 0:
return "Free entry"
elif self.status == 0xE5:
return "Deleted file"
elif self.LFN:
name = "".join( field.value for field in self.array("name") )
try:
name = name[:name.index('\0')]
except ValueError:
pass
seq_no = self["seq_no"].value
return "Long filename part: '%s' [%u]" % (name, seq_no)
else:
return "File: '%s'" % self.getFilename()
def getCluster(self):
cluster = self["cluster_lo"].value
if self.parent.parent.version > 16:
cluster += self["cluster_hi"].value << 16
return cluster
def createFields(self):
if not self.LFN:
yield String(self, "name", 8, "DOS file name (padded with spaces)",
strip=' ', charset="ASCII")
yield String(self, "ext", 3, "DOS file extension (padded with spaces)",
strip=' ', charset="ASCII")
yield Bit(self, "read_only")
yield Bit(self, "hidden")
yield Bit(self, "system")
yield Bit(self, "volume_label")
yield Bit(self, "directory")
yield Bit(self, "archive")
yield Bit(self, "device")
yield Bit(self, "unused")
yield RawBytes(self, "reserved", 1, "Something about the case")
yield Date(self, "create")
yield Date(self, "access")
if self.parent.parent.version > 16:
yield UInt16(self, "cluster_hi")
else:
yield UInt16(self, "ea_index")
yield Date(self, "modify")
yield UInt16(self, "cluster_lo")
size = UInt32(self, "size")
yield size
if self.process:
del self.process
target_size = size.value
if self["directory"].value:
if target_size:
size.error("(FAT) value must be zero")
target_size = 0
elif not target_size:
return
self.target_size = 8 * target_size
yield InodeLink(self, "data")
else:
yield UInt8(self, "seq_no", "Sequence Number")
yield String(self, "name[]", 10, "(5 UTF-16 characters)",
charset="UTF-16-LE")
yield UInt8(self, "magic", "Magic number (15)")
yield NullBytes(self, "reserved", 1, "(always 0)")
yield UInt8(self, "checksum", "Checksum of DOS file name")
yield String(self, "name[]", 12, "(6 UTF-16 characters)",
charset="UTF-16-LE")
yield UInt16(self, "first_cluster", "(always 0)")
yield String(self, "name[]", 4, "(2 UTF-16 characters)",
charset="UTF-16-LE")
class Directory(Fragment):
def createFields(self):
while self.current_size < self._size:
yield FileEntry(self, "entry[]")
class File(Fragment):
def _getData(self):
return self["data"]
def createFields(self):
yield Bytes(self, "data", self.datasize/8)
padding = self._size - self.current_size
if padding:
yield createPaddingField(self, padding)
class InodeGen:
    """Lazily materializes a file's or directory's content fields by
    walking its FAT cluster chain, one contiguous run per call."""
    def __init__(self, root, entry, path):
        self.root = root
        # Generator over (address, size, is_last) runs of the entry's chain.
        self.cluster = root.clusters(entry.getCluster)
        self.path = path
        # Remaining size in bits; 0 for directories (no recorded size).
        self.filesize = entry.target_size
        self.done = 0
        def createInputStream(cis, **args):
            args["size"] = self.filesize
            args.setdefault("tags",[]).append(("filename", entry.getFilename()))
            return cis(**args)
        self.createInputStream = createInputStream
    def __call__(self, prev):
        name = self.path + "[]"
        address, size, last = self.cluster.next()
        if self.filesize:
            if self.done >= self.filesize:
                # Chain is longer than the directory entry's recorded size.
                error("(FAT) bad metadata for " + self.path)
                return
            field = File(self.root, name, size=size)
            if prev.first is None:
                # Only the first fragment carries the human-readable size.
                field._description = 'File size: %s' % humanFilesize(self.filesize//8)
            field.setSubIStream(self.createInputStream)
            field.datasize = min(self.filesize - self.done, size)
            self.done += field.datasize
        else:
            field = Directory(self.root, name, size=size)
        padding = self.root.getFieldByAddress(address, feed=False)
        if not isinstance(padding, (PaddingBytes, RawBytes)):
            # The target area must still be unparsed padding to carve into.
            error("(FAT) address %u doesn't point to a padding field" % address)
            return
        if last:
            next = None
        else:
            # Continuation callback for the next run of the chain.
            next = lambda: self(field)
        field.setLinks(prev.first, next)
        self.root.writeFieldsIn(padding, address, (field,))
        return field
class FAT_FS(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"category": "file_system",
"min_size": 512*8,
"file_ext": ("",),
}
def _validate(self, type_offset):
if self.stream.readBytes(type_offset*8, 8) != ("FAT%-5u" % self.version):
return "Invalid FAT%u signature" % self.version
if self.stream.readBytes(510*8, 2) != "\x55\xAA":
return "Invalid BIOS signature"
return True
def clusters(self, cluster_func):
max_entry = (1 << min(28, self.version)) - 16
cluster = cluster_func()
if 1 < cluster < max_entry:
clus_nb = 1
next = cluster
while True:
next = self.fat[next/1000][next%1000].value
if not 1 < next < max_entry:
break
if cluster + clus_nb == next:
clus_nb += 1
else:
yield self.data_start + cluster * self.cluster_size, clus_nb * self.cluster_size, False
cluster = next
clus_nb = 1
yield self.data_start + cluster * self.cluster_size, clus_nb * self.cluster_size, True
def createFields(self):
# Read boot seector
boot = Boot(self, "boot", "Boot sector")
yield boot
self.sector_size = boot["sector_size"].value
if self.version == 32:
for field in sorted((
(boot["inf_sector"].value, lambda: FSInfo(self, "fsinfo")),
(boot["boot_copy"].value, lambda: Boot(self, "bkboot", "Copy of the boot sector")),
)):
if field[0]:
padding = self.seekByte(field[0] * self.sector_size)
if padding:
yield padding
yield field[1]()
padding = self.seekByte(boot["reserved_sectors"].value * self.sector_size)
if padding:
yield padding
# Read the two FAT
fat_size = boot["fat_size"].value
if fat_size == 0:
fat_size = boot["fat32_size"].value
fat_size *= self.sector_size * 8
for i in xrange(boot["fat_nb"].value):
yield FAT(self, "fat[]", "File Allocation Table", size=fat_size)
# Read inode table (Directory)
self.cluster_size = boot["cluster_size"].value * self.sector_size * 8
self.fat = self["fat[0]"]
if "root_start" in boot:
self.target_size = 0
self.getCluster = lambda: boot["root_start"].value
yield InodeLink(self, "root", "root")
else:
yield Directory(self, "root[]", size=boot["max_root"].value * 32 * 8)
self.data_start = self.current_size - 2 * self.cluster_size
sectors = boot["sectors1"].value
if not sectors:
sectors = boot["sectors2"].value
# Create one big padding field for the end
size = sectors * self.sector_size
if self._size:
size = min(size, self.size//8)
padding = self.seekByte(size)
if padding:
yield padding
class FAT12(FAT_FS):
PARSER_TAGS = {
"id": "fat12",
"description": "FAT12 filesystem",
"magic": (("FAT12 ", 54*8),),
}
version = 12
def validate(self):
return FAT_FS._validate(self, 54)
class FAT16(FAT_FS):
PARSER_TAGS = {
"id": "fat16",
"description": "FAT16 filesystem",
"magic": (("FAT16 ", 54*8),),
}
version = 16
def validate(self):
return FAT_FS._validate(self, 54)
class FAT32(FAT_FS):
PARSER_TAGS = {
"id": "fat32",
"description": "FAT32 filesystem",
"magic": (("FAT32 ", 82*8),),
}
version = 32
def validate(self):
return FAT_FS._validate(self, 82)
| gpl-3.0 |
narantech/linux-rpi | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
    """Load /proc/kallsyms into the global `kallsyms` list as sorted
    (address, name) tuples.

    Fixes over the previous version: the file is always closed (it was
    leaked before), only the expected open() failure is swallowed instead
    of a bare `except:`, and each line is split once instead of twice.
    """
    global kallsyms
    try:
        f = open("/proc/kallsyms", "r")
    except IOError:
        # No kallsyms available (e.g. not on Linux); leave the table empty.
        return
    try:
        for line in f:
            fields = line.split()
            loc = int(fields[0], 16)
            name = fields[2]
            kallsyms.append((loc, name))
    finally:
        f.close()
    kallsyms.sort()
def get_sym(sloc):
	"""Resolve an address to (symbol_name, offset) via the kallsyms table.

	Returns (None, 0) when the address precedes every known symbol.
	"""
	loc = int(sloc)
	# Binary search for the greatest symbol address that is <= loc.
	# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= lo
	#            kallsyms[i][0] >  loc for all hi <= i < len(kallsyms)
	lo, hi = -1, len(kallsyms)
	while hi - lo > 1:
		mid = (lo + hi) // 2
		if kallsyms[mid][0] <= loc:
			lo = mid
		else:
			hi = mid
	if lo < 0:
		# Address lies before the first symbol (or the table is empty).
		return (None, 0)
	symloc, name = kallsyms[lo]
	return (name, loc - symloc)
def print_drop_table():
	# Print one row per drop location: resolved symbol (or raw address when
	# the symbol is unknown), offset into the symbol, and the drop count.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			# Unresolvable address: fall back to printing it verbatim.
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf-script hook invoked once before event processing starts.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf-script hook invoked once after processing: resolve symbols lazily
	# here (not at startup) and then dump the accumulated drop counts.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, location, protocol):
	"""perf callback fired for each skb:kfree_skb (packet drop) event.

	Increments the per-call-site counter in the global ``drop_log`` table,
	keyed by the drop location address rendered as a string.
	"""
	slocation = str(location)
	# dict.get replaces the previous bare except, which could mask real
	# errors (e.g. KeyboardInterrupt) instead of just a missing key.
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
blaggacao/odoo | addons/gamification/models/res_users.py | 386 | 4010 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv
from challenge import MAX_VISIBILITY_RANKING
class res_users_gamification_group(osv.Model):
    """Gamification extension of ``res.users``.

    Provides helpers used by the web client to fetch the serialised goals of
    the current user (grouped by in-progress challenge) and the list of
    challenges the user has been invited to join.
    """
    _name = 'res.users'
    _inherit = ['res.users']

    def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
        """Shortcut returning the goal summary of the current user."""
        return self._serialised_goals_summary(
            cr, uid, user_id=uid,
            excluded_categories=excluded_categories, context=context)

    def _serialised_goals_summary(self, cr, uid, user_id, excluded_categories=None, context=None):
        """Return a serialised list of goals assigned to the user, grouped by challenge.

        :param excluded_categories: optional list of challenge categories to
            exclude from the search
        :return: list of dicts shaped as::

            {
                'id': <gamification.challenge id>,
                'name': <gamification.challenge name>,
                'visibility_mode': <visibility {ranking,personal}>,
                'currency': <res.currency id>,
                'lines': [(see gamification_challenge._get_serialized_challenge_lines() format)]
            }
        """
        challenge_pool = self.pool.get('gamification.challenge')
        search_domain = [('user_ids', 'in', uid), ('state', '=', 'inprogress')]
        if excluded_categories and isinstance(excluded_categories, list):
            search_domain.append(('category', 'not in', excluded_categories))

        current_user = self.browse(cr, uid, uid, context=context)
        found_ids = challenge_pool.search(cr, uid, search_domain, context=context)

        summaries = []
        for challenge in challenge_pool.browse(cr, uid, found_ids, context=context):
            # Serialise goal lines so they can be consumed by javascript.
            lines = challenge_pool._get_serialized_challenge_lines(
                cr, uid, challenge, user_id,
                restrict_top=MAX_VISIBILITY_RANKING, context=context)
            if not lines:
                # Challenges without visible lines are omitted entirely.
                continue
            summaries.append({
                'id': challenge.id,
                'name': challenge.name,
                'visibility_mode': challenge.visibility_mode,
                'currency': current_user.company_id.currency_id.id,
                'lines': lines,
            })
        return summaries

    def get_challenge_suggestions(self, cr, uid, context=None):
        """Return the list of challenges suggested to the user"""
        challenge_pool = self.pool.get('gamification.challenge')
        suggested_ids = challenge_pool.search(
            cr, uid,
            [('invited_user_ids', 'in', uid), ('state', '=', 'inprogress')],
            context=context)
        return [{
            'id': challenge.id,
            'name': challenge.name,
            'description': challenge.description,
        } for challenge in challenge_pool.browse(cr, uid, suggested_ids, context=context)]
| agpl-3.0 |
slowfranklin/samba | source3/selftest/tests.py | 6 | 31988 | #!/usr/bin/python
# This script generates a list of testsuites that should be run as part of
# the Samba 3 test suite.
# The output of this script is parsed by selftest.pl, which then decides
# which of the tests to actually run. It will, for example, skip all tests
# listed in selftest/skip or only run a subset during "make quicktest".
# The idea is that this script outputs all of the tests of Samba 3, not
# just those that are known to pass, and list those that should be skipped
# or are known to fail in selftest/skip or selftest/samba3-knownfail. This makes it
# very easy to see what functionality is still missing in Samba 3 and makes
# it possible to run the testsuite against other servers, such as Samba 4 or
# Windows that have a different set of features.
# The syntax for a testsuite is "-- TEST --" on a single line, followed
# by the name of the test, the environment it needs and the command to run, all
# three separated by newlines. All other lines in the output are considered
# comments.
import os, sys
sys.path.insert(0, os.path.normpath(os.path.join(os.path.dirname(__file__), "../../selftest")))
import selftesthelpers
from selftesthelpers import *
# Slow down share-mode and write-time update handling so that the
# timing-sensitive torture tests observe the delayed behaviour reliably.
smbtorture4_options.extend([
    '--option=torture:sharedelay=100000',
    '--option=torture:writetimeupdatedelay=500000',
])
def plansmbtorture4testsuite(name, env, options, description=''):
    """Plan an smbtorture4 suite against the samba3 target.

    Thin wrapper around selftesthelpers.plansmbtorture4testsuite that derives
    the module name from *name* plus the optional *description*, prefixed
    with the ``samba3.`` namespace.
    """
    suffix = "%s %s" % (name, description) if description != '' else name
    modname = "samba3.%s" % (suffix,)
    selftesthelpers.plansmbtorture4testsuite(
        name, env, options, target='samba3', modname=modname)
# Sanity checks of the selftest harness itself, plus local (non-network) tests.
plantestsuite("samba3.blackbox.success", "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/test_success.sh")])
plantestsuite("samba3.blackbox.failure", "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/test_failure.sh")])
plantestsuite("samba3.local_s3", "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/test_local_s3.sh")])
plantestsuite("samba3.blackbox.registry.upgrade", "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/test_registry_upgrade.sh"), net, dbwrap_tool])
# Core smbtorture3 protocol suites, run both plain and with SMB encryption.
tests = ["FDPASS", "LOCK1", "LOCK2", "LOCK3", "LOCK4", "LOCK5", "LOCK6", "LOCK7", "LOCK9",
        "UNLINK", "BROWSE", "ATTR", "TRANS2", "TORTURE",
        "OPLOCK1", "OPLOCK2", "OPLOCK4", "STREAMERROR",
        "DIR", "DIR1", "DIR-CREATETIME", "TCON", "TCONDEV", "RW1", "RW2", "RW3", "LARGE_READX", "RW-SIGNING",
        "OPEN", "XCOPY", "RENAME", "DELETE", "DELETE-LN", "PROPERTIES", "W2K",
        "TCON2", "IOCTL", "CHKPATH", "FDSESS", "CHAIN1", "CHAIN2",
        "CHAIN3",
        "GETADDRINFO", "UID-REGRESSION-TEST", "SHORTNAME-TEST",
        "CASE-INSENSITIVE-CREATE", "SMB2-BASIC", "NTTRANS-FSCTL", "SMB2-NEGPROT",
        "SMB2-SESSION-REAUTH", "SMB2-SESSION-RECONNECT",
        "CLEANUP1",
        "CLEANUP2",
        "CLEANUP4",
        "BAD-NBT-SESSION"]
for t in tests:
    plantestsuite("samba3.smbtorture_s3.plain(nt4_dc).%s" % t, "nt4_dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
    plantestsuite("samba3.smbtorture_s3.crypt_client(nt4_dc).%s" % t, "nt4_dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', smbtorture3, "-e", "-l $LOCAL_PATH"])
    if t == "TORTURE":
        # this is a negative test to verify that the server rejects
        # access without encryption
        plantestsuite("samba3.smbtorture_s3.crypt_server(nt4_dc).%s" % t, "nt4_dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmpenc', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
    plantestsuite("samba3.smbtorture_s3.plain(ad_dc_ntvfs).%s" % t, "ad_dc_ntvfs", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
# non-crypt only
tests = ["OPLOCK-CANCEL"]
for t in tests:
    plantestsuite("samba3.smbtorture_s3.plain(nt4_dc).%s" % t, "nt4_dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
# Read/write suites exercised against the vfs_aio_fork module share.
tests = ["RW1", "RW2", "RW3"]
for t in tests:
    plantestsuite("samba3.smbtorture_s3.vfs_aio_fork(simpleserver).%s" % t, "simpleserver", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/vfs_aio_fork', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
# POSIX extension suites, run against a POSIX-enabled share.
posix_tests = ["POSIX", "POSIX-APPEND"]
for t in posix_tests:
    plantestsuite("samba3.smbtorture_s3.plain(nt4_dc).%s" % t, "nt4_dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/posix_share', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
    plantestsuite("samba3.smbtorture_s3.crypt(nt4_dc).%s" % t, "nt4_dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/posix_share', '$USERNAME', '$PASSWORD', smbtorture3, "-e", "-l $LOCAL_PATH"])
    plantestsuite("samba3.smbtorture_s3.plain(ad_dc_ntvfs).%s" % t, "ad_dc_ntvfs", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/posix_share', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
env = "nt4_dc:local"
t = "CLEANUP3"
plantestsuite("samba3.smbtorture_s3.plain(%s).%s" % (env, t), env, [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', binpath('smbtorture3'), "", "-l $LOCAL_PATH"])
# smbtorture3 LOCAL-* suites: in-process unit tests that need no server.
local_tests = [
    "LOCAL-SUBSTITUTE",
    "LOCAL-GENCACHE",
    "LOCAL-TALLOC-DICT",
    "LOCAL-BASE64",
    "LOCAL-RBTREE",
    "LOCAL-MEMCACHE",
    "LOCAL-STREAM-NAME",
    "LOCAL-string_to_sid",
    "LOCAL-sid_to_string",
    "LOCAL-binary_to_sid",
    "LOCAL-DBTRANS",
    "LOCAL-TEVENT-SELECT",
    "LOCAL-CONVERT-STRING",
    "LOCAL-CONV-AUTH-INFO",
    "LOCAL-IDMAP-TDB-COMMON",
    "LOCAL-MESSAGING-READ1",
    "LOCAL-MESSAGING-READ2",
    "LOCAL-MESSAGING-READ3",
    "LOCAL-MESSAGING-READ4",
    "LOCAL-MESSAGING-FDPASS1",
    "LOCAL-MESSAGING-FDPASS2",
    "LOCAL-MESSAGING-FDPASS2a",
    "LOCAL-MESSAGING-FDPASS2b",
    "LOCAL-hex_encode_buf",
    "LOCAL-sprintf_append",
    "LOCAL-remove_duplicate_addrs2"]
for t in local_tests:
    plantestsuite("samba3.smbtorture_s3.%s" % t, "none", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//foo/bar', '""', '""', smbtorture3, ""])
# vfstest-driven VFS module tests (stream handling, xattr, ACLs, catia).
plantestsuite("samba.vfstest.stream_depot", "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/stream-depot/run.sh"), binpath("vfstest"), "$PREFIX", configuration])
plantestsuite("samba.vfstest.xattr-tdb-1", "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/xattr-tdb-1/run.sh"), binpath("vfstest"), "$PREFIX", configuration])
plantestsuite("samba.vfstest.acl", "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/vfstest-acl/run.sh"), binpath("vfstest"), "$PREFIX", configuration])
plantestsuite("samba.vfstest.catia", "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/vfstest-catia/run.sh"), binpath("vfstest"), "$PREFIX", configuration])
# smbclient authentication variants: SPNEGO off, downgraded LANMAN, default.
for options in ["--option=clientusespnego=no", " --option=clientntlmv2auth=no --option=clientlanmanauth=yes --max-protocol=LANMAN2", ""]:
    env = "nt4_dc"
    plantestsuite("samba3.blackbox.smbclient_auth.plain (%s) %s" % (env, options), env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', '$DC_USERNAME', '$DC_PASSWORD', smbclient3, configuration, options])
for env in ["nt4_dc", "nt4_member", "ad_member", "ad_dc_ntvfs", "s4member"]:
    plantestsuite("samba3.blackbox.smbclient_machine_auth.plain (%s:local)" % env, "%s:local" % env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_machine_auth.sh"), '$SERVER', smbclient3, configuration])
for env in ["nt4_dc", "nt4_member", "ad_member"]:
    plantestsuite("samba3.blackbox.smbclient_auth.plain (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', '$DC_USERNAME', '$DC_PASSWORD', smbclient3, configuration])
    plantestsuite("samba3.blackbox.smbclient_auth.plain (%s) member creds" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', '$SERVER/$USERNAME', '$PASSWORD', smbclient3, configuration])
for env in ["nt4_member", "ad_member"]:
    plantestsuite("samba3.blackbox.net_cred_change.(%s:local)" % env, "%s:local" % env, [os.path.join(samba3srcdir, "script/tests/test_net_cred_change.sh"), configuration])
# Winbind / kerberos checks against an AD member server.
env = "ad_member"
t = "--krb5auth=$DOMAIN/$DC_USERNAME%$DC_PASSWORD"
plantestsuite("samba3.wbinfo_simple.(%s:local).%s" % (env, t), "%s:local" % env, [os.path.join(srcdir(), "nsswitch/tests/test_wbinfo_simple.sh"), t])
t = "WBCLIENT-MULTI-PING"
plantestsuite("samba3.smbtorture_s3.%s" % t, env, [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//foo/bar', '""', '""', smbtorture3, ""])
plantestsuite("samba3.ntlm_auth.krb5(ktest:local) old ccache", "ktest:local", [os.path.join(samba3srcdir, "script/tests/test_ntlm_auth_krb5.sh"), valgrindify(python), samba3srcdir, ntlm_auth3, '$PREFIX/ktest/krb5_ccache-2', '$SERVER', configuration])
plantestsuite("samba3.ntlm_auth.krb5(ktest:local)", "ktest:local", [os.path.join(samba3srcdir, "script/tests/test_ntlm_auth_krb5.sh"), valgrindify(python), samba3srcdir, ntlm_auth3, '$PREFIX/ktest/krb5_ccache-3', '$SERVER', configuration])
for env in ["maptoguest", "simpleserver"]:
    plantestsuite("samba3.blackbox.smbclient_auth.plain (%s) local creds" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', '$USERNAME', '$PASSWORD', smbclient3, configuration + " --option=clientntlmv2auth=no --option=clientlanmanauth=yes"])
env = "maptoguest"
plantestsuite("samba3.blackbox.smbclient_auth.plain (%s) bad username" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', 'notmy$USERNAME', '$PASSWORD', smbclient3, configuration + " --option=clientntlmv2auth=no --option=clientlanmanauth=yes"])
# plain
for env in ["nt4_dc"]:
    plantestsuite("samba3.blackbox.smbclient_s3.plain (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration])
for env in ["nt4_member", "ad_member"]:
    plantestsuite("samba3.blackbox.smbclient_s3.plain (%s) member creds" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$SERVER', '$SERVER/$USERNAME', '$PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration])
# Same blackbox suite, but with SMB signing enforced on the client side.
for env in ["nt4_dc"]:
    plantestsuite("samba3.blackbox.smbclient_s3.sign (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration, "--signing=required"])
for env in ["nt4_member", "ad_member"]:
    plantestsuite("samba3.blackbox.smbclient_s3.sign (%s) member creds" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$SERVER', '$SERVER/$USERNAME', '$PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration, "--signing=required"])
for env in ["nt4_dc"]:
    # encrypted
    plantestsuite("samba3.blackbox.smbclient_s3.crypt (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$DOMAIN', '$USERNAME', '$PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration, "-e"])
for env in ["fileserver"]:
    plantestsuite("samba3.blackbox.preserve_case (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_preserve_case.sh"), '$SERVER', '$DOMAIN', '$USERNAME', '$PASSWORD', '$PREFIX', smbclient3])
    plantestsuite("samba3.blackbox.dfree_command (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_dfree_command.sh"), '$SERVER', '$DOMAIN', '$USERNAME', '$PASSWORD', '$PREFIX', smbclient3])
    plantestsuite("samba3.blackbox.valid_users (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_valid_users.sh"), '$SERVER', '$SERVER_IP', '$DOMAIN', '$USERNAME', '$PASSWORD', '$PREFIX', smbclient3])
#
# tar command tests
#
# find config.h
try:
    config_h = os.environ["CONFIG_H"]
except KeyError:
    config_h = os.path.join(samba4bindir, "default/include/config.h")
# see if libarchive is supported
f = open(config_h, 'r')
try:
    have_libarchive = ("HAVE_LIBARCHIVE 1" in f.read())
finally:
    f.close()
# tar command enabled only if built with libarchive
if have_libarchive:
    # Test smbclient/tarmode
    plantestsuite("samba3.blackbox.smbclient_tarmode (%s)" % env, env,
                  [os.path.join(samba3srcdir, "script/tests/test_smbclient_tarmode.sh"),
                   '$SERVER', '$SERVER_IP', '$USERNAME', '$PASSWORD',
                   '$LOCAL_PATH', '$PREFIX', smbclient3, configuration])
    # Test suite for new smbclient/tar with libarchive (GSoC 13)
    plantestsuite("samba3.blackbox.smbclient_tar (%s)" % env, env,
                  [os.path.join(samba3srcdir, "script/tests/test_smbclient_tarmode.pl"),
                   '-n', '$SERVER', '-i', '$SERVER_IP', '-s', 'tmp',
                   '-u', '$USERNAME', '-p', '$PASSWORD', '-l', '$LOCAL_PATH',
                   '-d', '$PREFIX', '-b', smbclient3,
                   '--subunit', '--', configuration])
#TODO encrypted against member, with member creds, and with DC creds
# "net" utility blackbox tests: registry, configuration, testparm.
plantestsuite("samba3.blackbox.net.misc", "nt4_dc:local",
              [os.path.join(samba3srcdir, "script/tests/test_net_misc.sh"),
               scriptdir, "$SMB_CONF_PATH", net, configuration])
plantestsuite("samba3.blackbox.net.local.registry", "nt4_dc:local",
              [os.path.join(samba3srcdir, "script/tests/test_net_registry.sh"),
               scriptdir, "$SMB_CONF_PATH", net, configuration])
plantestsuite("samba3.blackbox.net.registry.check", "nt4_dc:local",
              [os.path.join(samba3srcdir, "script/tests/test_net_registry_check.sh"),
               scriptdir, "$SMB_CONF_PATH", net, configuration, dbwrap_tool])
plantestsuite("samba3.blackbox.net.rpc.registry", "nt4_dc",
              [os.path.join(samba3srcdir, "script/tests/test_net_registry.sh"),
               scriptdir, "$SMB_CONF_PATH", net, configuration, 'rpc'])
plantestsuite("samba3.blackbox.net.local.registry.roundtrip", "nt4_dc:local",
              [os.path.join(samba3srcdir, "script/tests/test_net_registry_roundtrip.sh"),
               scriptdir, "$SMB_CONF_PATH", net, configuration])
plantestsuite("samba3.blackbox.net.rpc.registry.roundtrip", "nt4_dc",
              [os.path.join(samba3srcdir, "script/tests/test_net_registry_roundtrip.sh"),
               scriptdir, "$SMB_CONF_PATH", net, configuration, 'rpc'])
plantestsuite("samba3.blackbox.net.local.conf", "nt4_dc:local",
              [os.path.join(samba3srcdir, "script/tests/test_net_conf.sh"),
               scriptdir, "$SMB_CONF_PATH", net, configuration])
plantestsuite("samba3.blackbox.net.rpc.conf", "nt4_dc",
              [os.path.join(samba3srcdir, "script/tests/test_net_conf.sh"),
               scriptdir, "$SMB_CONF_PATH", net, configuration, 'rpc'])
plantestsuite("samba3.blackbox.testparm", "nt4_dc:local",
              [os.path.join(samba3srcdir, "script/tests/test_testparm_s3.sh"),
               "$LOCAL_PATH"])
plantestsuite(
    "samba3.pthreadpool", "nt4_dc",
    [os.path.join(samba3srcdir, "script/tests/test_pthreadpool.sh")])
#smbtorture4 tests
# Suite name lists, grouped by protocol/subsystem; combined into "tests" below.
base = ["base.attr", "base.charset", "base.chkpath", "base.defer_open", "base.delaywrite", "base.delete",
        "base.deny1", "base.deny2", "base.deny3", "base.denydos", "base.dir1", "base.dir2",
        "base.disconnect", "base.fdpass", "base.lock",
        "base.mangle", "base.negnowait", "base.ntdeny1",
        "base.ntdeny2", "base.open", "base.openattr", "base.properties", "base.rename", "base.rw1",
        "base.secleak", "base.tcon", "base.tcondev", "base.trans2", "base.unlink", "base.vuid",
        "base.xcopy", "base.samba3error"]
raw = ["raw.acls", "raw.chkpath", "raw.close", "raw.composite", "raw.context", "raw.eas",
       "raw.ioctl", "raw.lock", "raw.mkdir", "raw.mux", "raw.notify", "raw.open", "raw.oplock",
       "raw.qfileinfo", "raw.qfsinfo", "raw.read", "raw.rename", "raw.search", "raw.seek",
       "raw.sfileinfo.base", "raw.sfileinfo.bug", "raw.streams", "raw.unlink", "raw.write",
       "raw.samba3hide", "raw.samba3badpath", "raw.sfileinfo.rename", "raw.session",
       "raw.samba3caseinsensitive", "raw.samba3posixtimedlock",
       "raw.samba3rootdirfid", "raw.sfileinfo.end-of-file",
       "raw.bench-oplock", "raw.bench-lock", "raw.bench-open", "raw.bench-tcon",
       "raw.samba3checkfsp", "raw.samba3closeerr", "raw.samba3oplocklogoff", "raw.samba3badnameblob"]
# All smb2.* suites are discovered dynamically from the smbtorture binary.
smb2 = smbtorture4_testsuites("smb2.")
rpc = ["rpc.authcontext", "rpc.samba3.bind", "rpc.samba3.srvsvc", "rpc.samba3.sharesec",
       "rpc.samba3.spoolss", "rpc.samba3.wkssvc", "rpc.samba3.winreg",
       "rpc.samba3.getaliasmembership-0",
       "rpc.samba3.netlogon", "rpc.samba3.sessionkey", "rpc.samba3.getusername",
       "rpc.samba3.smb1-pipe-name", "rpc.samba3.smb2-pipe-name",
       "rpc.samba3.smb-reauth1", "rpc.samba3.smb-reauth2",
       "rpc.svcctl", "rpc.ntsvcs", "rpc.winreg", "rpc.eventlog",
       "rpc.spoolss.printserver", "rpc.spoolss.win", "rpc.spoolss.notify", "rpc.spoolss.printer",
       "rpc.spoolss.driver",
       "rpc.lsa", "rpc.lsa-getuser", "rpc.lsa.lookupsids", "rpc.lsa.lookupnames",
       "rpc.lsa.privileges", "rpc.lsa.secrets",
       "rpc.samr", "rpc.samr.users", "rpc.samr.users.privileges", "rpc.samr.passwords",
       "rpc.samr.passwords.pwdlastset", "rpc.samr.passwords.lockout", "rpc.samr.passwords.badpwdcount", "rpc.samr.large-dc", "rpc.samr.machine.auth",
       "rpc.samr.priv", "rpc.samr.passwords.validate",
       "rpc.netlogon.admin",
       "rpc.schannel", "rpc.schannel2", "rpc.bench-schannel1", "rpc.schannel_anon_setpw", "rpc.join", "rpc.bind"]
local = ["local.nss", "local.ndr"]
idmap = [ "idmap.rfc2307" ]
rap = ["rap.basic", "rap.rpc", "rap.printing", "rap.sam"]
unix = ["unix.info2", "unix.whoami"]
nbt = ["nbt.dgram" ]
libsmbclient = ["libsmbclient"]
vfs = ["vfs.fruit"]
tests= base + raw + smb2 + rpc + unix + local + rap + nbt + libsmbclient + idmap + vfs
# Plan every smbtorture4 suite. Most run against nt4_dc and ad_dc with the
# default /tmp share; the elif branches handle suites that need a special
# share, environment, authentication mode or extra torture options.
for t in tests:
    if t == "base.delaywrite":
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD -k yes --maximum-runtime=900')
    elif t == "rap.sam":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --option=doscharset=ISO-8859-1')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --option=doscharset=ISO-8859-1')
    elif t == "winbind.pac":
        plansmbtorture4testsuite(t, "ad_member:local", '//$SERVER/tmp --realm=$REALM --machine-pass --option=torture:addc=$DC_SERVER', description="machine account")
    elif t == "unix.whoami":
        plansmbtorture4testsuite(t, "nt4_member:local", '//$SERVER/tmp --machine-pass', description="machine account")
        plansmbtorture4testsuite(t, "ad_member:local", '//$SERVER/tmp --machine-pass --option=torture:addc=$DC_SERVER', description="machine account")
        for env in ["nt4_dc", "nt4_member"]:
            plansmbtorture4testsuite(t, env, '//$SERVER/tmp -U$DC_USERNAME%$DC_PASSWORD')
            plansmbtorture4testsuite(t, env, '//$SERVER/tmpguest -U%', description='anonymous connection')
        for env in ["ad_dc", "ad_member"]:
            plansmbtorture4testsuite(t, env, '//$SERVER/tmp -U$DC_USERNAME@$REALM%$DC_PASSWORD --option=torture:addc=$DC_SERVER')
            plansmbtorture4testsuite(t, env, '//$SERVER/tmp -k yes -U$DC_USERNAME@$REALM%$DC_PASSWORD --option=torture:addc=$DC_SERVER', description='kerberos connection')
            plansmbtorture4testsuite(t, env, '//$SERVER/tmpguest -U% --option=torture:addc=$DC_SERVER', description='anonymous connection')
    elif t == "raw.samba3posixtimedlock":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmpguest -U$USERNAME%$PASSWORD --option=torture:localdir=$SELFTEST_PREFIX/nt4_dc/share')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER_IP/tmpguest -U$USERNAME%$PASSWORD --option=torture:localdir=$SELFTEST_PREFIX/ad_dc/share')
    elif t == "raw.chkpath":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmpcase -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER_IP/tmpcase -U$USERNAME%$PASSWORD')
    elif t == "raw.samba3hide" or t == "raw.samba3checkfsp" or t == "raw.samba3closeerr":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "simpleserver", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
    elif t == "raw.session" or t == "smb2.session":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD', 'plain')
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmpenc -U$USERNAME%$PASSWORD', 'enc')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -k no -U$USERNAME%$PASSWORD', 'ntlm')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -k yes -U$USERNAME%$PASSWORD', 'krb5')
    elif t == "rpc.lsa":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD', 'over ncacn_np ')
        plansmbtorture4testsuite(t, "nt4_dc", 'ncacn_ip_tcp:$SERVER_IP -U$USERNAME%$PASSWORD', 'over ncacn_ip_tcp ')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD', 'over ncacn_np ')
        plansmbtorture4testsuite(t, "ad_dc", 'ncacn_ip_tcp:$SERVER_IP -U$USERNAME%$PASSWORD', 'over ncacn_ip_tcp ')
    elif t == "rpc.samr.passwords.validate":
        plansmbtorture4testsuite(t, "nt4_dc", 'ncacn_ip_tcp:$SERVER_IP -U$USERNAME%$PASSWORD', 'over ncacn_ip_tcp ')
        plansmbtorture4testsuite(t, "ad_dc", 'ncacn_ip_tcp:$SERVER_IP -U$USERNAME%$PASSWORD', 'over ncacn_ip_tcp ')
    elif t == "smb2.durable-open" or t == "smb2.durable-v2-open" or t == "smb2.replay":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/durable -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER_IP/durable -U$USERNAME%$PASSWORD')
    elif t == "base.rw1":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/valid-users-tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/write-list-tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
    elif t == "idmap.rfc2307":
        plantestsuite(t, "ad_member_rfc2307", [os.path.join(samba3srcdir, "../nsswitch/tests/test_idmap_rfc2307.sh"), '$DOMAIN', 'Administrator', '2000000', 'Guest', '2000001', '"Domain Users"', '2000002', 'DnsAdmins', '2000003', 'ou=idmap,dc=samba,dc=example,dc=com', '$DC_SERVER', '$DC_USERNAME', '$DC_PASSWORD'])
    elif t == "raw.acls":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/nfs4acl_simple -U$USERNAME%$PASSWORD', description='nfs4acl_xattr-simple')
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/nfs4acl_special -U$USERNAME%$PASSWORD', description='nfs4acl_xattr-special')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER_IP/tmpcase -U$USERNAME%$PASSWORD')
    elif t == "smb2.ioctl":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/fs_specific -U$USERNAME%$PASSWORD', 'fs_specific')
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
    elif t == "smb2.lock":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/aio -U$USERNAME%$PASSWORD', 'aio')
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
    elif t == "raw.read":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/aio -U$USERNAME%$PASSWORD', 'aio')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
    elif t == "raw.search":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
# test the dirsort module.
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmpsort -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
    elif t == "vfs.fruit":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --option=torture:share1=vfs_fruit --option=torture:share2=tmp --option=torture:localdir=$SELFTEST_PREFIX/nt4_dc/share')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --option=torture:share1=vfs_fruit --option=torture:share2=tmp --option=torture:localdir=$SELFTEST_PREFIX/ad_dc/share')
    elif t == "rpc.schannel_anon_setpw":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$%', description="anonymous password set")
        plansmbtorture4testsuite(t, "nt4_dc_schannel", '//$SERVER_IP/tmp -U$%', description="anonymous password set (schannel enforced server-side)")
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$%', description="anonymous password set")
    elif t == "local.nss":
        for env in ["nt4_dc:local", "ad_member:local", "nt4_member:local", "ad_dc:local", "ad_dc_ntvfs:local"]:
            plansmbtorture4testsuite(t, env, '//$SERVER/tmp -U$USERNAME%$PASSWORD')
    elif t == "smb2.notify":
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --signing=required')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD --signing=required')
    else:
        plansmbtorture4testsuite(t, "nt4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
        plansmbtorture4testsuite(t, "ad_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
# Exercise rpc.lsa.lookupsids over every combination of authentication,
# sign/seal and endianness binding options, over both ncacn_np and
# ncacn_ip_tcp transports, plus the pre-built kerberos ccache variants.
test = 'rpc.lsa.lookupsids'
auth_options = ["", "ntlm", "spnego", "spnego,ntlm" ]
signseal_options = ["", ",connect", ",sign", ",seal"]
endianness_options = ["", ",bigendian"]
for s in signseal_options:
    for e in endianness_options:
        for a in auth_options:
            binding_string = "ncacn_np:$SERVER[%s%s%s]" % (a, s, e)
            options = binding_string + " -U$USERNAME%$PASSWORD"
            plansmbtorture4testsuite(test, "nt4_dc", options, 'over ncacn_np with [%s%s%s] ' % (a, s, e))
            plantestsuite("samba3.blackbox.rpcclient over ncacn_np with [%s%s%s] " % (a, s, e), "nt4_dc:local", [os.path.join(samba3srcdir, "script/tests/test_rpcclient.sh"),
                          "none", options, configuration])
    # We should try more combinations in future, but this is all
    # the pre-calculated credentials cache supports at the moment
    e = ""
    a = ""
    binding_string = "ncacn_np:$SERVER[%s%s%s]" % (a, s, e)
    options = binding_string + " -k yes --krb5-ccache=$PREFIX/ktest/krb5_ccache-2"
    plansmbtorture4testsuite(test, "ktest", options, 'krb5 with old ccache ncacn_np with [%s%s%s] ' % (a, s, e))
    options = binding_string + " -k yes --krb5-ccache=$PREFIX/ktest/krb5_ccache-3"
    plansmbtorture4testsuite(test, "ktest", options, 'krb5 ncacn_np with [%s%s%s] ' % (a, s, e))
    auth_options2 = ["krb5", "spnego,krb5"]
    for a in auth_options2:
        binding_string = "ncacn_np:$SERVER[%s%s%s]" % (a, s, e)
        plantestsuite("samba3.blackbox.rpcclient krb5 ncacn_np with [%s%s%s] " % (a, s, e), "ktest:local", [os.path.join(samba3srcdir, "script/tests/test_rpcclient.sh"),
                      "$PREFIX/ktest/krb5_ccache-3", binding_string, "-k", configuration])
plantestsuite("samba3.blackbox.rpcclient_samlogon", "ad_member:local", [os.path.join(samba3srcdir, "script/tests/test_rpcclient_samlogon.sh"),
              "$DC_USERNAME", "$DC_PASSWORD", "ncacn_np:$DC_SERVER", configuration])
# Miscellaneous blackbox tests: share security, domain joins, srvsvc.
plantestsuite("samba3.blackbox.sharesec", "simpleserver:local",
              [os.path.join(samba3srcdir, "script/tests/test_sharesec.sh"),
               configuration, os.path.join(bindir(), "sharesec"), "tmp"])
plantestsuite("samba3.blackbox.net_dom_join_fail_dc", "nt4_dc",
              [os.path.join(samba3srcdir, "script/tests/test_net_dom_join_fail_dc.sh"),
               "$USERNAME", "$PASSWORD", "$SERVER", "$PREFIX/net_dom_join_fail_dc",
               configuration])
plantestsuite("samba3.blackbox.net_rpc_join", "nt4_dc",
              [os.path.join(samba3srcdir, "script/tests/test_net_rpc_join.sh"),
               "$USERNAME", "$PASSWORD", "$SERVER", "$PREFIX/net_rpc_join",
               configuration])
plantestsuite("samba3.blackbox.rpcclient_srvsvc", "simpleserver",
              [os.path.join(samba3srcdir, "script/tests/test_rpcclientsrvsvc.sh"),
               "$USERNAME", "$PASSWORD", "$SERVER",
               os.path.join(bindir(), "rpcclient"), "tmp"])
# smbclient kerberos / large-file tests, with and without encryption ("-e").
options_list = ["", "-e"]
for options in options_list:
    plantestsuite("samba3.blackbox.smbclient_krb5 old ccache %s" % options, "ktest:local",
                  [os.path.join(samba3srcdir, "script/tests/test_smbclient_krb5.sh"),
                   "$PREFIX/ktest/krb5_ccache-2",
                   smbclient3, "$SERVER", options, configuration])
    plantestsuite("samba3.blackbox.smbclient_krb5 old ccache %s" % options, "ktest:local",
                  [os.path.join(samba3srcdir, "script/tests/test_smbclient_krb5.sh"),
                   "$PREFIX/ktest/krb5_ccache-2",
                   smbclient3, "$SERVER", options, configuration])
    plantestsuite("samba3.blackbox.smbclient_large_file %s" % options, "ktest:local",
                  [os.path.join(samba3srcdir, "script/tests/test_smbclient_posix_large.sh"),
                   "$PREFIX/ktest/krb5_ccache-3",
                   smbclient3, "$SERVER", "$PREFIX", options, "-k " + configuration])
    plantestsuite("samba3.blackbox.smbclient_posix_large %s krb5" % options, "ktest:local",
                  [os.path.join(samba3srcdir, "script/tests/test_smbclient_posix_large.sh"),
                   "$PREFIX/ktest/krb5_ccache-3",
                   smbclient3, "$SERVER", "$PREFIX", options, "-k " + configuration])
    plantestsuite("samba3.blackbox.smbclient_posix_large %s NTLM" % options, "nt4_dc:local",
                  [os.path.join(samba3srcdir, "script/tests/test_smbclient_posix_large.sh"),
                   "none",
                   smbclient3, "$SERVER", "$PREFIX", options, "-U$USERNAME%$PASSWORD " + configuration])
# Same lookupsids matrix over the TCP transport.
for e in endianness_options:
    for a in auth_options:
        for s in signseal_options:
            binding_string = "ncacn_ip_tcp:$SERVER_IP[%s%s%s]" % (a, s, e)
            options = binding_string + " -U$USERNAME%$PASSWORD"
            plansmbtorture4testsuite(test, "nt4_dc", options, 'over ncacn_ip_tcp with [%s%s%s] ' % (a, s, e))
plansmbtorture4testsuite('rpc.epmapper', 'nt4_dc:local', 'ncalrpc: -U$USERNAME%$PASSWORD', 'over ncalrpc')
plansmbtorture4testsuite('rpc.fsrvp', 'nt4_dc:local', 'ncacn_np:$SERVER_IP[/pipe/FssagentRpc] -U$USERNAME%$PASSWORD', 'over ncacn_np')
| gpl-3.0 |
teemulehtinen/a-plus | exercise/cache/hierarchy.py | 3 | 10105 | from course.models import CourseModule, LearningObjectCategory
from ..models import LearningObject
class NoSuchContent(Exception):
    """Raised when a module, exercise, category, number or path cannot be
    found in the cached content hierarchy."""
    pass
class HierarchyIterator(object):
def __init__(self, children, idx=None, tree=None, visited=False, enclosed=True):
if idx is None:
self._default_start(children)
else:
self.idx = idx.copy()
self.levels = [children]
if tree and len(tree) > 1:
for entry in tree[:-1]:
self.levels.append(entry['children'])
self.visited = visited
self.enclose_begun = not enclosed
self.enclose_ended = not enclosed
def __iter__(self):
return self
class NextIterator(HierarchyIterator):
    """Forward depth-first iterator over the hierarchy.

    Yields entry dicts in document order, interleaved with synthetic
    ``{'type': 'level', 'down'/'up': True}`` markers when descending into or
    ascending out of a children list. With ``enclosed=True`` the whole
    sequence is wrapped in one extra down/up marker pair.
    """
    def _default_start(self, children):
        # Start at the first entry of the top level.
        self.idx = [0]
        self.levels = [children]
    def __next__(self):
        # Emit the opening level marker once, if enclosing was requested.
        if not self.enclose_begun:
            self.enclose_begun = True
            return {'type':'level','down':True}
        i = self.idx[-1]
        level = self.levels[-1]
        if not self.visited:
            # Current entry not yielded yet: yield it before its children.
            if i < len(level):
                self.visited = True
                return level[i]
        else:
            # Current entry already yielded: descend into its children first.
            children = level[i].get('children')
            if children:
                self.levels.append(children)
                self.idx.append(0)
                self.visited = False
                return {'type':'level','down':True}
        # No children (or index past the end): advance to the next sibling.
        i += 1
        if i < len(level):
            self.idx[-1] = i
            return level[i]
        # Level exhausted: pop back to the parent and continue after it.
        if len(self.idx) > 1:
            self.idx = self.idx[:-1]
            self.levels = self.levels[:-1]
            self.idx[-1] += 1
            self.visited = False
            return {'type':'level','up':True}
        # Top level exhausted: emit the closing marker once, then stop.
        if not self.enclose_ended:
            self.enclose_ended = True
            return {'type':'level','up':True}
        raise StopIteration()
class PreviousIterator(HierarchyIterator):
    """Backward depth-first iterator over the hierarchy.

    Yields entry dicts in reverse document order (deepest-last entry first).
    Unlike NextIterator it emits no synthetic level markers.
    """
    def _default_start(self, children):
        # Start from the very last, deepest entry of the hierarchy.
        self.idx = []
        self.levels = []
        self._goto_last(children)
    def _goto_last(self, children):
        # Walk down the last child at each level, pushing idx/levels state.
        level = children
        while level:
            i = len(level) - 1
            self.idx.append(i)
            self.levels.append(level)
            level = level[i].get('children')
    def __next__(self):
        i = self.idx[-1]
        level = self.levels[-1]
        if not self.visited:
            # First call with visited=False yields the starting entry itself.
            self.visited = True
            return level[i]
        elif i > 0:
            # Move to the previous sibling, then dive to its deepest last child.
            i -= 1
            self.idx[-1] = i
            self._goto_last(level[i].get('children'))
            return self.levels[-1][self.idx[-1]]
        elif len(self.idx) > 1:
            # First entry of this level reached: pop up to the parent entry.
            self.idx = self.idx[:-1]
            self.levels = self.levels[:-1]
            return self.levels[-1][self.idx[-1]]
        raise StopIteration()
class ContentMixin(object):
    """Read-only navigation helpers over a cached course content hierarchy.

    Subclasses must provide ``self.data``: a dict with the keys ``created``,
    ``total``, ``modules`` (list of nested entry dicts, each carrying a
    ``children`` list), ``categories``, ``paths``, ``module_index`` and
    ``exercise_index`` (the two indexes map ids to index tuples usable with
    ``_by_idx``).
    """

    def created(self):
        """Return the time the cached content was generated."""
        return self.data['created']

    def total(self):
        """Return the aggregated totals entry of the cache."""
        return self.data['total']

    def modules(self):
        """Return the list of top-level module entries."""
        return self.data['modules']

    def modules_flatted(self):
        """Return modules, attaching a flat content iterator to each one
        under the key ``flatted``."""
        for module in self.data['modules']:
            module['flatted'] = self.flat_module(module)
        return self.data['modules']

    def categories(self):
        """Return all category entries sorted by name."""
        categories = list(self.data['categories'].values())
        categories.sort(key=lambda entry: entry['name'])
        return categories

    def flat_module(self, module, enclosed=True):
        """Iterate depth-first over the contents of a single module."""
        modules = self.modules()
        idx = self._model_idx(module)
        tree = self._by_idx(modules, idx)
        return NextIterator(tree[0]['children'], enclosed=enclosed)

    def flat_full(self):
        """Iterate depth-first over the entire course content."""
        return NextIterator(self.modules(), enclosed=False)

    def begin(self):
        """Return the first exercise entry of the course, or None."""
        for entry in self.flat_full():
            if entry['type'] == 'exercise':
                return entry
        return None

    def find_path(self, module_id, path):
        """Resolve a URL path within a module to the stored entry id.

        Raises NoSuchContent when the path is unknown.
        """
        paths = self.data['paths'].get(module_id, {})
        if path in paths:
            return paths[path]
        raise NoSuchContent()

    def find_number(self, number):
        """Find an entry by its hierarchical number, e.g. ``"2.1.3"``.

        Raises NoSuchContent when any component of the number fails to
        match an entry at its level.
        """
        hit = None
        search = self.modules()
        parts = number.split('.')
        for i in range(len(parts)):
            number = '.'.join(parts[0:i + 1])
            for s in search:
                if s['number'] == number:
                    hit = s
                    search = hit['children']
                    break
            else:
                # BUGFIX: previously only a missing *first* component raised;
                # a failed later component silently returned the stale hit
                # from the previous level (e.g. '1.99' returned module '1').
                raise NoSuchContent()
        return hit

    def find_category(self, category_id):
        """Return the category entry for the id, or raise NoSuchContent."""
        categories = self.data['categories']
        if category_id in categories:
            return categories[category_id]
        raise NoSuchContent()

    def find(self, model):
        """Locate a model in the hierarchy.

        Returns (entry, ancestor tree ending at entry, previous listed
        entry or None, next listed entry or None).
        """
        modules = self.modules()
        idx = self._model_idx(model)
        tree = self._by_idx(modules, idx)
        return (
            tree[-1],
            tree,
            self._previous(idx, tree),
            self._next(idx, tree),
        )

    def search_exercises(self, **kwargs):
        """Like search_entries, but return only the exercise entries."""
        _, entries = self.search_entries(**kwargs)
        return [e for e in entries if e['type'] == 'exercise']

    def search_entries(self, number=None, category_id=None, module_id=None,
                       exercise_id=None, filter_for_assistant=False, best=False):
        """Collect entries below the scope given by number/module/exercise.

        Returns (scope entry or None, list of module entries plus matching
        exercise entries). ``best`` is accepted but currently unused —
        NOTE(review): confirm whether it was meant to affect filtering.
        """
        entry = None
        if number:
            try:
                entry = self.find_number(number)
                if entry['type'] == 'module':
                    module_id = entry['id']
                elif entry['type'] == 'exercise':
                    exercise_id = entry['id']
            except NoSuchContent:
                pass
        search = None
        if exercise_id is not None:
            search = {'type': 'exercise', 'id': int(exercise_id)}
        elif module_id is not None:
            search = {'type': 'module', 'id': int(module_id)}
        if search:
            idx = self._model_idx(search)
            tree = self._by_idx(self.modules(), idx)
        else:
            # No scope given: search the whole hierarchy.
            tree = [{'type': 'all', 'children': self.modules()}]
        exercises = []

        def recursion(entry):
            # Modules are always collected; exercises only when they match
            # the category filter and, for assistants, are viewable.
            if (
                entry['type'] == 'module' or (
                    entry['type'] == 'exercise' and
                    (category_id is None or entry['category_id'] == category_id) and
                    (not filter_for_assistant or entry['allow_assistant_viewing'])
                )
            ):
                exercises.append(entry)
            for child in entry['children']:
                recursion(child)
        recursion(tree[-1])
        return entry, exercises

    def _previous(self, idx, tree):
        """Return the closest preceding listed entry, or None."""
        for entry in PreviousIterator(self.modules(), idx, tree, visited=True):
            if self.is_listed(entry):
                return entry
        return None

    def _next(self, idx, tree):
        """Return the closest following listed entry, or None."""
        for entry in NextIterator(self.modules(), idx, tree, visited=True, enclosed=False):
            if self.is_listed(entry):
                return entry
        return None

    def _model_idx(self, model):
        """Map a model (entry dict or Django model instance) to its index
        tuple, raising NoSuchContent when it is not indexed."""
        def find(index, search):
            if search in index:
                return index[search]
            raise NoSuchContent()
        entry_type = None
        if isinstance(model, dict):
            entry_type = model.get('type', None)
        if entry_type == 'module':
            return find(self.data['module_index'], model['id'])
        elif entry_type == 'exercise':
            return find(self.data['exercise_index'], model['id'])
        elif isinstance(model, CourseModule):
            return find(self.data['module_index'], model.id)
        elif isinstance(model, LearningObject):
            return find(self.data['exercise_index'], model.id)
        else:
            raise NoSuchContent()

    @classmethod
    def _by_idx(cls, hierarchy, idx):
        """Walk ``hierarchy`` along the index tuple, returning the list of
        entries passed on the way (ancestors first, target last)."""
        tree = []
        for i in idx:
            entry = hierarchy[i]
            hierarchy = entry['children']
            tree.append(entry)
        return tree

    @classmethod
    def _add_by_difficulty(cls, to, difficulty, points):
        """Accumulate points into the per-difficulty dict ``to``."""
        if difficulty in to:
            to[difficulty] += points
        else:
            to[difficulty] = points

    @classmethod
    def is_visible(cls, entry):
        """Whether the entry may be shown to students at all."""
        t = entry['type']
        if t == 'exercise':
            return (
                entry.get('category_status') != LearningObjectCategory.STATUS.HIDDEN
                and entry.get('module_status') != CourseModule.STATUS.HIDDEN
                and not entry['status'] in (
                    LearningObject.STATUS.HIDDEN,
                    LearningObject.STATUS.ENROLLMENT,
                    LearningObject.STATUS.ENROLLMENT_EXTERNAL,
                )
            )
        if t == 'module':
            return entry['status'] != CourseModule.STATUS.HIDDEN
        if t == 'category':
            return not entry['status'] in (
                LearningObjectCategory.STATUS.HIDDEN,
                LearningObjectCategory.STATUS.NOTOTAL,
            )
        return False

    @classmethod
    def is_listed(cls, entry):
        """Whether the entry appears in content listings (visible and not
        explicitly unlisted)."""
        if not cls.is_visible(entry):
            return False
        t = entry['type']
        if t == 'exercise':
            return (
                entry.get('category_status') != LearningObjectCategory.STATUS.HIDDEN
                and entry.get('module_status') != CourseModule.STATUS.UNLISTED
                and entry['status'] != LearningObject.STATUS.UNLISTED
            )
        if t == 'module':
            return entry['status'] != CourseModule.STATUS.UNLISTED
        if t == 'category':
            return entry['status'] != LearningObjectCategory.STATUS.HIDDEN
        return True

    @classmethod
    def is_in_maintenance(cls, entry):
        """Whether the entry (or its module) is in maintenance mode."""
        t = entry['type']
        if t == 'exercise':
            return (
                entry['module_status'] == CourseModule.STATUS.MAINTENANCE
                or entry['status'] == LearningObject.STATUS.MAINTENANCE
            )
        if t == 'module':
            return entry['status'] == CourseModule.STATUS.MAINTENANCE
        return False
| gpl-3.0 |
thortex/rpi3-webiopi | webiopi_0.7.1/python/webiopi/devices/onewire.py | 1 | 2571 | # Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from webiopi.devices.bus import Bus, loadModule
# Optional kernel modules required by specific 1-Wire device families,
# keyed by the short name passed as OneWire(extra=...). The "loaded" flag
# tracks whether the module has already been inserted during this run.
EXTRAS = {
    "TEMP": {"loaded": False, "module": "w1-therm"},
    "2408": {"loaded": False, "module": "w1_ds2408"},
    "2413": {"loaded": False, "module": "w1_ds2413"}
}
def loadExtraModule(name):
    """Ensure the kernel module registered under ``name`` in EXTRAS is
    loaded, inserting it at most once per run."""
    entry = EXTRAS[name]
    if not entry["loaded"]:
        loadModule(entry["module"])
        entry["loaded"] = True
class OneWire(Bus):
    """Access to 1-Wire slave devices through the Linux w1 sysfs interface."""

    def __init__(self, slave=None, family=0, extra=None):
        """Bind to a 1-Wire slave.

        :param slave: explicit slave address, either "family-id" or a bare id
            (the family code is then prepended); None autodetects the first
            device matching ``family``
        :param family: expected 1-Wire family code; 0 accepts any device
        :param extra: optional EXTRAS key ("TEMP", "2408", "2413") naming a
            helper kernel module to load
        """
        Bus.__init__(self, "ONEWIRE", "/sys/bus/w1/devices/w1_bus_master1/w1_master_slaves", os.O_RDONLY)
        # The master's slave list is re-read on demand; keep no fd open.
        if self.fd > 0:
            os.close(self.fd)
            self.fd = 0
        self.family = family
        if slave is not None:
            addr = slave.split("-")
            if len(addr) == 1:
                self.slave = "%02x-%s" % (family, slave)
            elif len(addr) == 2:
                prefix = int(addr[0], 16)
                if family > 0 and family != prefix:
                    raise Exception("1-Wire slave address %s does not match family %02x" % (slave, family))
                self.slave = slave
            else:
                # BUGFIX: previously fell through silently, leaving
                # self.slave unset and failing later with AttributeError.
                raise Exception("Malformed 1-Wire slave address %s" % slave)
        else:
            devices = self.deviceList()
            if len(devices) == 0:
                raise Exception("No device match family %02x" % family)
            self.slave = devices[0]
        # BUGFIX: loadExtraModule(None) raised KeyError, breaking the
        # documented default extra=None; only load when one was requested.
        if extra is not None:
            loadExtraModule(extra)

    def __str__(self):
        return "1-Wire(slave=%s)" % self.slave

    def deviceList(self):
        """List slave addresses known to the bus master, filtered to the
        configured family when one was given."""
        devices = []
        with open(self.device) as f:
            lines = f.read().split("\n")
        if self.family > 0:
            prefix = "%02x-" % self.family
            for line in lines:
                if line.startswith(prefix):
                    devices.append(line)
        else:
            devices = lines
        return devices

    def read(self):
        """Return the raw contents of the slave's w1_slave sysfs file."""
        with open("/sys/bus/w1/devices/%s/w1_slave" % self.slave) as f:
            data = f.read()
        return data
| apache-2.0 |
jkandasa/integration_tests | cfme/utils/soft_get.py | 10 | 3289 | import collections
from difflib import SequenceMatcher
class MultipleResultsException(Exception):
    """Raised by soft_get when several members match *field_base_name* and
    best-match selection is disabled."""
    pass
def soft_get(obj,
             field_base_name,
             dict_=False,
             case_sensitive=False,
             best_match=True,
             dont_include=None):
    """
    Fetch an attribute (or dict value) whose name is only partially known.

    Useful when the exact field name varies between versions or pages.
    E.g. a relationships table's image column might appear as 'Images',
    'Image', 'Container Images', etc.; as long as 'image' is a unique
    sub-string, ``soft_get(table, 'image')`` finds it.

    Args:
        * obj: The object (or mapping) to fetch the member from
        * field_base_name: a string known to be a sub-string of the
          target field name
        * dict_: Whether obj is a mapping whose keys should be searched
        * case_sensitive: Whether the search is case sensitive
        * best_match: If True, when several fields match, pick the one
          closest to field_base_name; if False, raise
          MultipleResultsException instead
        * dont_include: Sub-strings that must NOT appear in the field name.
          Prevents cases like soft_get(obj, 'image') -> obj.image_registry

    Returns:
        The value of the target attribute / key.

    Raises:
        TypeError: dict_ is True but obj is not a mapping.
        AttributeError: no member matches.
        MultipleResultsException: several members match and best_match is
            False.
    """
    # collections.Mapping was removed in Python 3.10; the ABC lives in
    # collections.abc on Python 3 (and directly in collections on Python 2).
    try:
        from collections.abc import Mapping
    except ImportError:
        from collections import Mapping
    dont_include = dont_include or []
    signature = ('soft_get({}, {}, dict_={}, case_sensitive={})'
                 .format(obj, field_base_name, dict_, case_sensitive))
    if not case_sensitive:
        field_base_name = field_base_name.lower()
        dont_include = [s.lower() for s in dont_include]
    if dict_:
        if not isinstance(obj, Mapping):
            raise TypeError('{}: {} is not a dict (type={}). '
                            .format(signature, obj, type(obj)))
        all_fields = obj.keys()
    else:
        all_fields = dir(obj)
    found_fields = []
    for field in all_fields:
        origin_field = field
        if not case_sensitive:
            field = field.lower()
        if field_base_name in field and all(s not in field for s in dont_include):
            found_fields.append(origin_field)
    if not found_fields:
        raise AttributeError('{}: Could not find a member for field {}.'
                             .format(signature, field_base_name))
    elif len(found_fields) > 1:
        if not best_match:
            raise MultipleResultsException('{}: Found more than 1 member for {}: {}'
                                           .format(signature, field_base_name, found_fields))
        # Keep the candidate whose name is closest to the requested base name.
        found_fields = [max(found_fields, key=lambda s:
                            SequenceMatcher(None, s, field_base_name).ratio())]
    if dict_:
        return obj[found_fields[0]]
    return getattr(obj, found_fields[0])
| gpl-2.0 |
Jollytown/Garuda | server/garuda/lib/python2.7/site-packages/django/db/models/aggregates.py | 89 | 2785 | """
Classes to represent the definitions of aggregate functions.
"""
from django.db.models.constants import LOOKUP_SEP
__all__ = [
'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance',
]
def refs_aggregate(lookup_parts, aggregates):
    """
    Check whether ``lookup_parts`` references one of the given aggregates.

    Because LOOKUP_SEP is contained in default annotation names, every
    prefix of the lookup parts has to be tried for a match.

    Returns (aggregate, remaining lookup parts) on a match, otherwise
    (False, ()).
    """
    for n in range(len(lookup_parts) + 1):
        prefix = LOOKUP_SEP.join(lookup_parts[:n])
        if prefix in aggregates:
            return aggregates[prefix], lookup_parts[n:]
    return False, ()
class Aggregate(object):
    """
    Default Aggregate definition.
    """
    def __init__(self, lookup, **extra):
        """Instantiate a new aggregate.

        * lookup is the field on which the aggregate operates.
        * extra is a dictionary of additional data to provide for the
          aggregate definition

        Also utilizes the class variable ``name``, the identifier for
        this aggregate function.
        """
        self.lookup = lookup
        self.extra = extra

    def _default_alias(self):
        # e.g. lookup "price" with name "Avg" produces "price__avg".
        return '{0}__{1}'.format(self.lookup, self.name.lower())
    default_alias = property(_default_alias)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Add the aggregate to the nominated query under *alias*.

        Converts this generic Aggregate definition into a backend-specific
        one: the class named ``self.name`` is looked up in the backend's
        aggregates module and instantiated with the column reference *col*
        (an alias or a (table, column) tuple), its underlying *source*
        field (used to coerce the output type for non-ordinal aggregates),
        and *is_summary* (True for summary values rather than annotations).
        """
        backend_class = getattr(query.aggregates_module, self.name)
        backend_aggregate = backend_class(col, source=source, is_summary=is_summary, **self.extra)
        query.aggregates[alias] = backend_aggregate
class Avg(Aggregate):
    """Arithmetic mean of the field's values."""
    name = 'Avg'
class Count(Aggregate):
    """Number of related values."""
    name = 'Count'
class Max(Aggregate):
    """Maximum of the field's values."""
    name = 'Max'
class Min(Aggregate):
    """Minimum of the field's values."""
    name = 'Min'
class StdDev(Aggregate):
    """Standard deviation of the field's values."""
    name = 'StdDev'
class Sum(Aggregate):
    """Sum of the field's values."""
    name = 'Sum'
class Variance(Aggregate):
    """Variance of the field's values."""
    name = 'Variance'
| mit |
madjam/mxnet | example/warpctc/lstm_model.py | 28 | 2886 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
from lstm import LSTMState, LSTMParam, lstm, lstm_inference_symbol
class LSTMInferenceModel(object):
    """Step-wise (batch size 1) inference wrapper around an unrolled LSTM.

    Binds an inference symbol once and carries the LSTM cell/hidden states
    across forward() calls so a sequence can be fed one sample at a time.
    """
    def __init__(self,
                 num_lstm_layer,
                 seq_len,
                 num_hidden,
                 num_label,
                 arg_params,
                 data_size,
                 ctx=mx.cpu()):
        # NOTE(review): ctx=mx.cpu() is evaluated once at class-definition
        # time; harmless for a context object, but worth confirming.
        self.sym = lstm_inference_symbol(num_lstm_layer,
                                         seq_len,
                                         num_hidden,
                                         num_label)
        # Inference runs one sample at a time.
        batch_size = 1
        # Initial cell (c) and hidden (h) state shapes, one pair per layer.
        init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
        init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
        data_shape = [("data", (batch_size, data_size))]
        input_shapes = dict(init_c + init_h + data_shape)
        self.executor = self.sym.simple_bind(ctx=ctx, **input_shapes)
        # Copy the trained weights into the bound executor's argument arrays.
        for key in self.executor.arg_dict.keys():
            if key in arg_params:
                arg_params[key].copyto(self.executor.arg_dict[key])
        # Map state input names to the executor outputs that produce the
        # next states (output 0 is assumed to be the prediction — TODO confirm
        # against lstm_inference_symbol's output ordering).
        state_name = []
        for i in range(num_lstm_layer):
            state_name.append("l%d_init_c" % i)
            state_name.append("l%d_init_h" % i)
        self.states_dict = dict(zip(state_name, self.executor.outputs[1:]))
        self.input_arr = mx.nd.zeros(data_shape[0][1])
    def forward(self, input_data, new_seq=False):
        """Run one inference step.

        input_data: NDArray shaped (1, data_size) for this step.
        new_seq: when True, zero the LSTM states to start a new sequence.
        Returns the output probabilities as a numpy array.
        """
        if new_seq == True:
            for key in self.states_dict.keys():
                self.executor.arg_dict[key][:] = 0.
        input_data.copyto(self.executor.arg_dict["data"])
        self.executor.forward()
        # Feed the produced states back as the next step's initial states.
        for key in self.states_dict.keys():
            self.states_dict[key].copyto(self.executor.arg_dict[key])
        prob = self.executor.outputs[0].asnumpy()
        return prob
| apache-2.0 |
nhicher/ansible | lib/ansible/modules/network/fortimanager/fmgr_sys_proxy.py | 16 | 5657 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_sys_proxy
version_added: "2.8"
author: Andrew Welsh
short_description: Make FortiGate API calls via the FortiMananger
description:
- The FMG proxies FOS API calls via the FMG. Review FortiGate API documentation to ensure you are passing correct
parameters for both the FortiManager and FortiGate
options:
adom:
description:
- The administrative domain (admon) the configuration belongs to
required: true
host:
description:
- The FortiManager's Address.
required: true
username:
description:
- The username to log into the FortiManager
required: true
password:
description:
- The password associated with the username account.
required: false
action:
description:
- Specify HTTP action for the request. Either 'get' or 'post'
required: True
payload:
description:
- JSON payload of the request. The payload will be URL-encoded and becomes the "json" field in the query string for both GET and POST request.
required: False
resource:
description:
- URL on the remote device to be accessed, string
required: True
target:
description:
- FOS datasource, either device or group object
required: True
'''
EXAMPLES = '''
- name: Proxy FOS requests via FMG
hosts: FortiManager
connection: local
gather_facts: False
tasks:
- name: Get upgrade path for FGT1
fmgr_provision:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
action: "get"
resource: "/api/v2/monitor/system/firmware/upgrade-paths?vdom=root"
target: ["/adom/root/device/FGT1"]
- name: Upgrade firmware of FGT1
fmgr_provision:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
action: "post"
payload: {source: upload, file_content: b64_encoded_string, file_name: file_name}
resource: "/api/v2/monitor/system/firmware/upgrade?vdom=vdom"
target: ["/adom/root/device/FGT1"]
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: string
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.network.fortimanager.fortimanager import AnsibleFortiManager
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
def fos_request(fmg, action, resource, target, payload, adom='root'):
    """Proxy a FortiOS REST API call through FortiManager's /sys/proxy/json.

    :param fmg: connected FortiManager handle exposing ``execute(url, data)``
    :param action: HTTP action for the proxied request, 'get' or 'post'
    :param resource: FortiOS API URL, including any vdom query parameters
    :param target: FMG datasource (device or group object) to proxy to
    :param payload: request payload forwarded to the FortiGate
    :param adom: administrative domain; NOTE(review): currently not used in
        the request body — confirm whether the proxy call should scope to it
    :return: (status, response) tuple as returned by ``fmg.execute``
    """
    request_body = {
        "data": {
            "action": action,
            "payload": payload,
            "resource": resource,
            "target": target,
        },
    }
    status, response = fmg.execute("/sys/proxy/json", request_body)
    return status, response
def main():
    """Ansible entry point: log into FortiManager, proxy the requested
    FortiOS API call via fos_request, and exit with the result."""
    argument_spec = dict(
        adom=dict(required=False, type="str"),
        host=dict(required=True, type="str"),
        # Credentials may come from the standard Ansible network env vars.
        password=dict(fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True),
        username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"]), no_log=True),
        action=dict(required=False, type="str"),
        resource=dict(required=False, type="str"),
        target=dict(required=False, type="str"),
        payload=dict(required=False, type="str"),
    )
    module = AnsibleModule(argument_spec, supports_check_mode=True, )
    action = module.params["action"]
    resource = module.params["resource"]
    target = module.params["target"]
    payload = module.params["payload"]
    # check if params are set
    if module.params["host"] is None or module.params["username"] is None:
        module.fail_json(msg="Host and username are required for connection")
    # check if login failed
    fmg = AnsibleFortiManager(module, module.params["host"], module.params["username"], module.params["password"])
    response = fmg.login()
    if response[1]['status']['code'] != 0:
        module.fail_json(msg="Connection to FortiManager Failed")
    else:
        if module.params["adom"] is None:
            module.params["adom"] = 'root'
        status, result = fos_request(fmg, action, resource, target, payload, module.params["adom"])
        if not status == 0:
            # NOTE(review): this message looks copy-pasted from an
            # upgrade-path module — the proxied call may be any request.
            module.fail_json(msg="Failure showing upgrade path", **result)
        fmg.logout()
    # results is returned as a tuple
    return module.exit_json(changed=True, **result)
| gpl-3.0 |
pymedusa/Medusa | ext/oauthlib/oauth1/rfc5849/parameters.py | 6 | 4969 | # -*- coding: utf-8 -*-
"""
oauthlib.parameters
~~~~~~~~~~~~~~~~~~~
This module contains methods related to `section 3.5`_ of the OAuth 1.0a spec.
.. _`section 3.5`: https://tools.ietf.org/html/rfc5849#section-3.5
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import extract_params, urlencode
from . import utils
try:
from urlparse import urlparse, urlunparse
except ImportError: # noqa
from urllib.parse import urlparse, urlunparse
# TODO: do we need filter_params now that oauth_params are handled by Request?
# We can easily pass in just oauth protocol params.
@utils.filter_params
def prepare_headers(oauth_params, headers=None, realm=None):
    """**Prepare the Authorization header.**

    Per `section 3.5.1`_ of the spec: protocol parameters are transmitted
    in the HTTP "Authorization" header with the auth-scheme name "OAuth".

    For example::

        Authorization: OAuth realm="Example",
            oauth_consumer_key="0685bd9184jfhq22",
            oauth_signature_method="HMAC-SHA1",
            ...

    .. _`section 3.5.1`: https://tools.ietf.org/html/rfc5849#section-3.5.1
    """
    headers = headers or {}
    # Encode each protocol parameter per Section 3.6 and render it as
    # name="value" (the value may be empty).
    encoded_pairs = [
        '{0}="{1}"'.format(utils.escape(name), utils.escape(value))
        for name, value in oauth_params
    ]
    # Parameters are separated by "," with optional whitespace (RFC2617).
    parameter_string = ', '.join(encoded_pairs)
    # An OPTIONAL realm parameter may lead the list; it is NOT escaped.
    if realm:
        parameter_string = 'realm="%s", ' % realm + parameter_string
    # Contribute the Authorization header to the given headers.
    full_headers = dict(headers)
    full_headers['Authorization'] = 'OAuth %s' % parameter_string
    return full_headers
def _append_params(oauth_params, params):
    """Append OAuth params to an existing set of parameters.

    Both params and oauth_params must be lists of 2-tuples.

    Per `section 3.5.2`_ and `3.5.3`_ of the spec: protocol parameters
    SHOULD follow any request-specific parameters. The stable sort keeps
    each group's relative order intact.

    .. _`section 3.5.2`: https://tools.ietf.org/html/rfc5849#section-3.5.2
    .. _`3.5.3`: https://tools.ietf.org/html/rfc5849#section-3.5.3
    """
    combined = list(params)
    combined.extend(oauth_params)
    return sorted(combined, key=lambda pair: pair[0].startswith('oauth_'))
def prepare_form_encoded_body(oauth_params, body):
    """Prepare the Form-Encoded Body.

    Per `section 3.5.2`_ of the spec.

    Returns *body* (a list of 2-tuples) with the OAuth protocol parameters
    appended after the request-specific ones.

    .. _`section 3.5.2`: https://tools.ietf.org/html/rfc5849#section-3.5.2
    """
    # append OAuth params to the existing body
    return _append_params(oauth_params, body)
def prepare_request_uri_query(oauth_params, uri):
    """Prepare the Request URI Query.

    Per `section 3.5.3`_ of the spec.

    .. _`section 3.5.3`: https://tools.ietf.org/html/rfc5849#section-3.5.3
    """
    # Merge the OAuth protocol parameters into the existing query string,
    # then reassemble the URI with the updated query component.
    scheme, netloc, path, params, query, fragment = urlparse(uri)
    existing_params = extract_params(query) or []
    updated_query = urlencode(_append_params(oauth_params, existing_params))
    return urlunparse((scheme, netloc, path, params, updated_query, fragment))
| gpl-3.0 |
erja-gp/openthread | tools/harness-automation/cases/router_7_1_2.py | 16 | 1875 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Router_7_1_2(HarnessCase):
    """Thread certification test case 7.1.2, run with the DUT as a router."""
    role = HarnessCase.ROLE_ROUTER  # DUT acts as a Thread router
    case = '7 1 2'  # test-harness case identifier
    golden_devices_required = 3  # reference devices needed by the topology
    def on_dialog(self, dialog, title):
        """No harness dialogs need handling for this case; ignore them."""
        pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
zhangjunlei26/servo | python/servo/build_commands.py | 2 | 12033 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
import os
import os.path as path
import subprocess
import sys
from time import time
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, cd
def is_headless_build():
    """Return True when the SERVO_HEADLESS environment variable is set to 1."""
    value = os.getenv('SERVO_HEADLESS', 0)
    return int(value) == 1
def notify_linux(title, text):
    """Display a desktop notification on Linux via the freedesktop D-Bus
    notification service.

    Raises an Exception with an actionable message only when the Python
    dbus bindings are missing. Other D-Bus failures propagate unchanged
    (the previous bare ``except`` misreported every runtime error as a
    missing module); the caller, notify(), already catches them.
    """
    try:
        import dbus
    except ImportError:
        raise Exception("Please make sure that the Python dbus module is installed!")
    bus = dbus.SessionBus()
    notify_obj = bus.get_object("org.freedesktop.Notifications", "/org/freedesktop/Notifications")
    method = notify_obj.get_dbus_method("Notify", "org.freedesktop.Notifications")
    # Arguments: app_name, replaces_id, app_icon, summary, body, actions,
    # hints, expire_timeout (-1 = server default).
    method(title, 0, "", text, "", [], [], -1)
def notify_win(title, text):
    """Flash the console window's taskbar entry on Windows.

    Taskbar flashing carries no message text, so *title* and *text* are
    accepted for interface parity with the other notifiers but unused.
    """
    # WINFUNCTYPE lives in ctypes itself; importing it from ctypes.wintypes
    # only worked on Python 2, where wintypes star-imported ctypes.
    from ctypes import Structure, windll, POINTER, sizeof, WINFUNCTYPE
    from ctypes.wintypes import DWORD, HANDLE, BOOL, UINT

    class FLASHWINDOW(Structure):
        # Mirrors the Win32 FLASHWINFO structure.
        _fields_ = [("cbSize", UINT),
                    ("hwnd", HANDLE),
                    ("dwFlags", DWORD),
                    ("uCount", UINT),
                    ("dwTimeout", DWORD)]

    FlashWindowExProto = WINFUNCTYPE(BOOL, POINTER(FLASHWINDOW))
    FlashWindowEx = FlashWindowExProto(("FlashWindowEx", windll.user32))
    # Flash both the caption and the taskbar button until the window comes
    # to the foreground (FLASHW_TIMERNOFG), up to 3 flashes.
    FLASHW_CAPTION = 0x01
    FLASHW_TRAY = 0x02
    FLASHW_TIMERNOFG = 0x0C

    params = FLASHWINDOW(sizeof(FLASHWINDOW),
                         windll.kernel32.GetConsoleWindow(),
                         FLASHW_CAPTION | FLASHW_TRAY | FLASHW_TIMERNOFG, 3, 0)
    FlashWindowEx(params)
def notify_darwin(title, text):
    """Schedule a macOS user notification through the Cocoa bridge (pyobjc)."""
    import Foundation
    import objc
    NSUserNotification = objc.lookUpClass("NSUserNotification")
    NSUserNotificationCenter = objc.lookUpClass("NSUserNotificationCenter")
    note = NSUserNotification.alloc().init()
    note.setTitle_(title)
    note.setInformativeText_(text)
    # Deliver immediately: schedule for "now" (0 seconds after the current date).
    now = Foundation.NSDate.dateWithTimeInterval_sinceDate_(0, Foundation.NSDate.date())
    note.setDeliveryDate_(now)
    centre = NSUserNotificationCenter.defaultUserNotificationCenter()
    centre.scheduleNotification_(note)
def notify_build_done(elapsed):
    """Generate desktop notification when build is complete and the
    elapsed build time was longer than 30 seconds."""
    if elapsed <= 30:
        return
    notify("Servo build", "Completed in %0.2fs" % elapsed)
def notify(title, text):
    """Generate a desktop notification using appropriate means on
    supported platforms Linux, Windows, and Mac OS. On unsupported
    platforms, this function acts as a no-op."""
    handlers = {
        "linux": notify_linux,
        "win": notify_win,
        "darwin": notify_darwin,
    }
    handler = handlers.get(sys.platform)
    if handler is None:
        return
    try:
        handler(title, text)
    except Exception as e:
        extra = getattr(e, "message", "")
        print("[Warning] Could not generate notification!%s" % extra, file=sys.stderr)
@CommandProvider
class MachCommands(CommandBase):
    """Mach commands that drive Cargo builds of Servo and its ports
    (CEF, Gonk) plus test-suite compilation and cleanup."""
    @Command('build',
             description='Build Servo',
             category='build')
    @CommandArgument('--target', '-t',
                     default=None,
                     help='Cross compile for given target platform')
    @CommandArgument('--release', '-r',
                     action='store_true',
                     help='Build in release mode')
    @CommandArgument('--dev', '-d',
                     action='store_true',
                     help='Build in development mode')
    @CommandArgument('--jobs', '-j',
                     default=None,
                     help='Number of jobs to run in parallel')
    @CommandArgument('--android',
                     default=None,
                     action='store_true',
                     help='Build for Android')
    @CommandArgument('--debug-mozjs',
                     default=None,
                     action='store_true',
                     help='Enable debug assertions in mozjs')
    @CommandArgument('--verbose', '-v',
                     action='store_true',
                     help='Print verbose output')
    @CommandArgument('params', nargs='...',
                     help="Command-line arguments to be passed through to Cargo")
    def build(self, target=None, release=False, dev=False, jobs=None,
              android=None, verbose=False, debug_mozjs=False, params=None):
        """Build the main Servo binary with Cargo.

        Resolves the build mode (dev/release) from flags, config, or
        whichever binary already exists; optionally cross-builds for
        Android (including its OpenSSL dependency). Returns the Cargo
        subprocess exit status.
        """
        self.ensure_bootstrapped()
        if android is None:
            # Fall back to the configured default when --android is not given.
            android = self.config["build"]["android"]
        opts = params or []
        features = []
        base_path = path.join("components", "servo", "target")
        release_path = path.join(base_path, "release", "servo")
        dev_path = path.join(base_path, "debug", "servo")
        release_exists = path.exists(release_path)
        dev_exists = path.exists(dev_path)
        if not (release or dev):
            # Mode resolution order: config, then whichever build artifact
            # already exists; ambiguous cases abort with a usage message.
            if self.config["build"]["mode"] == "dev":
                dev = True
            elif self.config["build"]["mode"] == "release":
                release = True
            elif release_exists and not dev_exists:
                release = True
            elif dev_exists and not release_exists:
                dev = True
            else:
                print("Please specify either --dev (-d) for a development")
                print(" build, or --release (-r) for an optimized build.")
                sys.exit(1)
        if release and dev:
            print("Please specify either --dev or --release.")
            sys.exit(1)
        if release:
            opts += ["--release"]
        if target:
            opts += ["--target", target]
        if jobs is not None:
            opts += ["-j", jobs]
        if verbose:
            opts += ["-v"]
        if android:
            # Ensure the APK builder submodule has been built first
            apk_builder_dir = "support/android-rs-glue"
            with cd(path.join(apk_builder_dir, "apk-builder")):
                subprocess.call(["cargo", "build"], env=self.build_env())
            opts += ["--target", "arm-linux-androideabi"]
        if debug_mozjs or self.config["build"]["debug-mozjs"]:
            features += ["script/debugmozjs"]
        if is_headless_build():
            opts += ["--no-default-features"]
            features += ["headless"]
        if android:
            features += ["android_glue"]
        if features:
            # Cargo expects a single space-separated feature list.
            opts += ["--features", "%s" % ' '.join(features)]
        build_start = time()
        env = self.build_env()
        if android:
            # Build OpenSSL for android
            make_cmd = ["make"]
            if jobs is not None:
                make_cmd += ["-j" + jobs]
            with cd(self.android_support_dir()):
                status = subprocess.call(
                    make_cmd + ["-f", "openssl.makefile"],
                    env=self.build_env())
                if status:
                    return status
            openssl_dir = path.join(self.android_support_dir(), "openssl-1.0.1k")
            # Point the Rust openssl crate at the freshly built static library.
            env['OPENSSL_LIB_DIR'] = openssl_dir
            env['OPENSSL_INCLUDE_DIR'] = path.join(openssl_dir, "include")
            env['OPENSSL_STATIC'] = 'TRUE'
        status = subprocess.call(
            ["cargo", "build"] + opts,
            env=env, cwd=self.servo_crate())
        elapsed = time() - build_start
        # Generate Desktop Notification if elapsed-time > some threshold value
        notify_build_done(elapsed)
        print("Build completed in %0.2fs" % elapsed)
        return status
    @Command('build-cef',
             description='Build the Chromium Embedding Framework library',
             category='build')
    @CommandArgument('--jobs', '-j',
                     default=None,
                     help='Number of jobs to run in parallel')
    @CommandArgument('--verbose', '-v',
                     action='store_true',
                     help='Print verbose output')
    @CommandArgument('--release', '-r',
                     action='store_true',
                     help='Build in release mode')
    def build_cef(self, jobs=None, verbose=False, release=False):
        """Build the CEF port (ports/cef) with Cargo; returns exit status."""
        self.ensure_bootstrapped()
        ret = None
        opts = []
        if jobs is not None:
            opts += ["-j", jobs]
        if verbose:
            opts += ["-v"]
        if release:
            opts += ["--release"]
        build_start = time()
        with cd(path.join("ports", "cef")):
            ret = subprocess.call(["cargo", "build"] + opts,
                                  env=self.build_env())
        elapsed = time() - build_start
        # Generate Desktop Notification if elapsed-time > some threshold value
        notify_build_done(elapsed)
        print("CEF build completed in %0.2fs" % elapsed)
        return ret
    @Command('build-gonk',
             description='Build the Gonk port',
             category='build')
    @CommandArgument('--jobs', '-j',
                     default=None,
                     help='Number of jobs to run in parallel')
    @CommandArgument('--verbose', '-v',
                     action='store_true',
                     help='Print verbose output')
    @CommandArgument('--release', '-r',
                     action='store_true',
                     help='Build in release mode')
    def build_gonk(self, jobs=None, verbose=False, release=False):
        """Build the Gonk (Firefox OS) port; always cross-compiles for ARM."""
        self.ensure_bootstrapped()
        ret = None
        opts = []
        if jobs is not None:
            opts += ["-j", jobs]
        if verbose:
            opts += ["-v"]
        if release:
            opts += ["--release"]
        opts += ["--target", "arm-linux-androideabi"]
        env = self.build_env(gonk=True)
        build_start = time()
        with cd(path.join("ports", "gonk")):
            ret = subprocess.call(["cargo", "build"] + opts, env=env)
        elapsed = time() - build_start
        # Generate Desktop Notification if elapsed-time > some threshold value
        notify_build_done(elapsed)
        print("Gonk build completed in %0.2fs" % elapsed)
        return ret
    @Command('build-tests',
             description='Build the Servo test suites',
             category='build')
    @CommandArgument('--jobs', '-j',
                     default=None,
                     help='Number of jobs to run in parallel')
    def build_tests(self, jobs=None):
        """Compile (but do not run) the Cargo test suites."""
        self.ensure_bootstrapped()
        args = ["cargo", "test", "--no-run"]
        if is_headless_build():
            args += ["--no-default-features", "--features", "headless"]
        return subprocess.call(
            args,
            env=self.build_env(), cwd=self.servo_crate())
    @Command('clean',
             description='Clean the build directory.',
             category='build')
    @CommandArgument('--manifest-path',
                     default=None,
                     help='Path to the manifest to the package to clean')
    @CommandArgument('--verbose', '-v',
                     action='store_true',
                     help='Print verbose output')
    @CommandArgument('params', nargs='...',
                     help="Command-line arguments to be passed through to Cargo")
    def clean(self, manifest_path, params, verbose=False):
        """Run `cargo clean`, forwarding extra params; returns exit status."""
        self.ensure_bootstrapped()
        opts = []
        if manifest_path:
            opts += ["--manifest-path", manifest_path]
        if verbose:
            opts += ["-v"]
        opts += params
        return subprocess.call(["cargo", "clean"] + opts,
                               env=self.build_env(), cwd=self.servo_crate())
| mpl-2.0 |
NumCosmo/NumCosmo | examples/example_ode_spline.py | 1 | 1165 | #!/usr/bin/env python
# Example script: build a NumCosmo ODE spline (y' = y, i.e. exp(x)) and
# print the fitted cubic-spline coefficients.
try:
    import gi
    gi.require_version('NumCosmo', '1.0')
    gi.require_version('NumCosmoMath', '1.0')
except:
    # Best-effort: older installations expose the typelib without versioning.
    pass
import ctypes
from math import *
from gi.repository import NumCosmoMath as Ncm
from gi.repository import NumCosmo as Nc
from gi.repository import GObject
#
#  Initializing the library objects, this must be called before
#  any other library function.
#
Ncm.cfg_init ()
class TestClass (Ncm.Model):
    # Minimal Ncm.Model subclass; calling an instance echoes its first argument.
    def __call__ (self, *args):
        return args[0]
aas = TestClass ()
def test (y, x, data):
    # Right-hand side of the ODE dy/dx = f(y, x): here f(y, x) = y.
    return y
# ctypes-style metadata consumed by the GObject-introspection callback
# bridge when `test` is handed to Ncm.OdeSpline below.
test.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_char_p]
test.restype = ctypes.c_double
s = Ncm.SplineCubicNotaknot.new ()
os = Ncm.OdeSpline.new (s, test)
os.set_reltol (1.0e-3)
# Integrate y' = y on [0, 5] with y(0) = 1, i.e. y = exp(x).
os.props.xi = 0.0
os.props.xf = 5.0
os.props.yi = 1.0
nhaca = [1,2,3,4]
# Opaque user-data pointer forwarded to the RHS callback; presumably the
# address from id() stands in for a void* — TODO confirm against NumCosmo docs.
os.prepare (id (nhaca))
ss = os.peek_spline()
# Dump knots and cubic coefficients: x, y, b, c, d per segment.
for i in range (ss.len):
    print ("%d % 22.15g % 22.15g % 22.15g % 22.15g % 22.15g" % (i, ss.xv.get (i), ss.yv.get (i), ss.b.get (i), ss.c.get(i), ss.d.get(i)))
#for i in range (100):
#  x = 1.0 / 99.0 * i
#  expx = exp (x)
#  odex = ss.eval (x)
#  print (x, expx, odex, fabs ((expx - odex) / expx))
| gpl-3.0 |
neilhan/tensorflow | tensorflow/python/training/slot_creator.py | 27 | 4034 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
the primary object:
```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)
# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
def _create_slot_var(primary, val, scope):
  """Helper function for creating a slot variable.

  Creates a non-trainable `Variable` named after `scope` and, when the
  primary is a partitioned variable, copies the primary's slice metadata
  onto the slot so both are saved/restored with the same partitioning.
  """
  slot = variables.Variable(val, name=scope, trainable=False)
  # pylint: disable=protected-access
  if isinstance(primary, variables.Variable) and primary._save_slice_info:
    # Primary is a partitioned variable, so we need to also indicate that
    # the slot is a partitioned variable.  Slots have the same partitioning
    # as their primaries.
    # Strip "<primary op name>/" prefix and the trailing "/" from the scope
    # to recover the bare slot name for the slice metadata.
    real_slot_name = scope[len(primary.op.name + "/"):-1]
    slice_info = primary._save_slice_info
    slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
        slice_info.full_name + "/" + real_slot_name,
        slice_info.full_shape[:],
        slice_info.var_offset[:],
        slice_info.var_shape[:]))
  # pylint: enable=protected-access
  return slot
def create_slot(primary, val, name, colocate_with_primary=True):
  """Create a slot initialized to the given value.

  The type of the slot is determined by the given value.

  Args:
    primary: The primary `Variable` or `Tensor`.
    val: A `Tensor` specifying the initial value of the slot.
    name: Name to use for the slot variable.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  # Scope the slot name in the namespace of the primary variable.
  with ops.name_scope(primary.op.name + "/" + name) as scope:
    if not colocate_with_primary:
      return _create_slot_var(primary, val, scope)
    # Pin the slot to the primary's device before creating it.
    with ops.colocate_with(primary):
      return _create_slot_var(primary, val, scope)
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
  """Create a slot initialized to 0 with same shape as the primary object.

  Args:
    primary: The primary `Variable` or `Tensor`.
    name: Name to use for the slot variable.
    dtype: Type of the slot variable.  Defaults to the type of `primary`.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  slot_dtype = primary.dtype if dtype is None else dtype
  # The slot mirrors the primary's static shape, filled with zeros.
  zeros = array_ops.zeros(primary.get_shape().as_list(), dtype=slot_dtype)
  return create_slot(primary, zeros, name,
                     colocate_with_primary=colocate_with_primary)
| apache-2.0 |
Geodan/natuurbandmodel | server-wps/wildfire_makelcp.py | 1 | 1431 | from geoserver.wps import process
from com.ziclix.python.sql import zxJDBC
jdbc_url = "jdbc:postgresql://192.168.40.5:3389/research"
username = "modeluser"
password = "modeluser"
driver = "org.postgresql.Driver"
cgi_url = "http://model.geodan.nl/main/gmi/cgi-bin/"
@process(
    title='MakeLcp',
    description='Build landscape file',
    inputs={
        'userid' : (int, 'User ID'),
        'terreinid': (int,'Terrein ID'),
        'landscapename': (str,'Name of landscape')
    },
    outputs={
        'string': (str,'JSON string')
    }
)
def run(userid, terreinid, landscapename):
    """GeoServer WPS process: schedule a 'make landscape file' (LCP) run.

    Registers a new run row plus its parameters in PostgreSQL, then kicks
    off the CGI worker asynchronously via curl. Returns a small JSON status
    string for the WPS client.

    NOTE(review): database and CGI credentials are hardcoded at module
    level — consider moving them to configuration.
    """
    #Connect to postgres
    conn = zxJDBC.connect(jdbc_url,username, password, driver)
    cur = conn.cursor()
    # Insert the run bookkeeping row (model 4 = MakeLcp; user is hardcoded
    # to 1 here, not `userid` — presumably intentional, TODO confirm).
    query = """
    INSERT INTO administration.runs ("user", model, status, percentage, lastupdate) VALUES (?,?,?, ?, now());
    """
    data = [1,4,"scheduled",0]
    cur.execute(query, data)
    conn.commit()
    # Fetch the id of the row just inserted.
    # NOTE(review): MAX(id) is racy under concurrent inserts; a sequence
    # RETURNING clause would be safer — verify before relying on it.
    query = """SELECT MAX(id) FROM administration.runs;"""
    cur.execute(query)
    result = cur.fetchone()
    runid = result[0]
    # Store the per-run parameters for the worker to pick up.
    query = """
    INSERT INTO administration.params_makelcp
    (run, terrein_id, terrein_name)
    VALUES
    (?, ?, ?);
    """
    data = [runid,terreinid,landscapename]
    cur.execute(query, data )
    conn.commit()
    # Fire-and-forget: trigger the CGI worker; we do not wait for it.
    import subprocess
    p = subprocess.Popen(['/usr/bin/curl','-u', 'demo:demo',cgi_url+'makeLcp.py'])
    p.daemon = True
    return '{"runid":'+str(runid)+',"status":"scheduled","percentage":0}'
| mit |
mindnervestech/mnrp | addons/hr/hr.py | 141 | 20357 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.modules.module import get_module_resource
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class hr_employee_category(osv.Model):
    """Hierarchical employee tag (category); displayed as 'Parent / Child'."""
    def name_get(self, cr, uid, ids, context=None):
        """Return (id, display_name) pairs, prefixing the parent tag name."""
        if not ids:
            return []
        reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
        res = []
        for record in reads:
            name = record['name']
            if record['parent_id']:
                # parent_id is a (id, name) pair from read(); use the name.
                name = record['parent_id'][1]+' / '+name
            res.append((record['id'], name))
        return res
    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        # Function-field wrapper exposing name_get as 'complete_name'.
        res = self.name_get(cr, uid, ids, context=context)
        return dict(res)
    _name = "hr.employee.category"
    _description = "Employee Category"
    _columns = {
        'name': fields.char("Employee Tag", required=True),
        'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
        'parent_id': fields.many2one('hr.employee.category', 'Parent Employee Tag', select=True),
        'child_ids': fields.one2many('hr.employee.category', 'parent_id', 'Child Categories'),
        'employee_ids': fields.many2many('hr.employee', 'employee_category_rel', 'category_id', 'emp_id', 'Employees'),
    }
    def _check_recursion(self, cr, uid, ids, context=None):
        """Walk up the parent chain; False if a cycle (depth > 100) is found."""
        level = 100
        while len(ids):
            cr.execute('select distinct parent_id from hr_employee_category where id IN %s', (tuple(ids), ))
            # Drop NULL parents and continue with the next level up.
            ids = filter(None, map(lambda x:x[0], cr.fetchall()))
            if not level:
                return False
            level -= 1
        return True
    _constraints = [
        (_check_recursion, 'Error! You cannot create recursive Categories.', ['parent_id'])
    ]
class hr_job(osv.Model):
    """Job position with recruitment state and derived headcount figures."""
    def _get_nbr_employees(self, cr, uid, ids, name, args, context=None):
        """Compute current and forecasted headcount for each job."""
        res = {}
        for job in self.browse(cr, uid, ids, context=context):
            nb_employees = len(job.employee_ids or [])
            res[job.id] = {
                'no_of_employee': nb_employees,
                'expected_employees': nb_employees + job.no_of_recruitment,
            }
        return res
    def _get_job_position(self, cr, uid, ids, context=None):
        # store-trigger: map changed employee ids to their job ids so the
        # function fields above are recomputed.
        res = []
        for employee in self.pool.get('hr.employee').browse(cr, uid, ids, context=context):
            if employee.job_id:
                res.append(employee.job_id.id)
        return res
    _name = "hr.job"
    _description = "Job Position"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    _columns = {
        'name': fields.char('Job Name', required=True, select=True),
        'expected_employees': fields.function(_get_nbr_employees, string='Total Forecasted Employees',
            help='Expected number of employees for this job position after new recruitment.',
            store = {
                'hr.job': (lambda self,cr,uid,ids,c=None: ids, ['no_of_recruitment'], 10),
                'hr.employee': (_get_job_position, ['job_id'], 10),
            }, type='integer',
            multi='_get_nbr_employees'),
        'no_of_employee': fields.function(_get_nbr_employees, string="Current Number of Employees",
            help='Number of employees currently occupying this job position.',
            store = {
                'hr.employee': (_get_job_position, ['job_id'], 10),
            }, type='integer',
            multi='_get_nbr_employees'),
        'no_of_recruitment': fields.integer('Expected New Employees', copy=False,
            help='Number of new employees you expect to recruit.'),
        'no_of_hired_employee': fields.integer('Hired Employees', copy=False,
            help='Number of hired employees for this job position during recruitment phase.'),
        'employee_ids': fields.one2many('hr.employee', 'job_id', 'Employees', groups='base.group_user'),
        'description': fields.text('Job Description'),
        'requirements': fields.text('Requirements'),
        'department_id': fields.many2one('hr.department', 'Department'),
        'company_id': fields.many2one('res.company', 'Company'),
        'state': fields.selection([('open', 'Recruitment Closed'), ('recruit', 'Recruitment in Progress')],
                                  string='Status', readonly=True, required=True,
                                  track_visibility='always', copy=False,
                                  help="By default 'Closed', set it to 'In Recruitment' if recruitment process is going on for this job position."),
        'write_date': fields.datetime('Update Date', readonly=True),
    }
    _defaults = {
        'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'hr.job', context=ctx),
        'state': 'open',
    }
    _sql_constraints = [
        ('name_company_uniq', 'unique(name, company_id, department_id)', 'The name of the job position must be unique per department in company!'),
        ('hired_employee_check', "CHECK ( no_of_hired_employee <= no_of_recruitment )", "Number of hired employee must be less than expected number of employee in recruitment."),
    ]
    def set_recruit(self, cr, uid, ids, context=None):
        """Open recruitment; ensure at least one expected recruit."""
        for job in self.browse(cr, uid, ids, context=context):
            no_of_recruitment = job.no_of_recruitment == 0 and 1 or job.no_of_recruitment
            self.write(cr, uid, [job.id], {'state': 'recruit', 'no_of_recruitment': no_of_recruitment}, context=context)
        return True
    def set_open(self, cr, uid, ids, context=None):
        """Close recruitment and reset the recruitment counters."""
        self.write(cr, uid, ids, {
            'state': 'open',
            'no_of_recruitment': 0,
            'no_of_hired_employee': 0
        }, context=context)
        return True
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate the job, suffixing the name with '(copy)' when not given."""
        if default is None:
            default = {}
        if 'name' not in default:
            job = self.browse(cr, uid, id, context=context)
            default['name'] = _("%s (copy)") % (job.name)
        return super(hr_job, self).copy(cr, uid, id, default=default, context=context)
    # ----------------------------------------
    # Compatibility methods
    # ----------------------------------------
    _no_of_employee = _get_nbr_employees  # v7 compatibility
    job_open = set_open  # v7 compatibility
    job_recruitment = set_recruit  # v7 compatibility
class hr_employee(osv.osv):
    """Employee record, delegating to resource.resource and mail.thread.

    Fix vs. previous revision: the default-values dict was bound to a plain
    ``defaults`` attribute, which the OpenERP ORM ignores; it must be
    ``_defaults`` for the active/image/color defaults to apply.
    """
    _name = "hr.employee"
    _description = "Employee"
    _order = 'name_related'
    _inherits = {'resource.resource': "resource_id"}
    _inherit = ['mail.thread']
    _mail_post_access = 'read'
    def _get_image(self, cr, uid, ids, name, args, context=None):
        """Function-field getter returning resized variants of 'image'."""
        result = dict.fromkeys(ids, False)
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = tools.image_get_resized_images(obj.image)
        return result
    def _set_image(self, cr, uid, id, name, value, args, context=None):
        """Function-field setter: store the big-size version of the photo."""
        return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
    _columns = {
        #we need a related field in order to be able to sort the employee by name
        'name_related': fields.related('resource_id', 'name', type='char', string='Name', readonly=True, store=True),
        'country_id': fields.many2one('res.country', 'Nationality'),
        'birthday': fields.date("Date of Birth"),
        'ssnid': fields.char('SSN No', help='Social Security Number'),
        'sinid': fields.char('SIN No', help="Social Insurance Number"),
        'identification_id': fields.char('Identification No'),
        'otherid': fields.char('Other Id'),
        'gender': fields.selection([('male', 'Male'), ('female', 'Female')], 'Gender'),
        'marital': fields.selection([('single', 'Single'), ('married', 'Married'), ('widower', 'Widower'), ('divorced', 'Divorced')], 'Marital Status'),
        'department_id': fields.many2one('hr.department', 'Department'),
        'address_id': fields.many2one('res.partner', 'Working Address'),
        'address_home_id': fields.many2one('res.partner', 'Home Address'),
        'bank_account_id': fields.many2one('res.partner.bank', 'Bank Account Number', domain="[('partner_id','=',address_home_id)]", help="Employee bank salary account"),
        'work_phone': fields.char('Work Phone', readonly=False),
        'mobile_phone': fields.char('Work Mobile', readonly=False),
        'work_email': fields.char('Work Email', size=240),
        'work_location': fields.char('Office Location'),
        'notes': fields.text('Notes'),
        'parent_id': fields.many2one('hr.employee', 'Manager'),
        'category_ids': fields.many2many('hr.employee.category', 'employee_category_rel', 'emp_id', 'category_id', 'Tags'),
        'child_ids': fields.one2many('hr.employee', 'parent_id', 'Subordinates'),
        'resource_id': fields.many2one('resource.resource', 'Resource', ondelete='cascade', required=True, auto_join=True),
        'coach_id': fields.many2one('hr.employee', 'Coach'),
        'job_id': fields.many2one('hr.job', 'Job Title'),
        # image: all image fields are base64 encoded and PIL-supported
        'image': fields.binary("Photo",
            help="This field holds the image used as photo for the employee, limited to 1024x1024px."),
        'image_medium': fields.function(_get_image, fnct_inv=_set_image,
            string="Medium-sized photo", type="binary", multi="_get_image",
            store = {
                'hr.employee': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Medium-sized photo of the employee. It is automatically "\
                 "resized as a 128x128px image, with aspect ratio preserved. "\
                 "Use this field in form views or some kanban views."),
        'image_small': fields.function(_get_image, fnct_inv=_set_image,
            string="Small-sized photo", type="binary", multi="_get_image",
            store = {
                'hr.employee': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Small-sized photo of the employee. It is automatically "\
                 "resized as a 64x64px image, with aspect ratio preserved. "\
                 "Use this field anywhere a small image is required."),
        'passport_id': fields.char('Passport No'),
        'color': fields.integer('Color Index'),
        'city': fields.related('address_id', 'city', type='char', string='City'),
        'login': fields.related('user_id', 'login', type='char', string='Login', readonly=1),
        'last_login': fields.related('user_id', 'date', type='datetime', string='Latest Connection', readonly=1),
    }
    def _get_default_image(self, cr, uid, context=None):
        """Return the base64-encoded default avatar shipped with the module."""
        image_path = get_module_resource('hr', 'static/src/img', 'default_image.png')
        return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
    # Must be named `_defaults` (with underscore) for the ORM to pick it up;
    # a plain `defaults` attribute is silently ignored.
    _defaults = {
        'active': 1,
        'image': _get_default_image,
        'color': 0,
    }
    def _broadcast_welcome(self, cr, uid, employee_id, context=None):
        """ Broadcast the welcome message to all users in the employee company. """
        employee = self.browse(cr, uid, employee_id, context=context)
        partner_ids = []
        _model, group_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'group_user')
        # Resolve the company to broadcast in, in decreasing order of trust.
        if employee.user_id:
            company_id = employee.user_id.company_id.id
        elif employee.company_id:
            company_id = employee.company_id.id
        elif employee.job_id:
            company_id = employee.job_id.company_id.id
        elif employee.department_id:
            company_id = employee.department_id.company_id.id
        else:
            company_id = self.pool['res.company']._company_default_get(cr, uid, 'hr.employee', context=context)
        res_users = self.pool['res.users']
        user_ids = res_users.search(
            cr, SUPERUSER_ID, [
                ('company_id', '=', company_id),
                ('groups_id', 'in', group_id)
            ], context=context)
        partner_ids = list(set(u.partner_id.id for u in res_users.browse(cr, SUPERUSER_ID, user_ids, context=context)))
        self.message_post(
            cr, uid, [employee_id],
            body=_('Welcome to %s! Please help him/her take the first steps with Odoo!') % (employee.name),
            partner_ids=partner_ids,
            subtype='mail.mt_comment', context=context
        )
        return True
    def create(self, cr, uid, data, context=None):
        """Create the employee; optionally broadcast a welcome message when
        context['mail_broadcast'] is set (creation log is suppressed then)."""
        context = dict(context or {})
        if context.get("mail_broadcast"):
            context['mail_create_nolog'] = True
        employee_id = super(hr_employee, self).create(cr, uid, data, context=context)
        if context.get("mail_broadcast"):
            self._broadcast_welcome(cr, uid, employee_id, context=context)
        return employee_id
    def unlink(self, cr, uid, ids, context=None):
        """Delete employees along with their delegated resource records."""
        resource_ids = []
        for employee in self.browse(cr, uid, ids, context=context):
            resource_ids.append(employee.resource_id.id)
        super(hr_employee, self).unlink(cr, uid, ids, context=context)
        return self.pool.get('resource.resource').unlink(cr, uid, resource_ids, context=context)
    def onchange_address_id(self, cr, uid, ids, address, context=None):
        """Propagate phone numbers from the selected working address."""
        if address:
            address = self.pool.get('res.partner').browse(cr, uid, address, context=context)
            return {'value': {'work_phone': address.phone, 'mobile_phone': address.mobile}}
        return {'value': {}}
    def onchange_company(self, cr, uid, ids, company, context=None):
        """Default the working address to the company's default address."""
        address_id = False
        if company:
            company_id = self.pool.get('res.company').browse(cr, uid, company, context=context)
            address = self.pool.get('res.partner').address_get(cr, uid, [company_id.partner_id.id], ['default'])
            address_id = address and address['default'] or False
        return {'value': {'address_id': address_id}}
    def onchange_department_id(self, cr, uid, ids, department_id, context=None):
        """Default the manager to the department's manager."""
        value = {'parent_id': False}
        if department_id:
            department = self.pool.get('hr.department').browse(cr, uid, department_id)
            value['parent_id'] = department.manager_id.id
        return {'value': value}
    def onchange_user(self, cr, uid, ids, user_id, context=None):
        """Default the work email from the linked user's email."""
        work_email = False
        if user_id:
            work_email = self.pool.get('res.users').browse(cr, uid, user_id, context=context).email
        return {'value': {'work_email': work_email}}
    def action_follow(self, cr, uid, ids, context=None):
        """ Wrapper because message_subscribe_users take a user_ids=None
            that receive the context without the wrapper. """
        return self.message_subscribe_users(cr, uid, ids, context=context)
    def action_unfollow(self, cr, uid, ids, context=None):
        """ Wrapper because message_unsubscribe_users take a user_ids=None
            that receive the context without the wrapper. """
        return self.message_unsubscribe_users(cr, uid, ids, context=context)
    def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
        """Show the suggestion of employees if display_employees_suggestions if the
        user perference allows it. """
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        if not user.display_employees_suggestions:
            return []
        else:
            return super(hr_employee, self).get_suggested_thread(cr, uid, removed_suggested_threads, context)
    def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=None, context=None):
        """ Overwrite of the original method to always follow user_id field,
        even when not track_visibility so that a user will follow it's employee
        """
        if auto_follow_fields is None:
            auto_follow_fields = ['user_id']
        user_field_lst = []
        for name, field in self._fields.items():
            if name in auto_follow_fields and name in updated_fields and field.comodel_name == 'res.users':
                user_field_lst.append(name)
        return user_field_lst
    def _check_recursion(self, cr, uid, ids, context=None):
        """Detect manager cycles by walking up parent_id (depth cap 100)."""
        level = 100
        while len(ids):
            cr.execute('SELECT DISTINCT parent_id FROM hr_employee WHERE id IN %s AND parent_id!=id',(tuple(ids),))
            ids = filter(None, map(lambda x:x[0], cr.fetchall()))
            if not level:
                return False
            level -= 1
        return True
    _constraints = [
        (_check_recursion, 'Error! You cannot create recursive hierarchy of Employee(s).', ['parent_id']),
    ]
class hr_department(osv.osv):
    """Hierarchical department with manager, members and job positions."""
    def _dept_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        # Function-field wrapper exposing name_get as 'complete_name'.
        res = self.name_get(cr, uid, ids, context=context)
        return dict(res)
    _name = "hr.department"
    _columns = {
        'name': fields.char('Department Name', required=True),
        'complete_name': fields.function(_dept_name_get_fnc, type="char", string='Name'),
        'company_id': fields.many2one('res.company', 'Company', select=True, required=False),
        'parent_id': fields.many2one('hr.department', 'Parent Department', select=True),
        'child_ids': fields.one2many('hr.department', 'parent_id', 'Child Departments'),
        'manager_id': fields.many2one('hr.employee', 'Manager'),
        'member_ids': fields.one2many('hr.employee', 'department_id', 'Members', readonly=True),
        'jobs_ids': fields.one2many('hr.job', 'department_id', 'Jobs'),
        'note': fields.text('Note'),
    }
    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr.department', context=c),
    }
    def _check_recursion(self, cr, uid, ids, context=None):
        """Detect department cycles by walking up parent_id (depth cap 100)."""
        if context is None:
            context = {}
        level = 100
        while len(ids):
            cr.execute('select distinct parent_id from hr_department where id IN %s',(tuple(ids),))
            ids = filter(None, map(lambda x:x[0], cr.fetchall()))
            if not level:
                return False
            level -= 1
        return True
    _constraints = [
        (_check_recursion, 'Error! You cannot create recursive departments.', ['parent_id'])
    ]
    def name_get(self, cr, uid, ids, context=None):
        """Return (id, display_name) pairs, prefixing the parent department."""
        if context is None:
            context = {}
        if not ids:
            return []
        reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
        res = []
        for record in reads:
            name = record['name']
            if record['parent_id']:
                name = record['parent_id'][1]+' / '+name
            res.append((record['id'], name))
        return res
class res_users(osv.osv):
    """Extend res.users with the inverse link to its employee records."""
    _name = 'res.users'
    _inherit = 'res.users'
    _columns = {
        'employee_ids': fields.one2many('hr.employee', 'user_id', 'Related employees'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TimYi/django | django/views/decorators/clickjacking.py | 335 | 1744 | from functools import wraps
from django.utils.decorators import available_attrs
def xframe_options_deny(view_func):
    """
    Modifies a view function so its response has the X-Frame-Options HTTP
    header set to 'DENY' as long as the response doesn't already have that
    header set.

    e.g.

    @xframe_options_deny
    def some_view(request):
        ...
    """
    def _wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # Respect an explicitly-set header; only fill in when absent.
        if response.get('X-Frame-Options') is None:
            response['X-Frame-Options'] = 'DENY'
        return response
    decorate = wraps(view_func, assigned=available_attrs(view_func))
    return decorate(_wrapped_view)
def xframe_options_sameorigin(view_func):
    """
    Modifies a view function so its response has the X-Frame-Options HTTP
    header set to 'SAMEORIGIN' as long as the response doesn't already have
    that header set.

    e.g.

    @xframe_options_sameorigin
    def some_view(request):
        ...
    """
    def _wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # Respect an explicitly-set header; only fill in when absent.
        if response.get('X-Frame-Options') is None:
            response['X-Frame-Options'] = 'SAMEORIGIN'
        return response
    decorate = wraps(view_func, assigned=available_attrs(view_func))
    return decorate(_wrapped_view)
def xframe_options_exempt(view_func):
    """
    Modifies a view function by setting a response variable that instructs
    XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header.

    e.g.

    @xframe_options_exempt
    def some_view(request):
        ...
    """
    def _wrapped_view(*args, **kwargs):
        response = view_func(*args, **kwargs)
        # Flag checked by XFrameOptionsMiddleware to skip header injection.
        response.xframe_options_exempt = True
        return response
    decorate = wraps(view_func, assigned=available_attrs(view_func))
    return decorate(_wrapped_view)
| bsd-3-clause |
grengojbo/st2 | st2api/st2api/controllers/v1/runnertypes.py | 7 | 2642 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mongoengine import ValidationError
from pecan import abort
from pecan.rest import RestController
import six
from st2common import log as logging
from st2common.models.api.base import jsexpose
from st2common.models.api.action import RunnerTypeAPI
from st2common.persistence.runner import RunnerType
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class RunnerTypesController(RestController):
    """
    RESTful web endpoint exposing read access to the RunnerType objects
    registered in the system.
    """
    @staticmethod
    def __get_by_id(id):
        # Resolve a RunnerType by primary key, aborting the request with a
        # 404 when the id is malformed or unknown.
        try:
            return RunnerType.get_by_id(id)
        except (ValueError, ValidationError) as exc:
            msg = 'Database lookup for id="%s" resulted in exception. %s' % (id, exc)
            LOG.exception(msg)
            abort(http_client.NOT_FOUND, msg)
    @staticmethod
    def __get_by_name(name):
        # Resolve a RunnerType by name; unlike the id lookup, a miss is not
        # fatal and simply yields an empty list.
        try:
            found = RunnerType.get_by_name(name)
        except ValueError as exc:
            LOG.debug('Database lookup for name="%s" resulted in exception : %s.', name, exc)
            return []
        return [found]
    @jsexpose(arg_types=[str])
    def get_one(self, id):
        """
        Fetch a single RunnerType by id.
        Handle:
            GET /runnertypes/1
        """
        db_model = RunnerTypesController.__get_by_id(id)
        return RunnerTypeAPI.from_model(db_model)
    @jsexpose(arg_types=[str])
    def get_all(self, **kw):
        """
        Fetch every RunnerType registered in the system.
        Handles requests:
            GET /runnertypes/
        """
        return [RunnerTypeAPI.from_model(db_model)
                for db_model in RunnerType.get_all(**kw)]
| apache-2.0 |
odoo-turkiye/odoo | addons/account_sequence/__init__.py | 433 | 1104 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_sequence
import account_sequence_installer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openhatch/oh-mainline | vendor/packages/twisted/twisted/conch/client/agent.py | 69 | 1730 | # -*- test-case-name: twisted.conch.test.test_default -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Accesses the key agent for user authentication.
Maintainer: Paul Swartz
"""
import os
from twisted.conch.ssh import agent, channel, keys
from twisted.internet import protocol, reactor
from twisted.python import log
class SSHAgentClient(agent.SSHAgentClient):
    """Agent client that caches the public key blobs it retrieves."""
    def __init__(self):
        agent.SSHAgentClient.__init__(self)
        self.blobs = []
    def getPublicKeys(self):
        # Ask the agent for its identities and cache the resulting blobs.
        d = self.requestIdentities()
        return d.addCallback(self._cbPublicKeys)
    def _cbPublicKeys(self, blobcomm):
        log.msg('got %i public keys' % len(blobcomm))
        # Each entry is (blob, comment); keep only the key blobs.
        self.blobs = [pair[0] for pair in blobcomm]
    def getPublicKey(self):
        """
        Return a L{Key} built from the first cached blob, or C{None} when no
        blobs remain.
        """
        if not self.blobs:
            return None
        return keys.Key.fromString(self.blobs.pop(0))
class SSHAgentForwardingChannel(channel.SSHChannel):
    """
    SSH channel that forwards agent requests arriving over the channel to
    the user's local ssh-agent via the UNIX socket in $SSH_AUTH_SOCK.
    """
    def channelOpen(self, specificData):
        # Initialize state first: if the channel closes before the UNIX
        # connection is made, closed() would otherwise hit an AttributeError
        # on self.local.
        self.local = None
        self.buf = ''
        cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal)
        d = cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
        d.addCallback(self._cbGotLocal)
        d.addErrback(lambda x: self.loseConnection())
    def _cbGotLocal(self, local):
        # Wire the two endpoints together: channel data goes straight to the
        # local agent socket and vice versa.
        self.local = local
        self.dataReceived = self.local.transport.write
        self.local.dataReceived = self.write
        if self.buf:
            # Bug fix: flush any data that arrived before the local agent
            # connection was established; previously it was silently dropped.
            self.local.transport.write(self.buf)
            self.buf = ''
    def dataReceived(self, data):
        # Buffer until _cbGotLocal rebinds this method to transport.write.
        self.buf += data
    def closed(self):
        # getattr guard: channelOpen may never have run.
        if getattr(self, 'local', None):
            self.local.loseConnection()
            self.local = None
class SSHAgentForwardingLocal(protocol.Protocol):
    # Protocol for the UNIX-socket connection to the local ssh-agent.
    # SSHAgentForwardingChannel rebinds dataReceived on instances of this
    # class at runtime, so no behavior needs to be defined here.
    pass
| agpl-3.0 |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/GL/EXT/cmyka.py | 1 | 1439 | '''OpenGL extension EXT.cmyka
This module customises the behaviour of the
OpenGL.raw.GL.EXT.cmyka to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a simple method for OpenGL to read and store
images whose pixels have CMYK or CMYKA formats. The algorithms used to
convert to RGBA from CMYKA and to convert back from RGBA to CMYKA are of
the "black-box" nature, meaning that the application has little control
over how the conversion is done. Also, this black-box mechanism is
available only for transfers to or from memory, not for internal copies
of pixel data (such as invoked by CopyPixels, CopyTexImage1D, etc.)
However, the defined mechanism nicely handles 5-component CMYKA images,
and it is very easy to use.
A more configurable and potentially higher quality color conversion can
be implemented using the color tables, the color matrix, and possibly 3D
and 4D texture lookup. Such a color conversion also applies to copied
pixel data.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/cmyka.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.cmyka import *
### END AUTOGENERATED SECTION
from OpenGL import images as _i
# Register channel counts for the CMYK pixel formats so the generic image
# helpers can size buffers for them: CMYK has 4 components, CMYKA adds an
# alpha channel for 5.
_i.COMPONENT_COUNTS[ GL_CMYK_EXT ] = 4
_i.COMPONENT_COUNTS[ GL_CMYKA_EXT ] = 5
| bsd-2-clause |
RobertoMalatesta/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py | 124 | 3268 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Unit tests for changedlinepattern.py.'''
import re
import unittest2 as unittest
from webkitpy.common.watchlist.changedlinepattern import ChangedLinePattern
class ChangedLinePatternTest(unittest.TestCase):
    """Unit tests for ChangedLinePattern matching against a fake diff."""
    # A quick note about the diff file structure.
    # The first column indicated the old line number.
    # The second column indicates the new line number.
    # 0 in either column indicates it had no old or new line number.
    _DIFF_FILE = ((0, 1, 'hi'),
                  (1, 0, 'bye'),
                  (2, 2, 'other'),
                  (3, 0, 'both'),
                  (0, 3, 'both'),
                  )
    def run_changed_line_pattern_match(self, pattern, index_for_zero_value):
        # Helper: index_for_zero_value selects which diff column must be zero
        # for a line to be considered (0 -> added lines, 1 -> removed lines).
        return ChangedLinePattern(re.compile(pattern), index_for_zero_value).match(None, self._DIFF_FILE)
    def test_added_lines(self):
        self.assertTrue(self.run_changed_line_pattern_match('hi', 0))
        self.assertTrue(self.run_changed_line_pattern_match('h.', 0))
        self.assertTrue(self.run_changed_line_pattern_match('both', 0))
        self.assertFalse(self.run_changed_line_pattern_match('bye', 0))
        self.assertFalse(self.run_changed_line_pattern_match('y', 0))
        self.assertFalse(self.run_changed_line_pattern_match('other', 0))
    def test_removed_lines(self):
        self.assertFalse(self.run_changed_line_pattern_match('hi', 1))
        self.assertFalse(self.run_changed_line_pattern_match('h.', 1))
        self.assertTrue(self.run_changed_line_pattern_match('both', 1))
        self.assertTrue(self.run_changed_line_pattern_match('bye', 1))
        self.assertTrue(self.run_changed_line_pattern_match('y', 1))
        self.assertFalse(self.run_changed_line_pattern_match('other', 1))
| bsd-3-clause |
neerja28/Tempest | tempest/api/object_storage/test_account_services.py | 4 | 14435 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from six import moves
from tempest_lib.common.utils import data_utils
import testtools
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest import config
from tempest import test
CONF = config.CONF
class AccountTest(base.BaseObjectTest):
    """Exercise the Swift account-level API: container listings (with the
    various paging/format parameters) and account metadata handling."""
    credentials = [['operator', CONF.object_storage.operator_role],
                   ['operator_alt', CONF.object_storage.operator_role]]
    # Shared across the class; populated once in resource_setup().
    containers = []
    @classmethod
    def setup_credentials(cls):
        super(AccountTest, cls).setup_credentials()
        cls.os = cls.os_roles_operator
        # Second operator whose account stays empty; used by
        # test_list_no_containers.
        cls.os_operator = cls.os_roles_operator_alt
    @classmethod
    def resource_setup(cls):
        super(AccountTest, cls).resource_setup()
        # Create six containers named a-* .. f-* so listings have a known,
        # alphabetically ordered population to page through.
        for i in moves.xrange(ord('a'), ord('f') + 1):
            name = data_utils.rand_name(name='%s-' % chr(i))
            cls.container_client.create_container(name)
            cls.containers.append(name)
        cls.containers_count = len(cls.containers)
    @classmethod
    def resource_cleanup(cls):
        cls.delete_containers(cls.containers)
        super(AccountTest, cls).resource_cleanup()
    @test.attr(type='smoke')
    @test.idempotent_id('3499406a-ae53-4f8c-b43a-133d4dc6fe3f')
    def test_list_containers(self):
        # list of all containers should not be empty
        resp, container_list = self.account_client.list_account_containers()
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertIsNotNone(container_list)
        for container_name in self.containers:
            self.assertIn(container_name, container_list)
    @test.idempotent_id('884ec421-fbad-4fcc-916b-0580f2699565')
    def test_list_no_containers(self):
        # List request to empty account
        # To test listing no containers, create new user other than
        # the base user of this instance.
        resp, container_list = \
            self.os_operator.account_client.list_account_containers()
        # When sending a request to an account which has not received a PUT
        # container request, the response does not contain 'accept-ranges'
        # header. This is a special case, therefore the existence of response
        # headers is checked without custom matcher.
        self.assertIn('content-length', resp)
        self.assertIn('x-timestamp', resp)
        self.assertIn('x-account-bytes-used', resp)
        self.assertIn('x-account-container-count', resp)
        self.assertIn('x-account-object-count', resp)
        self.assertIn('content-type', resp)
        self.assertIn('x-trans-id', resp)
        self.assertIn('date', resp)
        # Check only the format of common headers with custom matcher
        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
        self.assertEqual(len(container_list), 0)
    @test.idempotent_id('1c7efa35-e8a2-4b0b-b5ff-862c7fd83704')
    def test_list_containers_with_format_json(self):
        # list containers setting format parameter to 'json'
        params = {'format': 'json'}
        resp, container_list = self.account_client.list_account_containers(
            params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertIsNotNone(container_list)
        self.assertTrue([c['name'] for c in container_list])
        self.assertTrue([c['count'] for c in container_list])
        self.assertTrue([c['bytes'] for c in container_list])
    @test.idempotent_id('4477b609-1ca6-4d4b-b25d-ad3f01086089')
    def test_list_containers_with_format_xml(self):
        # list containers setting format parameter to 'xml'
        params = {'format': 'xml'}
        resp, container_list = self.account_client.list_account_containers(
            params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertIsNotNone(container_list)
        self.assertEqual(container_list.tag, 'account')
        self.assertTrue('name' in container_list.keys())
        self.assertEqual(container_list.find(".//container").tag, 'container')
        self.assertEqual(container_list.find(".//name").tag, 'name')
        self.assertEqual(container_list.find(".//count").tag, 'count')
        self.assertEqual(container_list.find(".//bytes").tag, 'bytes')
    @test.idempotent_id('6eb04a6a-4860-4e31-ba91-ea3347d76b58')
    @testtools.skipIf(
        not CONF.object_storage_feature_enabled.discoverability,
        'Discoverability function is disabled')
    def test_list_extensions(self):
        resp, extensions = self.account_client.list_extensions()
        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
    @test.idempotent_id('5cfa4ab2-4373-48dd-a41f-a532b12b08b2')
    def test_list_containers_with_limit(self):
        # list containers one of them, half of them then all of them
        for limit in (1, self.containers_count / 2, self.containers_count):
            params = {'limit': limit}
            resp, container_list = \
                self.account_client.list_account_containers(params=params)
            self.assertHeaders(resp, 'Account', 'GET')
            self.assertEqual(len(container_list), limit)
    @test.idempotent_id('638f876d-6a43-482a-bbb3-0840bca101c6')
    def test_list_containers_with_marker(self):
        # list containers using marker param
        # first expect to get 0 container as we specified last
        # the container as marker
        # second expect to get the bottom half of the containers
        params = {'marker': self.containers[-1]}
        resp, container_list = \
            self.account_client.list_account_containers(params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertEqual(len(container_list), 0)
        params = {'marker': self.containers[self.containers_count / 2]}
        resp, container_list = \
            self.account_client.list_account_containers(params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertEqual(len(container_list), self.containers_count / 2 - 1)
    @test.idempotent_id('5ca164e4-7bde-43fa-bafb-913b53b9e786')
    def test_list_containers_with_end_marker(self):
        # list containers using end_marker param
        # first expect to get 0 container as we specified first container as
        # end_marker
        # second expect to get the top half of the containers
        params = {'end_marker': self.containers[0]}
        resp, container_list = \
            self.account_client.list_account_containers(params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertEqual(len(container_list), 0)
        params = {'end_marker': self.containers[self.containers_count / 2]}
        resp, container_list = \
            self.account_client.list_account_containers(params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertEqual(len(container_list), self.containers_count / 2)
    @test.idempotent_id('ac8502c2-d4e4-4f68-85a6-40befea2ef5e')
    def test_list_containers_with_marker_and_end_marker(self):
        # list containers combining marker and end_marker param
        params = {'marker': self.containers[0],
                  'end_marker': self.containers[self.containers_count - 1]}
        resp, container_list = self.account_client.list_account_containers(
            params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertEqual(len(container_list), self.containers_count - 2)
    @test.idempotent_id('f7064ae8-dbcc-48da-b594-82feef6ea5af')
    def test_list_containers_with_limit_and_marker(self):
        # list containers combining marker and limit param
        # result are always limitated by the limit whatever the marker
        # NOTE(review): random.choice() returns a single container name, so
        # this loop iterates over its *characters* as markers; possibly
        # random.sample() was intended -- verify before relying on this.
        for marker in random.choice(self.containers):
            limit = random.randint(0, self.containers_count - 1)
            params = {'marker': marker,
                      'limit': limit}
            resp, container_list = \
                self.account_client.list_account_containers(params=params)
            self.assertHeaders(resp, 'Account', 'GET')
            self.assertTrue(len(container_list) <= limit, str(container_list))
    @test.idempotent_id('888a3f0e-7214-4806-8e50-5e0c9a69bb5e')
    def test_list_containers_with_limit_and_end_marker(self):
        # list containers combining limit and end_marker param
        limit = random.randint(1, self.containers_count)
        params = {'limit': limit,
                  'end_marker': self.containers[self.containers_count / 2]}
        resp, container_list = self.account_client.list_account_containers(
            params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertEqual(len(container_list),
                         min(limit, self.containers_count / 2))
    @test.idempotent_id('8cf98d9c-e3a0-4e44-971b-c87656fdddbd')
    def test_list_containers_with_limit_and_marker_and_end_marker(self):
        # list containers combining limit, marker and end_marker param
        limit = random.randint(1, self.containers_count)
        params = {'limit': limit,
                  'marker': self.containers[0],
                  'end_marker': self.containers[self.containers_count - 1]}
        resp, container_list = self.account_client.list_account_containers(
            params=params)
        self.assertHeaders(resp, 'Account', 'GET')
        self.assertEqual(len(container_list),
                         min(limit, self.containers_count - 2))
    @test.attr(type='smoke')
    @test.idempotent_id('4894c312-6056-4587-8d6f-86ffbf861f80')
    def test_list_account_metadata(self):
        # list all account metadata
        # set metadata to account
        metadata = {'test-account-meta1': 'Meta1',
                    'test-account-meta2': 'Meta2'}
        resp, _ = self.account_client.create_account_metadata(metadata)
        resp, _ = self.account_client.list_account_metadata()
        self.assertHeaders(resp, 'Account', 'HEAD')
        self.assertIn('x-account-meta-test-account-meta1', resp)
        self.assertIn('x-account-meta-test-account-meta2', resp)
        self.account_client.delete_account_metadata(metadata)
    @test.idempotent_id('b904c2e3-24c2-4dba-ad7d-04e90a761be5')
    def test_list_no_account_metadata(self):
        # list no account metadata
        resp, _ = self.account_client.list_account_metadata()
        self.assertHeaders(resp, 'Account', 'HEAD')
        self.assertNotIn('x-account-meta-', str(resp))
    @test.idempotent_id('e2a08b5f-3115-4768-a3ee-d4287acd6c08')
    def test_update_account_metadata_with_create_metadata(self):
        # add metadata to account
        metadata = {'test-account-meta1': 'Meta1'}
        resp, _ = self.account_client.create_account_metadata(metadata)
        self.assertHeaders(resp, 'Account', 'POST')
        resp, body = self.account_client.list_account_metadata()
        self.assertIn('x-account-meta-test-account-meta1', resp)
        self.assertEqual(resp['x-account-meta-test-account-meta1'],
                         metadata['test-account-meta1'])
        self.account_client.delete_account_metadata(metadata)
    @test.idempotent_id('9f60348d-c46f-4465-ae06-d51dbd470953')
    def test_update_account_metadata_with_delete_matadata(self):
        # delete metadata from account
        metadata = {'test-account-meta1': 'Meta1'}
        self.account_client.create_account_metadata(metadata)
        resp, _ = self.account_client.delete_account_metadata(metadata)
        self.assertHeaders(resp, 'Account', 'POST')
        resp, _ = self.account_client.list_account_metadata()
        self.assertNotIn('x-account-meta-test-account-meta1', resp)
    @test.idempotent_id('64fd53f3-adbd-4639-af54-436e4982dbfb')
    def test_update_account_metadata_with_create_matadata_key(self):
        # if the value of metadata is not set, the metadata is not
        # registered at a server
        metadata = {'test-account-meta1': ''}
        resp, _ = self.account_client.create_account_metadata(metadata)
        self.assertHeaders(resp, 'Account', 'POST')
        resp, _ = self.account_client.list_account_metadata()
        self.assertNotIn('x-account-meta-test-account-meta1', resp)
    @test.idempotent_id('d4d884d3-4696-4b85-bc98-4f57c4dd2bf1')
    def test_update_account_metadata_with_delete_matadata_key(self):
        # Although the value of metadata is not set, the feature of
        # deleting metadata is valid
        metadata_1 = {'test-account-meta1': 'Meta1'}
        self.account_client.create_account_metadata(metadata_1)
        metadata_2 = {'test-account-meta1': ''}
        resp, _ = self.account_client.delete_account_metadata(metadata_2)
        self.assertHeaders(resp, 'Account', 'POST')
        resp, _ = self.account_client.list_account_metadata()
        self.assertNotIn('x-account-meta-test-account-meta1', resp)
    @test.idempotent_id('8e5fc073-59b9-42ee-984a-29ed11b2c749')
    def test_update_account_metadata_with_create_and_delete_metadata(self):
        # Send a request adding and deleting metadata requests simultaneously
        metadata_1 = {'test-account-meta1': 'Meta1'}
        self.account_client.create_account_metadata(metadata_1)
        metadata_2 = {'test-account-meta2': 'Meta2'}
        resp, body = self.account_client.create_and_delete_account_metadata(
            metadata_2,
            metadata_1)
        self.assertHeaders(resp, 'Account', 'POST')
        resp, _ = self.account_client.list_account_metadata()
        self.assertNotIn('x-account-meta-test-account-meta1', resp)
        self.assertIn('x-account-meta-test-account-meta2', resp)
        self.assertEqual(resp['x-account-meta-test-account-meta2'],
                         metadata_2['test-account-meta2'])
        self.account_client.delete_account_metadata(metadata_2)
| apache-2.0 |
andymckay/olympia | src/olympia/stats/tasks.py | 4 | 13311 | import datetime
import httplib2
import itertools
from django.conf import settings
from django.db import connection
from django.db.models import Sum, Max
import commonware.log
from apiclient.discovery import build
from elasticsearch.helpers import bulk_index
from oauth2client.client import OAuth2Credentials
from olympia import amo
from olympia.amo import search as amo_search
from olympia.addons.models import Addon
from olympia.amo.celery import task
from olympia.bandwagon.models import Collection
from olympia.reviews.models import Review
from olympia.stats.models import Contribution
from olympia.users.models import UserProfile
from olympia.versions.models import Version
from . import search
from .models import (
AddonCollectionCount, CollectionCount, CollectionStats, DownloadCount,
ThemeUserCount, UpdateCount)
log = commonware.log.getLogger('z.task')
@task
def addon_total_contributions(*addons, **kw):
    """Recompute and store the total contribution amount for each add-on."""
    log.info('[%s@%s] Updating total contributions.' %
             (len(addons), addon_total_contributions.rate_limit))
    # Only count uuid=None; those are verified transactions.
    verified = Contribution.objects.filter(addon__in=addons, uuid=None)
    totals = verified.values_list('addon').annotate(Sum('amount'))
    for addon_id, amount in totals:
        Addon.objects.filter(id=addon_id).update(total_contributions=amount)
@task
def update_addons_collections_downloads(data, **kw):
    # Persist per-(addon, collection) download totals that were aggregated
    # elsewhere; `data` is a list of dicts with 'sum', 'addon' and
    # 'collection' keys.
    log.info("[%s] Updating addons+collections download totals." %
             (len(data)))
    cursor = connection.cursor()
    # One UPDATE statement per input row, concatenated into a single string
    # with the flattened parameter list passed alongside.
    # NOTE(review): executing multiple semicolon-separated statements in one
    # cursor.execute() call depends on the DB driver allowing it -- confirm
    # against the configured backend.
    q = ("UPDATE addons_collections SET downloads=%s WHERE addon_id=%s "
         "AND collection_id=%s;" * len(data))
    cursor.execute(q,
                   list(itertools.chain.from_iterable(
                       [var['sum'], var['addon'], var['collection']]
                       for var in data)))
@task
def update_collections_total(data, **kw):
    """Write pre-aggregated download totals back onto Collection rows."""
    log.info("[%s] Updating collections' download totals." %
             (len(data)))
    for entry in data:
        qs = Collection.objects.filter(pk=entry['collection_id'])
        qs.update(downloads=entry['sum'])
def get_profile_id(service, domain):
    """
    Return the Google Analytics profile ID whose website URL matches
    ``domain``, or None when no profile matches.
    """
    accounts = service.management().accounts().list().execute()
    for account in accounts.get('items', ()):
        webproperties = service.management().webproperties().list(
            accountId=account['id']).execute()
        for webproperty in webproperties.get('items', ()):
            profiles = service.management().profiles().list(
                accountId=account['id'],
                webPropertyId=webproperty['id']).execute()
            for profile in profiles.get('items', ()):
                # sometimes GA includes "http://", sometimes it doesn't.
                url = profile['websiteUrl']
                if '://' in url:
                    name = url.partition('://')[-1]
                else:
                    name = url
                if name == domain:
                    return profile['id']
@task
def update_google_analytics(date, **kw):
creds_data = getattr(settings, 'GOOGLE_ANALYTICS_CREDENTIALS', None)
if not creds_data:
log.critical('Failed to update global stats: '
'GOOGLE_ANALYTICS_CREDENTIALS not set')
return
creds = OAuth2Credentials(
*[creds_data[k] for k in
('access_token', 'client_id', 'client_secret',
'refresh_token', 'token_expiry', 'token_uri',
'user_agent')])
h = httplib2.Http()
creds.authorize(h)
service = build('analytics', 'v3', http=h)
domain = getattr(settings,
'GOOGLE_ANALYTICS_DOMAIN', None) or settings.DOMAIN
profile_id = get_profile_id(service, domain)
if profile_id is None:
log.critical('Failed to update global stats: could not access a Google'
' Analytics profile for ' + domain)
return
datestr = date.strftime('%Y-%m-%d')
try:
data = service.data().ga().get(ids='ga:' + profile_id,
start_date=datestr,
end_date=datestr,
metrics='ga:visits').execute()
# Storing this under the webtrends stat name so it goes on the
# same graph as the old webtrends data.
p = ['webtrends_DailyVisitors', data['rows'][0][0], date]
except Exception, e:
log.critical(
'Fetching stats data for %s from Google Analytics failed: %s' % e)
return
try:
cursor = connection.cursor()
cursor.execute('REPLACE INTO global_stats (name, count, date) '
'values (%s, %s, %s)', p)
except Exception, e:
log.critical('Failed to update global stats: (%s): %s' % (p, e))
return
log.debug('Committed global stats details: (%s) has (%s) for (%s)'
% tuple(p))
@task
def update_global_totals(job, date, **kw):
    # Compute one named global statistic for `date` and upsert it into the
    # global_stats table.
    log.info('Updating global statistics totals (%s) for (%s)' % (job, date))
    jobs = _get_daily_jobs(date)
    jobs.update(_get_metrics_jobs(date))
    # Each registry entry is a zero-argument callable; invoke the requested
    # one now (raises KeyError for an unknown job name).
    num = jobs[job]()
    q = """REPLACE INTO global_stats (`name`, `count`, `date`)
           VALUES (%s, %s, %s)"""
    p = [job, num or 0, date]
    try:
        cursor = connection.cursor()
        cursor.execute(q, p)
    except Exception, e:
        log.critical('Failed to update global stats: (%s): %s' % (p, e))
    log.debug('Committed global stats details: (%s) has (%s) for (%s)'
              % tuple(p))
def _get_daily_jobs(date=None):
    """Return a dictionary of statistics queries.
    If a date is specified and applies to the job it will be used. Otherwise
    the date will default to the previous day.
    """
    if not date:
        date = datetime.date.today() - datetime.timedelta(days=1)
    # Passing through a datetime would not generate an error,
    # but would pass and give incorrect values.
    if isinstance(date, datetime.datetime):
        raise ValueError('This requires a valid date, not a datetime')
    # Testing on lte created date doesn't get you todays date, you need to do
    # less than next date. That's because 2012-1-1 becomes 2012-1-1 00:00
    next_date = date + datetime.timedelta(days=1)
    date_str = date.strftime('%Y-%m-%d')
    extra = dict(where=['DATE(created)=%s'], params=[date_str])
    # If you're editing these, note that you are returning a function! This
    # cheesy hackery was done so that we could pass the queries to celery
    # lazily and not hammer the db with a ton of these all at once.
    # Entries are either explicit lambdas or bound queryset `.count` methods;
    # both are zero-argument callables when invoked by update_global_totals.
    stats = {
        # Add-on Downloads
        'addon_total_downloads': lambda: DownloadCount.objects.filter(
            date__lt=next_date).aggregate(sum=Sum('count'))['sum'],
        'addon_downloads_new': lambda: DownloadCount.objects.filter(
            date=date).aggregate(sum=Sum('count'))['sum'],
        # Add-on counts
        'addon_count_new': Addon.objects.extra(**extra).count,
        # Version counts
        'version_count_new': Version.objects.extra(**extra).count,
        # User counts
        'user_count_total': UserProfile.objects.filter(
            created__lt=next_date).count,
        'user_count_new': UserProfile.objects.extra(**extra).count,
        # Review counts
        'review_count_total': Review.objects.filter(created__lte=date,
                                                    editorreview=0).count,
        # We can't use "**extra" here, because this query joins on reviews
        # itself, and thus raises the following error:
        # "Column 'created' in where clause is ambiguous".
        'review_count_new': Review.objects.filter(editorreview=0).extra(
            where=['DATE(reviews.created)=%s'], params=[date_str]).count,
        # Collection counts
        'collection_count_total': Collection.objects.filter(
            created__lt=next_date).count,
        'collection_count_new': Collection.objects.extra(**extra).count,
        'collection_addon_downloads': (
            lambda: AddonCollectionCount.objects.filter(
                date__lte=date).aggregate(sum=Sum('count'))['sum']),
    }
    # If we're processing today's stats, we'll do some extras. We don't do
    # these for re-processed stats because they change over time (eg. add-ons
    # move from sandbox -> public
    if date == (datetime.date.today() - datetime.timedelta(days=1)):
        stats.update({
            'addon_count_experimental': Addon.objects.filter(
                created__lte=date, status=amo.STATUS_UNREVIEWED,
                disabled_by_user=0).count,
            'addon_count_nominated': Addon.objects.filter(
                created__lte=date, status=amo.STATUS_NOMINATED,
                disabled_by_user=0).count,
            'addon_count_public': Addon.objects.filter(
                created__lte=date, status=amo.STATUS_PUBLIC,
                disabled_by_user=0).count,
            'addon_count_pending': Version.objects.filter(
                created__lte=date, files__status=amo.STATUS_PENDING).count,
            'collection_count_private': Collection.objects.filter(
                created__lte=date, listed=0).count,
            'collection_count_public': Collection.objects.filter(
                created__lte=date, listed=1).count,
            'collection_count_editorspicks': Collection.objects.filter(
                created__lte=date, type=amo.COLLECTION_FEATURED).count,
            'collection_count_normal': Collection.objects.filter(
                created__lte=date, type=amo.COLLECTION_NORMAL).count,
        })
    return stats
def _get_metrics_jobs(date=None):
    """Return a dictionary of statistics queries.
    If a date is specified and applies to the job it will be used. Otherwise
    the date will default to the last date metrics put something in the db.
    """
    if date is None:
        date = UpdateCount.objects.aggregate(max=Max('date'))['max']
    # If you're editing these, note that you are returning a function!
    return {
        'addon_total_updatepings': lambda: UpdateCount.objects.filter(
            date=date).aggregate(sum=Sum('count'))['sum'],
        'collector_updatepings': lambda: UpdateCount.objects.get(
            addon=settings.ADDON_COLLECTOR_ID, date=date).count,
    }
@task
def index_update_counts(ids, index=None, **kw):
    # Bulk-index the UpdateCount rows with the given ids into elasticsearch;
    # on failure the celery task is re-scheduled with the same arguments.
    index = index or search.get_alias()
    es = amo_search.get_es()
    qs = UpdateCount.objects.filter(id__in=ids)
    if qs:
        log.info('Indexing %s updates for %s.' % (qs.count(), qs[0].date))
        data = []
        try:
            for update in qs:
                data.append(search.extract_update_count(update))
            bulk_index(es, data, index=index,
                       doc_type=UpdateCount.get_mapping_type(), refresh=True)
        except Exception, exc:
            # Retry with the same args, then re-raise so this run is
            # recorded as failed.
            index_update_counts.retry(args=[ids, index], exc=exc, **kw)
            raise
@task
def index_download_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = DownloadCount.objects.filter(id__in=ids)
if qs:
log.info('Indexing %s downloads for %s.' % (qs.count(), qs[0].date))
try:
data = []
for dl in qs:
data.append(search.extract_download_count(dl))
bulk_index(es, data, index=index,
doc_type=DownloadCount.get_mapping_type(), refresh=True)
except Exception, exc:
index_download_counts.retry(args=[ids, index], exc=exc)
raise
@task
def index_collection_counts(ids, index=None, **kw):
    # Bulk-index CollectionCount documents (together with their per-date
    # AddonCollectionCount and CollectionStats rows) into elasticsearch.
    index = index or search.get_alias()
    es = amo_search.get_es()
    qs = CollectionCount.objects.filter(collection__in=ids)
    if qs:
        log.info('Indexing %s addon collection counts: %s'
                 % (qs.count(), qs[0].date))
        data = []
        try:
            for collection_count in qs:
                collection = collection_count.collection_id
                filters = dict(collection=collection,
                               date=collection_count.date)
                data.append(search.extract_addon_collection(
                    collection_count,
                    AddonCollectionCount.objects.filter(**filters),
                    CollectionStats.objects.filter(**filters)))
            bulk_index(es, data, index=index,
                       doc_type=CollectionCount.get_mapping_type(),
                       refresh=True)
        except Exception, exc:
            # NOTE(review): unlike index_update_counts, this retry drops the
            # `index` argument and the task kwargs -- confirm whether that is
            # intentional.
            index_collection_counts.retry(args=[ids], exc=exc)
            raise
@task
def index_theme_user_counts(ids, index=None, **kw):
    # Bulk-index ThemeUserCount rows with the given ids into elasticsearch.
    index = index or search.get_alias()
    es = amo_search.get_es()
    qs = ThemeUserCount.objects.filter(id__in=ids)
    if qs:
        log.info('Indexing %s theme user counts for %s.'
                 % (qs.count(), qs[0].date))
        data = []
        try:
            for user_count in qs:
                data.append(search.extract_theme_user_count(user_count))
            bulk_index(es, data, index=index,
                       doc_type=ThemeUserCount.get_mapping_type(), refresh=True)
        except Exception, exc:
            # NOTE(review): the retry omits the `index` argument that the
            # sibling index_update_counts forwards -- confirm intent.
            index_theme_user_counts.retry(args=[ids], exc=exc, **kw)
            raise
| bsd-3-clause |
chokribr/invenio | invenio/legacy/bibknowledge/admin.py | 13 | 31360 | # This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibKnowledge Administrator Interface."""
import os
import cgi
import sys
from invenio.legacy.bibknowledge import adminlib as bibknowledgeadminlib
from invenio.modules.knowledge import api as bibknowledge
from invenio.legacy.bibrank.adminlib import check_user
from invenio.legacy.webpage import page, error_page
from invenio.legacy.webuser import getUid, page_not_authorized
from invenio.base.i18n import wash_language, gettext_set_language
from invenio.utils.url import wash_url_argument, redirect_to_url
from invenio.config import CFG_SITE_LANG, CFG_SITE_SECURE_URL, \
CFG_SITE_NAME, CFG_WEBDIR
__lastupdated__ = """$Date$"""
def index(req, ln=CFG_SITE_LANG, search="", descriptiontoo=""):
    """
    Entry point for the bibknowledgeadmin.py/kb_manage URL.

    Thin wrapper that delegates straight to kb_manage.
    @param search search for a substring in kb names
    @param descriptiontoo .. and descriptions
    """
    return kb_manage(req, ln=ln, search=search, descriptiontoo=descriptiontoo)
def kb_manage(req, ln=CFG_SITE_LANG, search="", descriptiontoo=""):
    """
    Main BibKnowledge administration page.

    @param ln language
    @param search search for a substring in kb names
    @param descriptiontoo .. and descriptions
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    warnings = []
    # Check if user is authorized to administer
    # If not, still display page but offer to log in
    try:
        uid = getUid(req)
    except Exception:
        # narrowed from a bare except, which would also have swallowed
        # SystemExit/KeyboardInterrupt
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    # check_user returns a zero auth_code for authorized users
    is_admin = not auth_code
    navtrail = '''<a class="navtrail" href="%s/help/admin">%s</a>''' % \
               (CFG_SITE_SECURE_URL, _("Admin Area"))
    if is_admin:
        return page(title=_("BibKnowledge Admin"),
                    body=bibknowledgeadminlib.perform_request_knowledge_bases_management(ln=ln, search=search, descriptiontoo=descriptiontoo),
                    language=ln,
                    uid=uid,
                    navtrail = navtrail,
                    lastupdated=__lastupdated__,
                    req=req,
                    warnings=warnings)
    else:
        #redirect to login
        return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail)
def kb_upload(req, kb, ln=CFG_SITE_LANG):
    """
    Uploads the RDF file attached to the request into the kb file area.

    The file is stored as CFG_WEBDIR/kbfiles/<kb_id>.rdf.
    @param req the request, carrying a multipart form with a 'file' field
    @param kb the knowledge base id
    @param ln language
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail = '''<a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % \
               (CFG_SITE_SECURE_URL, ln, _("Knowledge Bases"))
    try:
        dummy = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        #get the form
        form = req.form
        #get the contents from the form
        if 'file' not in form or not form['file'].filename:
            return page(title=_("Cannot upload file"),
                        body = _("You have not selected a file to upload"),
                        language=ln,
                        navtrail = navtrail,
                        lastupdated=__lastupdated__,
                        req=req)
        fileitem = form['file']
        uploaddir = CFG_WEBDIR+"/kbfiles"
        #create a upload directory unless already exists
        if os.path.isfile(uploaddir):
            return page(title=_("Cannot upload file"),
                        body = "Cannot create directory " + \
                        uploaddir+" since it already" + \
                        " exists and it is a file",
                        language=ln,
                        navtrail = navtrail,
                        lastupdated=__lastupdated__,
                        req=req)
        if not os.path.isdir(uploaddir):
            try:
                os.mkdir(uploaddir)
            except OSError:
                # mkdir failures (permissions, races) surface as OSError
                return page(title=_("Cannot upload file"),
                            body = "Cannot create directory "+uploaddir+ \
                            " maybe no access rights",
                            language=ln,
                            navtrail = navtrail,
                            lastupdated=__lastupdated__,
                            req=req)
        #if we are here we can try to write
        #get the name and the file..
        fn = str(kb_id)+".rdf"
        # context manager closes the handle even on write failure
        # (previously the handle from open(...).write(...) leaked)
        with open(uploaddir+"/"+fn, 'w') as rdffile:
            rdffile.write(fileitem.read())
        body = (_("File %(x_name)s uploaded.", x_name=('kbfiles/' + cgi.escape(fn))))
        body += " <a href='"+CFG_SITE_SECURE_URL+"/kb'>%s</a>" % _("Back")
        return(page(title=_("File uploaded"),
                    body = body,
                    language=ln,
                    navtrail = navtrail,
                    lastupdated=__lastupdated__,
                    req=req))
    else:
        return(page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail))
def kb_show(req, kb, sortby="to", ln=CFG_SITE_LANG, startat=0, search=""):
    """
    Shows the content of the given knowledge base id. Check for authentication and kb existence.
    Before displaying the content of the knowledge base, check if a form was submitted asking for
    adding, editing or removing a value.

    @param ln language
    @param kb the kb id to show
    @param sortby the sorting criteria ('from' or 'to')
    @param startat the number from which start showing mapping rules in kb
    @param search search for this string in the kb
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = '''
    > <a class="navtrail"
    href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL,
                                    ln, _("Manage Knowledge Bases"))
    try:
        uid = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        return page(title=_("Knowledge Base %(x_name)s", x_name=kb_name),
                    body=bibknowledgeadminlib.perform_request_knowledge_base_show(ln=ln,
                                      kb_id=kb_id, sortby=sortby, startat=startat,
                                      search_term=search),
                    uid=uid,
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    else:
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_show_attributes(req, kb, ln=CFG_SITE_LANG, sortby="to"):
    """
    Shows the attributes (name, description) of a given kb.

    @param ln language
    @param kb the kb id to show
    @param sortby the sorting criteria ('from' or 'to')
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        uid = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        return page(title=_("Knowledge Base %(x_name)s Attributes", x_name=kb_name),
                    body=bibknowledgeadminlib.perform_request_knowledge_base_show_attributes(ln=ln,
                                                                     kb_id=kb_id,
                                                                     sortby=sortby),
                    uid=uid,
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    else:
        return page_not_authorized(req=req, text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_dynamic_update(req, kb_id, field, expression, collection,
                      ln=CFG_SITE_LANG):
    """
    Updates the configuration of a collection based KB by checking user
    rights and calling bibknowledgeadminlib.

    @param req request
    @param kb_id knowledge base id
    @param field configured field for this dynamic kb
    @param expression search expression
    @param collection search in this collection
    @param ln language
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        #actual config call
        err = bibknowledgeadminlib.perform_update_kb_config(kb_id, field,
                                                            expression,
                                                            collection)
        if err:
            return page(title=_("Error"),
                        body = err,
                        language=ln,
                        navtrail = navtrail_previous_links,
                        lastupdated=__lastupdated__,
                        req=req)
        else:
            redirect_to_url(req, "kb?ln=%(ln)s&kb=%(kb_id)s" % {'ln':ln, 'kb_id': kb_id })
    else:
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_show_dependencies(req, kb, ln=CFG_SITE_LANG, sortby="to"):
    """
    Shows the dependencies of a given kb.

    @param kb the kb id to show
    @param ln language
    @param sortby the sorting criteria ('from' or 'to')
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        uid = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        return page(title=_("Knowledge Base %(x_name)s Dependencies", x_name=kb_name),
                    body=bibknowledgeadminlib.perform_request_knowledge_base_show_dependencies(ln=ln,
                                                                       kb_id=kb_id,
                                                                       sortby=sortby),
                    uid=uid,
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    else:
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_add_mapping(req, kb, mapFrom, mapTo, sortby="to", ln=CFG_SITE_LANG,
                   forcetype=None, replacements=None, kb_type=None):
    """
    Adds a new mapping to a kb.

    @param ln language
    @param kb the kb id to show
    @param sortby the sorting criteria ('from' or 'to')
    @param forcetype indicates if this function should ask about replacing left/right sides (None or 'no')
                     replace in current kb ('curr') or in all ('all')
    @param replacements an object containing kbname++++left++++right strings.
                        Can be a string or an array of strings
    @param kb_type None for normal from-to kb's, 't' for taxonomies
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        key = wash_url_argument(mapFrom, 'str')
        value = wash_url_argument(mapTo, 'str')
        #check if key or value already exists in some KB; the lookup is a
        #substring match, so keep only the rules that match exactly
        left_sides = [m for m in bibknowledge.get_kb_mappings("", key, "")
                      if m['key'] == key]
        right_sides = [m for m in bibknowledge.get_kb_mappings("", "", value)
                       if m['value'] == value]
        if not left_sides and not right_sides:
            #no problems, just add in current
            forcetype = "curr"
        #likewise, if this is a taxonomy, just pass on
        if kb_type == 't':
            forcetype = "curr"
        if not forcetype or forcetype == "no":
            #not yet confirmed by the user: ask about duplicated sides
            if left_sides:
                return page(title=_("Left side exists"),
                            body = bibknowledgeadminlib.perform_request_verify_rule(ln, kb_id, key, value, "left", kb_name, left_sides),
                            language=ln,
                            navtrail = navtrail_previous_links,
                            lastupdated=__lastupdated__,
                            req=req)
            if right_sides:
                return page(title=_("Right side exists"),
                            body = bibknowledgeadminlib.perform_request_verify_rule(ln, kb_id, key, value, "right", kb_name, right_sides),
                            language=ln,
                            navtrail = navtrail_previous_links,
                            lastupdated=__lastupdated__,
                            req=req)
        if forcetype == "curr":
            bibknowledge.add_kb_mapping(kb_name, key, value)
        if forcetype == "all":
            #a bit tricky.. remove the rules given in param replacement and add the current
            #rule in the same kb's
            if replacements:
                #"replacements" can be either a string or an array. Let's make it always an array
                if isinstance(replacements, str):
                    replacements = [replacements]
                for r in replacements:
                    if r.find("++++") > 0:
                        (rkbname, rleft, dummy) = r.split('++++')
                        bibknowledge.remove_kb_mapping(rkbname, rleft)
                        #add only if this is not yet there..
                        if not bibknowledge.kb_mapping_exists(rkbname, key):
                            bibknowledge.add_kb_mapping(rkbname, key, value)
        redirect_to_url(req, "kb?ln=%(ln)s&kb=%(kb)s&sortby=%(sortby)s&kb_type=%(kb_type)s" % {'ln':ln,
                                                                               'kb':kb_id,
                                                                               'sortby':sortby,
                                                                               'kb_type':kb_type})
    else:
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_edit_mapping(req, kb, key, mapFrom, mapTo,
                    update="", delete="", sortby="to", ln=CFG_SITE_LANG):
    """
    Edit a mapping in a kb. Edit can be "update old value" or "delete existing value".

    @param kb the knowledge base id to edit
    @param key the key of the mapping that will be modified
    @param mapFrom the new key of the mapping
    @param mapTo the new value of the mapping
    @param update contains a value if the mapping is to be updated
    @param delete contains a value if the mapping is to be deleted
    @param sortby the sorting criteria ('from' or 'to')
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        key = wash_url_argument(key, 'str')
        if delete != "":
            #Delete
            bibknowledge.remove_kb_mapping(kb_name, key)
        if update != "":
            #Update
            new_key = wash_url_argument(mapFrom, 'str')
            new_value = wash_url_argument(mapTo, 'str')
            bibknowledge.update_kb_mapping(kb_name, key, new_key, new_value)
        redirect_to_url(req, "kb?ln=%(ln)s&kb=%(kb)s&sortby=%(sortby)s" % {'ln':ln, 'kb':kb_id, 'sortby':sortby})
    else:
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def uniq(alist):
    """Return the unique members of *alist*, preserving first-seen order."""
    seen = set()
    result = []
    for item in alist:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def kb_update_attributes(req, kb="", name="", description="", sortby="to",
                         ln=CFG_SITE_LANG, chosen_option=None, kb_type=None):
    """
    Update the attributes (name, description) of the kb.

    @param ln language
    @param kb the kb id to update
    @param sortby the sorting criteria ('from' or 'to')
    @param name the new name of the kb
    @param description the new description of the kb
    @param chosen_option set to dialog box value; when set, the update is
                         aborted and the user is sent back to the attributes page
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        if chosen_option is not None:
            # Update could not be performed.
            # Redirect to kb attributes page
            redirect_to_url(req, "kb?ln=%(ln)s&action=attributes&kb=%(kb)s&sortby=%(sortby)s&kb_type=%(kb_type)s" % {'ln':ln, 'kb':kb_id, 'sortby':sortby, 'kb_type':kb_type})
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        new_name = wash_url_argument(name, 'str')
        if kb_name != new_name and bibknowledge.kb_exists(new_name):
            #A knowledge base with that name already exist
            #Do not update
            return dialog_box(req=req,
                              ln=ln,
                              title="Name already in use",
                              message="""<i>%s</i> cannot be renamed to %s:
                                         Another knowledge base already has that name.
                                        <br/>Please choose another name.""" % (kb_name,
                                                                               new_name),
                              navtrail=navtrail_previous_links,
                              options=[ _("Ok")])
        new_desc = wash_url_argument(description, 'str')
        bibknowledge.update_kb_attributes(kb_name, new_name, new_desc)
        redirect_to_url(req, "kb?ln=%(ln)s&kb=%(kb)s&sortby=%(sortby)s" % {'ln':ln, 'kb':kb_id, 'sortby':sortby})
    else:
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_export(req, kbname="", format="kbr", searchkey="", searchvalue="", searchtype="s", limit=None, ln=CFG_SITE_LANG):
    """
    Exports the given kb so that it is listed in stdout (the browser).

    @param req the request
    @param kbname knowledge base name
    @param format 'kba' for authority file, 'kbr' for leftside-rightside, json
           for json-formatted dictionaries
    @param searchkey include only lines that match this on the left side
    @param searchvalue include only lines that match this on the right side
    @param searchtype s = substring match, e = exact match
    @param limit how many results to return. None means all
    @param ln language
    """
    # NOTE: the parameter is deliberately named 'format' (shadowing the
    # builtin) because web callers pass it by that name in the URL.
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    if not kbname:
        return page(title=_("Knowledge base name missing"),
                    body = """Required parameter kbname
                              is missing.""",
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    #in order to make 'wget' downloads easy we do not require authorization
    #first check the type of the KB
    kbtype = None
    kbinfo = None
    kbid = None
    kbinfos = bibknowledge.get_kbs_info("", kbname)
    if kbinfos:
        kbinfo = kbinfos[0]
        kbtype = kbinfo['kbtype']
        kbid = kbinfo['id']
    else:
        return page(title=_("Unknown knowledge base"),
                    body = _("There is no knowledge base with that name."),
                    language=ln,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    if not kbtype or kbtype == 'w':
        if format and format == "ejson":
            req.content_type = 'application/json'
            return bibknowledge.get_kb_mappings_embedded_json(kbname, searchkey, \
                                                     searchvalue, searchtype, limit)
        elif format and format[0] == 'j':
            # as JSON formatted string
            req.content_type = 'application/json'
            return bibknowledge.get_kb_mappings_json(kbname, searchkey, \
                                                     searchvalue, searchtype, limit)
        # left side / right side KB
        mappings = bibknowledge.get_kb_mappings(kbname, searchkey, \
                                                searchvalue, searchtype)
        if format == 'right' or format == 'kba':
            # as authority sequence
            seq = [m['value'] for m in mappings]
            seq = uniq(sorted(seq))
            for s in seq:
                req.write(s+"\n")
            return
        else:
            # as regularly formatted left-right mapping
            for m in mappings:
                req.write(m['key'] + '---' + m['value'] + '\n')
            return
    elif kbtype == 'd':
        # dynamic kb, another interface for perform_request_search
        if format and format[0] == 'j':
            req.content_type = "application/json"
            return bibknowledge.get_kbd_values_json(kbname, searchvalue)
        else:
            # print it as a list of values
            for hit in bibknowledge.get_kbd_values(kbname, searchvalue):
                req.write(hit + '\n')
            req.write('\n')
            return
    elif kbtype == 't': #taxonomy: output the file
        kbfilename = CFG_WEBDIR+"/kbfiles/"+str(kbid)+".rdf"
        try:
            # context manager guarantees the handle is closed
            # (previously it leaked if a write/read raised mid-loop)
            with open(kbfilename, 'r') as rdffile:
                for line in rdffile:
                    req.write(line)
        except EnvironmentError:
            req.write("Reading the file "+kbfilename+" failed.")
    else:
        # This situation should never happen
        raise ValueError("Unsupported KB Type: %s" % kbtype)
def kb_add(req, ln=CFG_SITE_LANG, sortby="to", kbtype=""):
    """
    Adds a new kb and redirects to its attributes page.

    @param req the request
    @param ln language
    @param sortby to or from
    @param kbtype type of knowledge base. one of: "", taxonomy, dynamic
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"))
    try:
        dummy = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        name = "Untitled"
        if kbtype == "taxonomy":
            name = "Untitled Taxonomy"
        if kbtype == "dynamic":
            name = "Untitled dynamic"
        kb_id = bibknowledge.add_kb(kb_name=name.decode('utf-8'), kb_type=kbtype)
        redirect_to_url(req, "kb?ln=%(ln)s&action=attributes&kb=%(kb)s" % {'ln':ln, 'kb':kb_id, 'sortby':sortby})
    else:
        # the unauthorized branch previously rebuilt navtrail_previous_links
        # with the exact same value; the existing one is reused
        return page_not_authorized(req=req,
                                   text=auth_msg,
                                   navtrail=navtrail_previous_links)
def kb_delete(req, kb, ln=CFG_SITE_LANG, chosen_option=""):
    """
    Deletes an existing kb, after asking the user for confirmation.

    @param kb the kb id to delete
    @param chosen_option the dialog-box answer; "" asks for confirmation,
                         the localized "Delete" performs the deletion, any
                         other value (e.g. "Cancel") just redirects back
    """
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb?ln=%s">%s</a> > %s''' % (CFG_SITE_SECURE_URL, ln, _("Manage Knowledge Bases"), _("Delete Knowledge Base"))
    try:
        dummy = getUid(req)
    except Exception:
        # narrowed from a bare except
        return error_page('Error', req)
    (auth_code, auth_msg) = check_user(req, 'cfgbibknowledge')
    if not auth_code:
        kb_id = wash_url_argument(kb, 'int')
        kb_name = bibknowledge.get_kb_name(kb_id)
        if kb_name is None:
            return page(title=_("Unknown Knowledge Base"),
                        body = "",
                        language=ln,
                        navtrail = navtrail_previous_links,
                        errors = [("ERR_KB_ID_UNKNOWN", kb)],
                        lastupdated=__lastupdated__,
                        req=req)
        #Ask confirmation to user if not already done
        chosen_option = wash_url_argument(chosen_option, 'str')
        if chosen_option == "":
            return dialog_box(req=req,
                              ln=ln,
                              title="Delete %s" % kb_name,
                              message="""Are you sure you want to
                                         delete knowledge base <i>%s</i>?""" % kb_name,
                              navtrail=navtrail_previous_links,
                              options=[_("Cancel"), _("Delete")])
        elif chosen_option==_("Delete"):
            bibknowledge.delete_kb(kb_name)
        # both "Delete" and "Cancel" end up back at the kb listing
        redirect_to_url(req, "kb?ln=%(ln)s" % {'ln':ln})
    else:
        navtrail_previous_links = ''' > <a class="navtrail" href="%s/kb">%s</a>''' % (CFG_SITE_SECURE_URL, _("Manage Knowledge Bases"))
        return page_not_authorized(req=req, text=auth_msg,
                                   navtrail=navtrail_previous_links)
def dialog_box(req, url="", ln=CFG_SITE_LANG, navtrail="",
               title="", message="", options=None):
    """
    Returns a dialog box with a given title, message and options.
    Used for asking confirmation on actions.
    The page that will receive the result must take 'chosen_option' as parameter.

    @param url the url used to submit the options chosen by the user
    @param options the list of labels for the buttons given as choice to user
    """
    import invenio
    bibformat_templates = invenio.legacy.template.load('bibformat')
    options = options or []
    dialog_html = bibformat_templates.tmpl_admin_dialog_box(url,
                                                            title,
                                                            message,
                                                            options)
    return page(title="",
                body=dialog_html,
                language=ln,
                lastupdated=__lastupdated__,
                navtrail=navtrail,
                req=req)
| gpl-2.0 |
leggitta/mne-python | mne/realtime/mockclient.py | 15 | 6139 | # Authors: Mainak Jas <mainak@neuro.hut.fi>
# Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import copy
import numpy as np
from ..event import find_events
class MockRtClient(object):
    """Mock Realtime Client
    Parameters
    ----------
    raw : instance of Raw object
        The raw object which simulates the RtClient
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    """
    def __init__(self, raw, verbose=None):
        self.raw = raw
        # deep-copy so that later modifications of the client's info
        # cannot leak back into the caller's Raw object
        self.info = copy.deepcopy(self.raw.info)
        self.verbose = verbose
        self._current = dict()  # pointer to current index for the event
        self._last = dict()  # Last index for the event
    def get_measurement_info(self):
        """Returns the measurement info.
        Returns
        -------
        self.info : dict
            The measurement info.
        """
        return self.info
    def send_data(self, epochs, picks, tmin, tmax, buffer_size):
        """Read from raw object and send them to RtEpochs for processing.
        Parameters
        ----------
        epochs : instance of RtEpochs
            The epochs object.
        picks : array-like of int
            Indices of channels.
        tmin : float
            Time instant to start receiving buffers.
        tmax : float
            Time instant to stop receiving buffers.
        buffer_size : int
            Size of each buffer in terms of number of samples.
        """
        # this is important to emulate a thread, instead of automatically
        # or constantly sending data, we will invoke this explicitly to send
        # the next buffer
        sfreq = self.info['sfreq']
        tmin_samp = int(round(sfreq * tmin))
        tmax_samp = int(round(sfreq * tmax))
        # NOTE(review): the stop range starts at buffer_size regardless of
        # tmin_samp, so the (start, stop) pairs only line up when tmin == 0;
        # a trailing partial buffer is also never sent -- confirm tmin is
        # expected to be 0 here.
        iter_times = zip(list(range(tmin_samp, tmax_samp, buffer_size)),
                         list(range(buffer_size, tmax_samp, buffer_size)))
        for ii, (start, stop) in enumerate(iter_times):
            # channels are picked in _append_epoch_to_queue. No need to pick
            # here
            data, times = self.raw[:, start:stop]
            # to undo the calibration done in _process_raw_buffer
            cals = np.array([[self.info['chs'][k]['range'] *
                              self.info['chs'][k]['cal'] for k in picks]]).T
            data[picks, :] = data[picks, :] / cals
            epochs._process_raw_buffer(data)
    # The following methods do not seem to be important for this use case,
    # but they need to be present for the emulation to work because
    # RtEpochs expects them to be there.
    def get_event_data(self, event_id, tmin, tmax, picks, stim_channel=None,
                       min_duration=0):
        """Simulate the data for a particular event-id.
        The epochs corresponding to a particular event-id are returned. The
        method remembers the epoch that was returned in the previous call and
        returns the next epoch in sequence. Once all epochs corresponding to
        an event-id have been exhausted, the method returns None.
        Parameters
        ----------
        event_id : int
            The id of the event to consider.
        tmin : float
            Start time before event.
        tmax : float
            End time after event.
        picks : array-like of int
            Indices of channels.
        stim_channel : None | string | list of string
            Name of the stim channel or all the stim channels
            affected by the trigger. If None, the config variables
            'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
            etc. are read. If these are not found, it will default to
            'STI 014'.
        min_duration : float
            The minimum duration of a change in the events channel required
            to consider it as an event (in seconds).
        Returns
        -------
        data : 2D array with shape [n_channels, n_times]
            The epochs that are being simulated
        """
        # Get the list of all events
        events = find_events(self.raw, stim_channel=stim_channel,
                             verbose=False, output='onset',
                             consecutive='increasing',
                             min_duration=min_duration)
        # Get the list of only the specified event
        idx = np.where(events[:, -1] == event_id)[0]
        event_samp = events[idx, 0]
        # Only do this the first time for each event type
        if event_id not in self._current:
            # Initialize pointer for the event to 0
            self._current[event_id] = 0
            self._last[event_id] = len(event_samp)
        # relative start and stop positions in samples
        tmin_samp = int(round(self.info['sfreq'] * tmin))
        tmax_samp = int(round(self.info['sfreq'] * tmax)) + 1
        if self._current[event_id] < self._last[event_id]:
            # Select the current event from the events list
            ev_samp = event_samp[self._current[event_id]]
            # absolute start and stop positions in samples
            start = ev_samp + tmin_samp - self.raw.first_samp
            stop = ev_samp + tmax_samp - self.raw.first_samp
            self._current[event_id] += 1  # increment pointer
            data, _ = self.raw[picks, start:stop]
            return data
        else:
            # all epochs for this event id have been consumed
            return None
    def register_receive_callback(self, x):
        """API boilerplate
        Parameters
        ----------
        x : None
            Not used.
        """
        pass
    def start_receive_thread(self, x):
        """API boilerplate
        Parameters
        ----------
        x : None
            Not used.
        """
        pass
    def unregister_receive_callback(self, x):
        """API boilerplate
        Parameters
        ----------
        x : None
            Not used.
        """
        pass
    def _stop_receive_thread(self):
        """API boilerplate"""
        pass
| bsd-3-clause |
Celedhrim/persomov | libs/tornado/platform/select.py | 79 | 2633 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Select-based IOLoop implementation.
Used as a fallback for systems that don't support epoll or kqueue.
"""
from __future__ import absolute_import, division, print_function, with_statement
import select
from tornado.ioloop import IOLoop, PollIOLoop
class _Select(object):
    """A simple, select()-based IOLoop implementation for non-Linux systems"""
    def __init__(self):
        self.read_fds = set()
        self.write_fds = set()
        self.error_fds = set()
        self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)

    def close(self):
        # nothing to release: select() has no kernel-side registration
        pass

    def register(self, fd, events):
        if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds:
            raise IOError("fd %s already registered" % fd)
        if events & IOLoop.READ:
            self.read_fds.add(fd)
        if events & IOLoop.WRITE:
            self.write_fds.add(fd)
        if events & IOLoop.ERROR:
            self.error_fds.add(fd)
            # Closed connections are reported as errors by epoll and kqueue,
            # but as zero-byte reads by select, so when errors are requested
            # we need to listen for both read and error.
            # (This line was commented out, which contradicted the comment
            # above: an fd registered only for ERROR would never be reported
            # as closed by select(). Restored to match upstream Tornado.)
            self.read_fds.add(fd)

    def modify(self, fd, events):
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        # discard() (not remove()) so unregistering an unknown fd is a no-op
        self.read_fds.discard(fd)
        self.write_fds.discard(fd)
        self.error_fds.discard(fd)

    def poll(self, timeout):
        readable, writeable, errors = select.select(
            self.read_fds, self.write_fds, self.error_fds, timeout)
        events = {}
        # merge the three fd lists into a single fd -> event-mask mapping
        for fd in readable:
            events[fd] = events.get(fd, 0) | IOLoop.READ
        for fd in writeable:
            events[fd] = events.get(fd, 0) | IOLoop.WRITE
        for fd in errors:
            events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()
return events.items()
class SelectIOLoop(PollIOLoop):
    """IOLoop backed by the portable select()-based _Select poller."""
    def initialize(self, **kwargs):
        # Force the select() implementation; used as a fallback on
        # platforms without epoll or kqueue.
        super(SelectIOLoop, self).initialize(impl=_Select(), **kwargs)
| gpl-3.0 |
chaowu2009/stereo-vo | tools/capture_TwoCameras_saveImagesOnly.py | 1 | 2289 | import numpy as np
import cv2
import time
import matplotlib.pylab as plt
"""
Make sure that you hold the checkerboard horizontally (more checkers horizontally than vertically).
In order to get a good calibration you will need to move the checkerboard around in the camera frame such that:
the checkerboard is detected at the left and right edges of the field of view (X calibration)
the checkerboard is detected at the top and bottom edges of the field of view (Y calibration)
the checkerboard is detected at various angles to the camera ("Skew")
the checkerboard fills the entire field of view (Size calibration)
checkerboard tilted to the left, right, top and bottom (X,Y, and Size calibration)
"""
# Camera device indices for the stereo pair.
left = 1
right = 2

# Delay between writing the left and right images, in milliseconds.
time_in_ms = 1000/100

# Output folder; image pairs are written under img_left/ and img_right/.
# (Alternative locations kept for reference:)
#folder = "/home/cwu/Downloads/";
#folder = "D:/vision/stereo-calibration/calib_imgs/ARC/"
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"

fp = open(folder + "timeStamp.txt", "w")

# Capture resolution (the cameras also support 1280x720; the duplicate
# overriding assignments were removed -- 640x480 was the effective value).
WIDTH = 640
HEIGHT = 480

for counter in range(1, 31):
    # Millisecond timestamp recorded for this capture pair.
    millis = int(round(time.time() * 1000))
    # Open, configure, grab and release each camera in turn so the two
    # devices never hold the USB bandwidth at the same time.
    cap1 = cv2.VideoCapture(left)
    cap1.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
    cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)
    cv2.waitKey(100)
    ret, frame1 = cap1.read()
    cap1.release()
    cap2 = cv2.VideoCapture(right)
    cap2.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
    cap2.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)
    cv2.waitKey(100)
    ret, frame2 = cap2.read()
    cap2.release()
    #frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    #frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame pair; the script blocks until the
    # matplotlib window is closed, letting the user reposition the board.
    plt.subplot(121)
    plt.imshow(frame1)
    plt.title('left')
    plt.subplot(122)
    plt.imshow(frame2)
    plt.title('right')
    plt.show()
    print('another capture', counter)
    cv2.waitKey(100)
    cv2.imwrite(folder + "img_left/left_" + str(counter) + ".jpg", frame1)
    cv2.waitKey(time_in_ms)
    cv2.imwrite(folder + "img_right/right_" + str(counter) + ".jpg", frame2)
    fp.write(str(counter) + "," + str(millis) + "\n")
    print("the ", counter, " pairs")
    cv2.destroyAllWindows()

fp.close()
print('All Done \n')
| mit |
dnikulin/jula | scripts/make_classes.py | 1 | 3287 | # Copyright (C) 2011 Dmitri Nikulin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common import SIZES, classname, mklabel, cells, startfile
def genmatrix(rows, cols):
    """Emit Java source for a fixed-size rows x cols matrix class.

    The generated class stores each cell in its own double field (named by
    mklabel) and implements the Matrix_*_* trait interfaces.  Python 2 only
    (uses the ``print >> fd`` statement).
    """
    myclass = classname(rows, cols)
    fd = startfile("fixed", myclass)
    def line(s=""):
        # Shorthand: write one line of generated Java source.
        print >> fd, s
    # Trait interfaces implemented by the generated class; square matrices
    # additionally get the Matrix_M_M trait.
    traits = [("Matrix_%d_N" % rows), ("Matrix_M_%d" % cols)]
    if (rows == cols):
        traits.append("Matrix_M_M")
    for trait in traits:
        line("import org.dnikulin.jula.traits.%s;" % trait)
    line()
    line("import org.dnikulin.jula.functions.Copy;")
    line()
    line("public final class %s implements %s {" % (myclass, ", ".join(traits)))
    line(" public static final int rows = %d;" % rows)
    line(" public static final int cols = %d;" % cols)
    line(" public static final int size = (rows * cols);")
    line()
    # One field declaration line per row, one double per cell.
    for row in range(rows):
        labels = ", ".join([mklabel(row, col) for col in range(cols)])
        line(" public double %s;" % labels)
    line()
    line(" @Override")
    line(" public int getRows() {")
    line(" return rows;")
    line(" }")
    line()
    line(" @Override")
    line(" public int getCols() {")
    line(" return cols;")
    line(" }")
    line()
    # get(): row-major switch mapping (row, col) to the matching field.
    line(" @Override")
    line(" public double get(final int row, final int col) {")
    line(" assert(row >= 0);")
    line(" assert(col >= 0);")
    line(" assert(row < rows);")
    line(" assert(col < cols);")
    line()
    line(" switch ((row * cols) + col) {")
    for (row, col, label) in cells(rows, cols):
        off = (row * cols) + col
        line(" case %2d: return %s;" % (off, label))
    line(" default: return 0;")
    line(" }")
    line(" }")
    line()
    # set(): same row-major dispatch, assigning instead of returning.
    line(" @Override")
    line(" public void set(final int row, final int col, final double val) {")
    line(" assert(row >= 0);")
    line(" assert(col >= 0);")
    line(" assert(row < rows);")
    line(" assert(col < cols);")
    line()
    line(" switch ((row * cols) + col) {")
    for (row, col, label) in cells(rows, cols):
        off = (row * cols) + col
        line(" case %2d: %s = val; return;" % (off, label))
    line(" default: return;")
    line(" }")
    line(" }")
    line()
    # clone(): allocate a fresh instance and field-copy via Copy.copy().
    line(" @Override")
    line(" public %s clone() {" % (myclass))
    line(" final %s that = new %s();" % (myclass, myclass))
    line(" Copy.copy(this, that);")
    line(" return that;")
    line(" }")
    line("}")
    fd.flush()
    fd.close()
if __name__ == '__main__':
    # Generate one fixed-size matrix class for every (rows, cols) pair.
    for row_count in SIZES:
        for col_count in SIZES:
            genmatrix(row_count, col_count)
| apache-2.0 |
biboc/RIOT | tests/struct_tm_utility/tests/01-run.py | 32 | 4017 | #!/usr/bin/env python3
# Copyright (C) 2017 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
import calendar
import datetime
from testrunner import run
def _check_help(child):
    """Verify that `help` lists the four commands of the application."""
    child.sendline('help')
    # The help output lines must appear in this exact order.
    for expected in (
            'Command Description',
            '---------------------------------------',
            'days_in Tells you the number of days in a month.',
            'leap_year Tells you if a supplied year is a leap year.',
            'doomsday Tells you the wday Doomsday of the supplied year.',
            'day Tells you the day of the supplied date.'):
        child.expect_exact(expected)
def _check_days_in(child):
    """Check `days_in`: usage message and the day count of every month."""
    usage = 'Usage: days_in <Month[1..12]>'
    # No argument -> usage message.
    child.sendline('days_in')
    child.expect_exact(usage)
    # Out-of-range months -> usage message.
    child.sendline('days_in 13')
    child.expect_exact(usage)
    child.sendline('days_in 0')
    child.expect_exact(usage)
    year = 2017  # not a leap year, so February has 28 days
    for month in range(1, 13):
        days = calendar.monthrange(year, month)[1]
        mon = datetime.datetime(year, month, 1).strftime('%b').upper()
        child.sendline('days_in {}'.format(month))
        child.expect_exact('There are {} days in {} in common years.'
                           .format(days, mon))
def _check_leap_year(child):
    """Check `leap_year`: usage message and a few known (non-)leap years."""
    usage = 'Usage: leap_year <Year>'
    # No argument -> usage message.
    child.sendline('leap_year')
    child.expect_exact(usage)
    # A non-numeric year is rejected with the usage message as well.
    child.sendline('leap_year aaaa')
    child.expect_exact(usage)
    expectations = ((2000, 'YES'), (2016, 'YES'), (2017, 'NO'), (2018, 'NO'))
    for year, leap in expectations:
        child.sendline('leap_year {}'.format(year))
        child.expect_exact('Was {} a leap year? {}.'.format(year, leap))
def _check_doomsday(child):
    """Check `doomsday` against Python's own calendar arithmetic."""
    # No argument -> usage message.
    child.sendline('doomsday')
    child.expect_exact('Usage: doomsday <Year>')
    for year in (2016, 2017):
        # "MAR 0" is the day before March 1st, i.e. the last day of
        # February — the classic Doomsday anchor.
        anchor = datetime.datetime(year, 3, 1) - datetime.timedelta(days=1)
        doomsday = anchor.strftime('%a').upper()
        child.sendline('doomsday {}'.format(year))
        child.expect_exact('What weekday was MAR 0 of {}? {}.'
                           .format(year, doomsday))
def _check_day(child):
    """Check the `day` command: usage, valid dates, and Feb 29 handling."""
    # verify usage
    child.sendline('day')
    child.expect_exact('Usage: day <Year> <Month[1..12]> <Day[1..31]>')
    # loop over a list of valid dates
    for year in (2017, 2018):
        for month in (1, 4, 11):
            for day in (1, 15, 28):
                dt = datetime.datetime(year, month, day)
                count = dt.timetuple().tm_yday
                day_str = dt.strftime('%a').upper()
                child.sendline('day {} {} {}'.format(year, month, day))
                child.expect_exact('What weekday was {}-{:02}-{:02}? '
                                   'The {}(th) day of the year was a {}.'
                                   .format(year, month, day, count, day_str))
    # 2016 is a leap year, so Feb 29 is valid
    child.sendline('day 2016 2 29')
    child.expect_exact('What weekday was 2016-02-29? '
                       'The 60(th) day of the year was a MON.')
    # 2017 is NOT a leap year, so Feb 29 must be rejected as invalid
    # (original comment wrongly said "2017 is a leap year")
    child.sendline('day 2017 2 29')
    child.expect_exact('The supplied date is invalid, '
                       'but no error should occur.')
def _wait_prompt(child):
    """Send an empty line and wait for the shell prompt to appear."""
    child.sendline('')
    child.expect('>')
def testfunc(child):
    """Top-level interaction sequence executed by the testrunner."""
    _wait_prompt(child)
    _check_help(child)
    _check_days_in(child)
    _check_leap_year(child)
    _check_doomsday(child)
    _check_day(child)
# Hand the interaction script to RIOT's testrunner harness.
if __name__ == "__main__":
    sys.exit(run(testfunc))
| lgpl-2.1 |
huntxu/python-neutronclient | neutronclient/neutron/v2_0/rbac.py | 3 | 3975 | # Copyright 2015 Huawei Technologies India Pvt Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutronclient._i18n import _
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronV20
# key=object_type: value={key=resource, value=cmd_resource}
RBAC_OBJECTS = {'network': {'network': 'network'},
'qos-policy': {'policy': 'qos_policy'}}
def _get_cmd_resource(obj_type):
resource = list(RBAC_OBJECTS[obj_type])[0]
cmd_resource = RBAC_OBJECTS[obj_type][resource]
return resource, cmd_resource
def get_rbac_obj_params(client, obj_type, obj_id_or_name):
    """Resolve *obj_id_or_name* to an ID for the given RBAC object type.

    Returns an (object_id, cmd_resource) tuple.
    """
    resource, cmd_resource = _get_cmd_resource(obj_type)
    object_id = neutronV20.find_resourceid_by_name_or_id(
        client=client,
        resource=resource,
        name_or_id=obj_id_or_name,
        cmd_resource=cmd_resource)
    return object_id, cmd_resource
class ListRBACPolicy(neutronV20.ListCommand):
    """List RBAC policies that belong to a given tenant."""
    resource = 'rbac_policy'
    # Columns shown by default in the list output.
    list_columns = ['id', 'object_type', 'object_id']
    pagination_support = True
    sorting_support = True
    # RBAC policies have no name attribute; lookups are by ID only.
    allow_names = False
class ShowRBACPolicy(neutronV20.ShowCommand):
    """Show information of a given RBAC policy."""
    resource = 'rbac_policy'
    # RBAC policies have no name attribute; lookups are by ID only.
    allow_names = False
class CreateRBACPolicy(neutronV20.CreateCommand):
    """Create a RBAC policy for a given tenant."""
    resource = 'rbac_policy'
    def add_known_arguments(self, parser):
        """Register the CLI arguments understood by this command."""
        # Positional: the object (by ID or name) the policy applies to.
        parser.add_argument(
            'name',
            metavar='RBAC_OBJECT',
            help=_('ID or name of the RBAC object.'))
        parser.add_argument(
            '--type', choices=RBAC_OBJECTS.keys(),
            required=True,
            type=utils.convert_to_lowercase,
            help=_('Type of the object that RBAC policy affects.'))
        # '*' means the policy applies to every tenant.
        parser.add_argument(
            '--target-tenant',
            default='*',
            help=_('ID of the tenant to which the RBAC '
                   'policy will be enforced.'))
        parser.add_argument(
            '--action', choices=['access_as_external', 'access_as_shared'],
            type=utils.convert_to_lowercase,
            required=True,
            help=_('Action for the RBAC policy.'))
    def args2body(self, parsed_args):
        """Build the API request body from the parsed CLI arguments."""
        neutron_client = self.get_client()
        object_id, object_type = get_rbac_obj_params(neutron_client,
                                                     parsed_args.type,
                                                     parsed_args.name)
        return {
            self.resource: {
                'object_id': object_id,
                'object_type': object_type,
                'target_tenant': parsed_args.target_tenant,
                'action': parsed_args.action,
            }
        }
class UpdateRBACPolicy(neutronV20.UpdateCommand):
    """Update RBAC policy for given tenant."""
    resource = 'rbac_policy'
    # RBAC policies have no name attribute; lookups are by ID only.
    allow_names = False
    def add_known_arguments(self, parser):
        parser.add_argument(
            '--target-tenant',
            help=_('ID of the tenant to which the RBAC '
                   'policy will be enforced.'))
    def args2body(self, parsed_args):
        # Only the target tenant of an existing policy may be changed.
        return {self.resource: {'target_tenant': parsed_args.target_tenant}}
class DeleteRBACPolicy(neutronV20.DeleteCommand):
    """Delete a RBAC policy."""
    resource = 'rbac_policy'
    # RBAC policies have no name attribute; lookups are by ID only.
    allow_names = False
| apache-2.0 |
yask123/django | django/utils/timezone.py | 338 | 11027 | """
Timezone-related classes and functions.
This module uses pytz when it's available and fallbacks when it isn't.
"""
import sys
import time as _time
from datetime import datetime, timedelta, tzinfo
from threading import local
from django.conf import settings
from django.utils import lru_cache, six
from django.utils.decorators import ContextDecorator
try:
import pytz
except ImportError:
pytz = None
__all__ = [
'utc', 'get_fixed_timezone',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
    """
    UTC implementation taken from Python's docs.
    Used only when pytz isn't available.
    """
    def __repr__(self):
        return "<UTC>"
    def utcoffset(self, dt):
        # UTC is, by definition, at zero offset from itself.
        return ZERO
    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO
    def tzname(self, dt):
        return "UTC"
class FixedOffset(tzinfo):
    """
    Fixed offset in minutes east from UTC. Taken from Python's docs.
    Kept as close as possible to the reference version. __init__ was changed
    to make its arguments optional, according to Python's requirement that
    tzinfo subclasses can be instantiated without arguments.
    """
    def __init__(self, offset=None, name=None):
        # offset: minutes east of UTC; name: display name such as "+0330".
        # Attributes are only set when given, so a bare FixedOffset() is
        # possible (as tzinfo pickling requires).
        if offset is not None:
            self.__offset = timedelta(minutes=offset)
        if name is not None:
            self.__name = name
    def utcoffset(self, dt):
        return self.__offset
    def tzname(self, dt):
        return self.__name
    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving time.
        return ZERO
class ReferenceLocalTimezone(tzinfo):
"""
Local time. Taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
Kept as close as possible to the reference version. __init__ was added to
delay the computation of STDOFFSET, DSTOFFSET and DSTDIFF which is
performed at import time in the example.
Subclasses contain further improvements.
"""
def __init__(self):
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class LocalTimezone(ReferenceLocalTimezone):
    """
    Slightly improved local time implementation focusing on correctness.
    It still crashes on dates before 1970 or after 2038, but at least the
    error message is helpful.
    """
    def tzname(self, dt):
        # Unlike the reference class, tolerate dt=None (assume non-DST).
        is_dst = False if dt is None else self._isdst(dt)
        return _time.tzname[is_dst]
    def _isdst(self, dt):
        try:
            return super(LocalTimezone, self)._isdst(dt)
        except (OverflowError, ValueError) as exc:
            # mktime() cannot represent this date. Re-raise the same
            # exception type with a more helpful message ("install pytz"),
            # preserving the original exception and traceback in a way
            # that works on both Python 2 and 3 (six.reraise).
            exc_type = type(exc)
            exc_value = exc_type(
                "Unsupported value: %r. You should install pytz." % dt)
            exc_value.__cause__ = exc
            six.reraise(exc_type, exc_value, sys.exc_info()[2])
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
def get_fixed_timezone(offset):
    """
    Return a tzinfo instance with a fixed offset from UTC.
    ``offset`` is either a number of minutes east of UTC or a timedelta.
    """
    if isinstance(offset, timedelta):
        offset = offset.seconds // 60
    # Build a "+HHMM" / "-HHMM" style display name.
    minutes = abs(offset)
    name = '%s%02d%02d' % ('-' if offset < 0 else '+',
                           minutes // 60, minutes % 60)
    return FixedOffset(offset, name)
# In order to avoid accessing settings at compile time,
# wrap the logic in a function and cache the result.
@lru_cache.lru_cache()
def get_default_timezone():
    """
    Return the default time zone as a tzinfo instance.
    This is the time zone defined by settings.TIME_ZONE.
    """
    tz_setting = settings.TIME_ZONE
    if pytz is not None and isinstance(tz_setting, six.string_types):
        return pytz.timezone(tz_setting)
    # Without pytz this relies on os.environ['TZ'] being set to
    # settings.TIME_ZONE.
    return LocalTimezone()
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
    """
    Returns the name of the default time zone.
    """
    # _get_timezone_name understands both pytz zones and plain tzinfo.
    return _get_timezone_name(get_default_timezone())
# Thread-local storage holding the currently activated time zone, if any.
_active = local()
def get_current_timezone():
    """
    Returns the currently active time zone as a tzinfo instance.
    """
    # Note: getattr evaluates its default eagerly, so get_default_timezone()
    # is called (and its lru_cache primed) on every call.
    return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
    """
    Returns the name of the currently active time zone.
    """
    return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
    """
    Sets the time zone for the current thread.
    The ``timezone`` argument must be an instance of a tzinfo subclass or a
    time zone name. If it is a time zone name, pytz is required.
    """
    if isinstance(timezone, tzinfo):
        _active.value = timezone
        return
    if pytz is not None and isinstance(timezone, six.string_types):
        _active.value = pytz.timezone(timezone)
        return
    raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
    """
    Unsets the time zone for the current thread.
    Django will then use the time zone defined by settings.TIME_ZONE.
    """
    try:
        del _active.value
    except AttributeError:
        # No time zone was activated for this thread; nothing to do.
        pass
class override(ContextDecorator):
    """
    Temporarily set the time zone for the current thread.
    This is a context manager that uses ``~django.utils.timezone.activate()``
    to set the time zone on entry and restores the previously active time
    zone on exit.
    The ``timezone`` argument must be an instance of a ``tzinfo`` subclass,
    a time zone name, or ``None``. If it is a time zone name, pytz is
    required. If it is ``None``, Django enables the default time zone.
    """
    def __init__(self, timezone):
        self.timezone = timezone
    def __enter__(self):
        # Remember whatever was active (None when nothing was).
        self.old_timezone = getattr(_active, 'value', None)
        if self.timezone is not None:
            activate(self.timezone)
        else:
            deactivate()
    def __exit__(self, exc_type, exc_value, traceback):
        if self.old_timezone is not None:
            _active.value = self.old_timezone
        else:
            deactivate()
# Templates
def template_localtime(value, use_tz=None):
    """
    Checks if value is a datetime and converts it to local time if necessary.
    If use_tz is provided and is not None, that will force the value to
    be converted (or not), overriding the value of settings.USE_TZ.
    This function is designed for use by the template engine.
    """
    # Early-out chain mirrors the original short-circuit evaluation order.
    if not isinstance(value, datetime):
        return value
    if not (settings.USE_TZ if use_tz is None else use_tz):
        return value
    if is_naive(value):
        return value
    if not getattr(value, 'convert_to_local_time', True):
        return value
    return localtime(value)
# Utilities
def localtime(value, timezone=None):
    """
    Converts an aware datetime.datetime to local time.
    Local time is defined by the current time zone, unless another time zone
    is specified.
    """
    tz = timezone if timezone is not None else get_current_timezone()
    # If ``value`` is naive, astimezone() raises ValueError, so no separate
    # naive-check is needed here.
    converted = value.astimezone(tz)
    normalize = getattr(tz, 'normalize', None)
    if normalize is not None:
        # Only pytz time zones provide normalize().
        converted = normalize(converted)
    return converted
def now():
    """
    Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
    """
    if not settings.USE_TZ:
        return datetime.now()
    # timeit shows that datetime.now(tz=utc) is 24% slower
    return datetime.utcnow().replace(tzinfo=utc)
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
    """
    Determines if a given datetime.datetime is aware.
    The concept is defined in Python's docs:
    http://docs.python.org/library/datetime.html#datetime.tzinfo
    Assuming value.tzinfo is either None or a proper datetime.tzinfo,
    value.utcoffset() implements the appropriate logic.
    """
    # utcoffset() returns None for naive datetimes, a timedelta otherwise.
    return value.utcoffset() is not None
def is_naive(value):
    """
    Determines if a given datetime.datetime is naive.
    The concept is defined in Python's docs:
    http://docs.python.org/library/datetime.html#datetime.tzinfo
    Assuming value.tzinfo is either None or a proper datetime.tzinfo,
    value.utcoffset() implements the appropriate logic.
    """
    # Mirror image of is_aware(): naive means utcoffset() is None.
    return value.utcoffset() is None
def make_aware(value, timezone=None, is_dst=None):
    """
    Makes a naive datetime.datetime in a given time zone aware.
    """
    if timezone is None:
        timezone = get_current_timezone()
    localize = getattr(timezone, 'localize', None)
    if localize is not None:
        # pytz time zones: let pytz resolve ambiguous/imaginary times.
        return localize(value, is_dst=is_dst)
    # Refuse to overwrite the tzinfo of an already-aware datetime.
    if is_aware(value):
        raise ValueError(
            "make_aware expects a naive datetime, got %s" % value)
    # This may be wrong around DST changes!
    return value.replace(tzinfo=timezone)
def make_naive(value, timezone=None):
    """
    Makes an aware datetime.datetime naive in a given time zone.
    """
    if timezone is None:
        timezone = get_current_timezone()
    # If ``value`` is naive, astimezone() raises ValueError, so no separate
    # naive-check is needed here.
    converted = value.astimezone(timezone)
    normalize = getattr(timezone, 'normalize', None)
    if normalize is not None:
        # Only pytz time zones provide normalize().
        converted = normalize(converted)
    return converted.replace(tzinfo=None)
| bsd-3-clause |
Canpio/Paddle | python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py | 7 | 2072 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
# Trainer settings and the three input layers used by the test network.
settings(learning_rate=1e-4, batch_size=1000)
seq = data_layer(name='seq_input', size=100)
sub_seq = data_layer(name='sub_seq_input', size=100)
lbl = data_layer(name='label', size=1)
def generate_rnn_simple(name):
    """Return a step function for recurrent_group.

    The step is a size-200 fully-connected recurrence fc([s, m]); memory
    and fc_layer share ``name``, which presumably lets the memory link back
    to the fc output automatically (no explicit set_input here).
    """
    def rnn_simple(s):
        m = memory(name=name, size=200)
        fc = fc_layer(input=[s, m], size=200, name=name)
        return fc
    return rnn_simple
def generate_rnn_simple_no_name():
    """Like generate_rnn_simple(), but with an anonymous memory.

    Without a shared name the recurrence must be wired explicitly via
    m.set_input(fc).
    """
    def rnn_simple(s):
        m = memory(name=None, size=200)
        fc = fc_layer(input=[s, m], size=200)
        m.set_input(fc)
        return fc
    return rnn_simple
with mixed_layer() as lstm_param: # test lstm unit, rnn group
    lstm_param += full_matrix_projection(input=seq, size=100 * 4)
with mixed_layer() as gru_param:
    gru_param += full_matrix_projection(input=seq, size=100 * 3)
# Network outputs: forward, backward and sub-sequence simple-RNN groups,
# plus LSTM and GRU groups, each reduced to a single time step.
outputs(
    last_seq(input=recurrent_group(
        step=generate_rnn_simple('rnn_forward'), input=seq)),
    first_seq(input=recurrent_group(
        step=generate_rnn_simple('rnn_back'), input=seq, reverse=True)),
    last_seq(input=recurrent_group(
        step=generate_rnn_simple('rnn_subseq_forward'),
        input=SubsequenceInput(input=sub_seq))),
    last_seq(input=lstmemory_group(
        input=lstm_param, size=100)),
    last_seq(input=gru_group(
        input=gru_param, size=100)),
    last_seq(input=recurrent_group(
        step=generate_rnn_simple_no_name(), input=seq)), )
| apache-2.0 |
MarkusHackspacher/unknown-horizons | horizons/ai/aiplayer/building/firestation.py | 1 | 3179 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# team@unknown-horizons.org
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from horizons.ai.aiplayer.basicbuilder import BasicBuilder
from horizons.ai.aiplayer.building import AbstractBuilding
from horizons.ai.aiplayer.buildingevaluator import BuildingEvaluator
from horizons.ai.aiplayer.constants import BUILDING_PURPOSE
from horizons.constants import BUILDINGS
class AbstractFireStation(AbstractBuilding):
def iter_potential_locations(self, settlement_manager):
spots_in_settlement = settlement_manager.settlement.buildability_cache.cache[(2, 2)]
village_builder = settlement_manager.village_builder
for coords in village_builder.special_building_assignments[BUILDING_PURPOSE.FIRE_STATION].keys():
if coords not in spots_in_settlement or village_builder.plan[coords][1][0] > village_builder.current_section:
continue
object = settlement_manager.settlement.ground_map[coords].object
if object is None or object.buildable_upon:
yield (coords[0], coords[1], 0)
@property
def producer_building(self):
"""Fire stations don't produce any resources."""
return False
@property
def evaluator_class(self):
return FireStationEvaluator
@classmethod
def register_buildings(cls):
cls._available_buildings[BUILDINGS.FIRE_STATION] = cls
class FireStationEvaluator(BuildingEvaluator):
need_collector_connection = False
record_plan_change = False
@classmethod
def create(cls, production_builder, x, y, orientation):
settlement_manager = production_builder.settlement_manager
village_builder = settlement_manager.village_builder
builder = BasicBuilder.create(BUILDINGS.FIRE_STATION, (x, y), orientation)
assigned_residences = village_builder.special_building_assignments[BUILDING_PURPOSE.FIRE_STATION][(x, y)]
total = len(assigned_residences)
not_serviced = 0
for residence_coords in assigned_residences:
if village_builder.plan[residence_coords][0] == BUILDING_PURPOSE.RESIDENCE:
not_serviced += 1
if not_serviced <= 0 or not_serviced < total * settlement_manager.owner.personality_manager.get('AbstractFireStation').fraction_of_assigned_residences_built:
return None
return FireStationEvaluator(village_builder, builder, not_serviced)
@property
def purpose(self):
return BUILDING_PURPOSE.FIRE_STATION
AbstractFireStation.register_buildings()
| gpl-2.0 |
nvoron23/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_augassign.py | 75 | 7968 | # Augmented assignment test.
from test.test_support import run_unittest
import unittest
class AugAssignTest(unittest.TestCase):
    """Exercise augmented assignment (+=, -=, ...) semantics."""
    def testBasic(self):
        """Apply every augmented operator to a plain int."""
        x = 2
        x += 1
        x *= 2
        x **= 2
        x -= 8
        x //= 5
        x %= 3
        x &= 2
        x |= 5
        x ^= 1
        x /= 2
        if 1/2 == 0:
            # classic division
            self.assertEquals(x, 3)
        else:
            # new-style division (with -Qnew)
            self.assertEquals(x, 3.0)
    def testInList(self):
        """Same operator sequence, but through a list subscript target."""
        x = [2]
        x[0] += 1
        x[0] *= 2
        x[0] **= 2
        x[0] -= 8
        x[0] //= 5
        x[0] %= 3
        x[0] &= 2
        x[0] |= 5
        x[0] ^= 1
        x[0] /= 2
        if 1/2 == 0:
            # classic division
            self.assertEquals(x[0], 3)
        else:
            # new-style division (with -Qnew)
            self.assertEquals(x[0], 3.0)
    def testInDict(self):
        """Same operator sequence, but through a dict subscript target."""
        x = {0: 2}
        x[0] += 1
        x[0] *= 2
        x[0] **= 2
        x[0] -= 8
        x[0] //= 5
        x[0] %= 3
        x[0] &= 2
        x[0] |= 5
        x[0] ^= 1
        x[0] /= 2
        if 1/2 == 0:
            # classic division
            self.assertEquals(x[0], 3)
        else:
            # new-style division (with -Qnew)
            self.assertEquals(x[0], 3.0)
    def testSequences(self):
        """In-place concat/repeat on lists, including slice targets."""
        x = [1,2]
        x += [3,4]
        x *= 2
        self.assertEquals(x, [1, 2, 3, 4, 1, 2, 3, 4])
        x = [1, 2, 3]
        y = x
        x[1:2] *= 2
        y[1:2] += [1]
        self.assertEquals(x, [1, 2, 1, 2, 3])
        # Slice assignment mutates in place, so x and y stay the same object.
        self.assert_(x is y)
    def testCustomMethods1(self):
        """x += y falls back to __add__ when __iadd__ is missing, and uses
        __iadd__ (which may mutate in place or return a new object) when
        present."""
        class aug_test:
            def __init__(self, value):
                self.val = value
            def __radd__(self, val):
                return self.val + val
            def __add__(self, val):
                return aug_test(self.val + val)
        class aug_test2(aug_test):
            def __iadd__(self, val):
                # Mutates in place and returns self.
                self.val = self.val + val
                return self
        class aug_test3(aug_test):
            def __iadd__(self, val):
                # Returns a fresh object instead of mutating.
                return aug_test3(self.val + val)
        x = aug_test(1)
        y = x
        x += 10
        # No __iadd__: falls back to __add__, producing a new object.
        self.assert_(isinstance(x, aug_test))
        self.assert_(y is not x)
        self.assertEquals(x.val, 11)
        x = aug_test2(2)
        y = x
        x += 10
        # __iadd__ returned self: same object, updated value.
        self.assert_(y is x)
        self.assertEquals(x.val, 12)
        x = aug_test3(3)
        y = x
        x += 10
        # __iadd__ returned a new object: rebinding, not mutation.
        self.assert_(isinstance(x, aug_test3))
        self.assert_(y is not x)
        self.assertEquals(x.val, 13)
    def testCustomMethods2(test_self):
        """Verify the exact dispatch order of binary, reflected and in-place
        special methods for every operator, by recording each call.

        Note: the parameter is named ``test_self`` so that the nested class
        bodies can use ``self`` for their own instances without shadowing.
        """
        output = []
        class testall:
            def __add__(self, val):
                output.append("__add__ called")
            def __radd__(self, val):
                output.append("__radd__ called")
            def __iadd__(self, val):
                output.append("__iadd__ called")
                return self
            def __sub__(self, val):
                output.append("__sub__ called")
            def __rsub__(self, val):
                output.append("__rsub__ called")
            def __isub__(self, val):
                output.append("__isub__ called")
                return self
            def __mul__(self, val):
                output.append("__mul__ called")
            def __rmul__(self, val):
                output.append("__rmul__ called")
            def __imul__(self, val):
                output.append("__imul__ called")
                return self
            def __div__(self, val):
                output.append("__div__ called")
            def __rdiv__(self, val):
                output.append("__rdiv__ called")
            def __idiv__(self, val):
                output.append("__idiv__ called")
                return self
            def __floordiv__(self, val):
                output.append("__floordiv__ called")
                return self
            def __ifloordiv__(self, val):
                output.append("__ifloordiv__ called")
                return self
            def __rfloordiv__(self, val):
                output.append("__rfloordiv__ called")
                return self
            def __truediv__(self, val):
                output.append("__truediv__ called")
                return self
            def __itruediv__(self, val):
                output.append("__itruediv__ called")
                return self
            def __mod__(self, val):
                output.append("__mod__ called")
            def __rmod__(self, val):
                output.append("__rmod__ called")
            def __imod__(self, val):
                output.append("__imod__ called")
                return self
            def __pow__(self, val):
                output.append("__pow__ called")
            def __rpow__(self, val):
                output.append("__rpow__ called")
            def __ipow__(self, val):
                output.append("__ipow__ called")
                return self
            def __or__(self, val):
                output.append("__or__ called")
            def __ror__(self, val):
                output.append("__ror__ called")
            def __ior__(self, val):
                output.append("__ior__ called")
                return self
            def __and__(self, val):
                output.append("__and__ called")
            def __rand__(self, val):
                output.append("__rand__ called")
            def __iand__(self, val):
                output.append("__iand__ called")
                return self
            def __xor__(self, val):
                output.append("__xor__ called")
            def __rxor__(self, val):
                output.append("__rxor__ called")
            def __ixor__(self, val):
                output.append("__ixor__ called")
                return self
            def __rshift__(self, val):
                output.append("__rshift__ called")
            def __rrshift__(self, val):
                output.append("__rrshift__ called")
            def __irshift__(self, val):
                output.append("__irshift__ called")
                return self
            def __lshift__(self, val):
                output.append("__lshift__ called")
            def __rlshift__(self, val):
                output.append("__rlshift__ called")
            def __ilshift__(self, val):
                output.append("__ilshift__ called")
                return self
        # Each operator is exercised three ways: normal, reflected, in-place.
        x = testall()
        x + 1
        1 + x
        x += 1
        x - 1
        1 - x
        x -= 1
        x * 1
        1 * x
        x *= 1
        if 1/2 == 0:
            x / 1
            1 / x
            x /= 1
        else:
            # True division is in effect, so "/" doesn't map to __div__ etc;
            # but the canned expected-output file requires that those get called.
            x.__div__(1)
            x.__rdiv__(1)
            x.__idiv__(1)
        x // 1
        1 // x
        x //= 1
        x % 1
        1 % x
        x %= 1
        x ** 1
        1 ** x
        x **= 1
        x | 1
        1 | x
        x |= 1
        x & 1
        1 & x
        x &= 1
        x ^ 1
        1 ^ x
        x ^= 1
        x >> 1
        1 >> x
        x >>= 1
        x << 1
        1 << x
        x <<= 1
        test_self.assertEquals(output, '''\
__add__ called
__radd__ called
__iadd__ called
__sub__ called
__rsub__ called
__isub__ called
__mul__ called
__rmul__ called
__imul__ called
__div__ called
__rdiv__ called
__idiv__ called
__floordiv__ called
__rfloordiv__ called
__ifloordiv__ called
__mod__ called
__rmod__ called
__imod__ called
__pow__ called
__rpow__ called
__ipow__ called
__or__ called
__ror__ called
__ior__ called
__and__ called
__rand__ called
__iand__ called
__xor__ called
__rxor__ called
__ixor__ called
__rshift__ called
__rrshift__ called
__irshift__ called
__lshift__ called
__rlshift__ called
__ilshift__ called
'''.splitlines())
def test_main():
    """Entry point used by the regression-test driver."""
    run_unittest(AugAssignTest)
if __name__ == '__main__':
    test_main()
| apache-2.0 |
EDUlib/eTracesX | Scripts/ExtractCours.py | 1 | 2359 | #!/usr/bin/python
import sys
import getopt
import re
import random
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
print 'Input file is :', inputfile
print 'Output file is :', outputfile
if inputfile == '' or outputfile == '':
sys.exit()
pUser =re.compile('"username": "([\w.@&\-]*)"')
pCours =re.compile('ITES\.1')
nameDict = dict()
f = open(inputfile, "r")
copy = open(outputfile, "w")
for line in f:
mCours = pCours.search(line)
if mCours:
mUser = pUser.findall(line)
newLine = ''
if len(mUser) == 1:
if mUser[0] != '':
if not nameDict.has_key(mUser[0]):
newName = ''.join(random.SystemRandom().choice('0123456789ABCDEF') for _ in range(16))
i = 0;
while (newName in nameDict.values()) and i < 1000:
newName = ''.join(random.SystemRandom().choice('0123456789ABCDEF') for _ in range(16))
i = i+1;
if i == 1000:
print "Can't find a name :", mUser[0]
sys.exit()
nameDict[mUser[0]] = newName;
# print 'Username is :', mUser[0], ' --- newName :', nameDict[mUser[0]]
newLine = re.sub('"username": "'+ mUser[0] + '"', '"username": "' + nameDict[mUser[0]] + '"', line)
# newLine = re.sub('"username": "'+ mUser[0] + '"', '"username": "' + mUser[0] + '"', line)
# newLine = line
else:
newLine = line
else:
print line
sys.exit()
if newLine != '':
copy.write(newLine)
f.close()
copy.close()
if __name__ == "__main__":
main(sys.argv[1:])
| agpl-3.0 |
imsplitbit/nova | nova/api/openstack/compute/contrib/cells.py | 1 | 15576 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.cells import rpc_driver
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
# Module-level logger for this extension.
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
# Register the cells options from nova.cells.opts so that
# CONF.cells.name / CONF.cells.capabilities are available below.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')

# Policy-based authorization helper for the os-cells extension.
authorize = extensions.extension_authorizer('compute', 'cells')
def make_cell(elem):
    """Attach the standard cell attributes and sub-elements to *elem*."""
    # Scalar attributes serialized straight from the cell dict.
    for attr_name in ('name', 'username', 'type', 'rpc_host', 'rpc_port'):
        elem.set(attr_name)

    # <capabilities> gets one child element per (key, value) item; the
    # element tag is the key (Selector(0)) and the text is the value.
    caps_elem = xmlutil.SubTemplateElement(elem, 'capabilities',
                                           selector='capabilities')
    item_elem = xmlutil.SubTemplateElement(caps_elem, xmlutil.Selector(0),
                                           selector=xmlutil.get_items)
    item_elem.text = 1

    make_capacity(elem)
def make_capacity(cell):
    """Attach the <capacities> sub-tree (ram_free / disk_free) to *cell*."""
    def units_by_mb(capacity_info):
        # One <unit_by_mb> child per (mb, unit-count) pair.
        return capacity_info['units_by_mb'].items()

    capacity = xmlutil.SubTemplateElement(cell, 'capacities',
                                          selector='capacities')
    # ram_free and disk_free share the exact same element shape.
    for tag in ('ram_free', 'disk_free'):
        free_elem = xmlutil.SubTemplateElement(capacity, tag, selector=tag)
        free_elem.set('total_mb', 'total_mb')
        unit_elem = xmlutil.SubTemplateElement(free_elem, 'unit_by_mb',
                                               selector=units_by_mb)
        unit_elem.set('mb', 0)
        unit_elem.set('unit', 1)
cell_nsmap = {None: wsgi.XMLNS_V10}
class CellTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single cell."""

    def construct(self):
        cell_elem = xmlutil.TemplateElement('cell', selector='cell')
        make_cell(cell_elem)
        return xmlutil.MasterTemplate(cell_elem, 1, nsmap=cell_nsmap)
class CellsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of cells."""

    def construct(self):
        cells_elem = xmlutil.TemplateElement('cells')
        cell_elem = xmlutil.SubTemplateElement(cells_elem, 'cell',
                                               selector='cells')
        make_cell(cell_elem)
        return xmlutil.MasterTemplate(cells_elem, 1, nsmap=cell_nsmap)
class CellDeserializer(wsgi.XMLDeserializer):
    """Deserializer to handle xml-formatted cell create requests."""

    def _extract_capabilities(self, cap_node):
        # Map each child element's tag name to its text content.
        # NOTE(review): assumes every child of <capabilities> is an element
        # node -- a whitespace/text node would have no tagName; confirm
        # the parser strips those before we get here.
        caps = {}
        for cap in cap_node.childNodes:
            cap_name = cap.tagName
            caps[cap_name] = self.extract_text(cap)
        return caps

    def _extract_cell(self, node):
        # Build a plain dict from the <cell> element.  Most fields are
        # plain text; capabilities and rpc_port need dedicated handling
        # (nested dict and int conversion respectively).
        cell = {}
        cell_node = self.find_first_child_named(node, 'cell')

        extract_fns = {
            'capabilities': self._extract_capabilities,
            'rpc_port': lambda child: int(self.extract_text(child)),
        }

        for child in cell_node.childNodes:
            name = child.tagName
            extract_fn = extract_fns.get(name, self.extract_text)
            cell[name] = extract_fn(child)
        return cell

    def default(self, string):
        """Deserialize an xml-formatted cell create request."""
        node = xmlutil.safe_minidom_parse_string(string)
        return {'body': {'cell': self._extract_cell(node)}}
def _filter_keys(item, keys):
"""
Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
"""
If the transport_url is present in the cell, derive username,
rpc_host, and rpc_port from it.
"""
if 'transport_url' not in cell_info:
return
# Disassemble the transport URL
transport_url = cell_info.pop('transport_url')
try:
transport = rpc_driver.parse_transport_url(transport_url)
except ValueError:
# Just go with None's
for key in keys:
cell_info.setdefault(key, None)
return cell_info
transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
for key in keys:
if key in cell_info:
continue
transport_field = transport_field_map.get(key, key)
cell_info[key] = transport[transport_field]
def _scrub_cell(cell, detail=False):
    """Reduce a cell record to its API-visible fields.

    Keeps only the whitelisted keys (plus capabilities when *detail* is
    set), converts the stored transport URL into the legacy rpc_* fields,
    and exposes the is_parent flag as a 'parent'/'child' type string.
    """
    wanted = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        wanted = wanted + ['capabilities']

    scrubbed = _filter_keys(cell, wanted + ['transport_url'])
    _fixup_cell_info(scrubbed, wanted)
    if cell['is_parent']:
        scrubbed['type'] = 'parent'
    else:
        scrubbed['type'] = 'child'
    return scrubbed
class Controller(object):
    """Controller for Cell resources."""

    def __init__(self, ext_mgr):
        # Compute API (unused by the handlers below but kept for parity
        # with other controllers) and the RPC client used to talk to the
        # cells manager service.
        self.compute_api = compute.API()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
        self.ext_mgr = ext_mgr

    def _get_cells(self, ctxt, req, detail=False):
        """Return all cells."""
        # Ask the CellsManager for the most recent data
        items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
        # Apply the standard ?limit/?marker pagination, then strip each
        # record down to its API-visible fields.
        items = common.limited(items, req)
        items = [_scrub_cell(item, detail=detail) for item in items]
        return dict(cells=items)

    @wsgi.serializers(xml=CellsTemplate)
    def index(self, req):
        """Return all cells in brief."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req)

    @wsgi.serializers(xml=CellsTemplate)
    def detail(self, req):
        """Return all cells in detail."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req, detail=True)

    @wsgi.serializers(xml=CellTemplate)
    def info(self, req):
        """Return name and capabilities for this cell."""
        context = req.environ['nova.context']
        authorize(context)
        cell_capabs = {}
        my_caps = CONF.cells.capabilities
        # NOTE(review): each capability is assumed to be a single
        # 'key=value' string; a value containing '=' would raise
        # ValueError on the unpack below -- confirm config validation.
        for cap in my_caps:
            key, value = cap.split('=')
            cell_capabs[key] = value
        # 'self' type marks this as the local cell; no transport info
        # is exposed for it.
        cell = {'name': CONF.cells.name,
                'type': 'self',
                'rpc_host': None,
                'rpc_port': 0,
                'username': None,
                'capabilities': cell_capabs}
        return dict(cell=cell)

    @wsgi.serializers(xml=CellTemplate)
    def capacities(self, req, id=None):
        """Return capacities for a given cell or all cells."""
        # TODO(kaushikc): return capacities as a part of cell info and
        # cells detail calls in v3, along with capabilities
        if not self.ext_mgr.is_loaded('os-cell-capacities'):
            raise exc.HTTPNotFound()
        context = req.environ['nova.context']
        authorize(context)
        try:
            capacities = self.cells_rpcapi.get_capacities(context,
                                                          cell_name=id)
        except exception.CellNotFound:
            msg = (_("Cell %(id)s not found.") % {'id': id})
            raise exc.HTTPNotFound(explanation=msg)

        return dict(cell={"capacities": capacities})

    @wsgi.serializers(xml=CellTemplate)
    def show(self, req, id):
        """Return data about the given cell name. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        return dict(cell=_scrub_cell(cell))

    def delete(self, req, id):
        """Delete a child or parent cell entry. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            num_deleted = self.cells_rpcapi.cell_delete(context, id)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        # Zero rows deleted means the cell name did not exist.
        if num_deleted == 0:
            raise exc.HTTPNotFound()
        return {}

    def _validate_cell_name(self, cell_name):
        """Validate cell name is not empty and doesn't contain '!' or '.'."""
        # '!' and '.' are reserved as cell-path separators in routing.
        if not cell_name:
            msg = _("Cell name cannot be empty")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        if '!' in cell_name or '.' in cell_name:
            msg = _("Cell name cannot contain '!' or '.'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)

    def _validate_cell_type(self, cell_type):
        """Validate cell_type is 'parent' or 'child'."""
        if cell_type not in ['parent', 'child']:
            msg = _("Cell type must be 'parent' or 'child'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)

    def _normalize_cell(self, cell, existing=None):
        """
        Normalize input cell data.  Normalizations include:

        * Converting cell['type'] to is_parent boolean.
        * Merging existing transport URL with transport information.
        """

        # Start with the cell type conversion
        if 'type' in cell:
            self._validate_cell_type(cell['type'])
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        # Avoid cell type being overwritten to 'child'
        elif existing:
            cell['is_parent'] = existing['is_parent']
        else:
            cell['is_parent'] = False

        # Now we disassemble the existing transport URL...
        transport = {}
        if existing and 'transport_url' in existing:
            transport = rpc_driver.parse_transport_url(
                existing['transport_url'])

        # Copy over the input fields
        transport_field_map = {
            'username': 'username',
            'password': 'password',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
            'virtual_host': 'rpc_virtual_host',
        }
        for key, input_field in transport_field_map.items():
            # Set the default value of the field; using setdefault()
            # lets us avoid overriding the existing transport URL
            transport.setdefault(key, None)

            # Only override the value if we're given an override
            if input_field in cell:
                transport[key] = cell.pop(input_field)

        # Now set the transport URL
        cell['transport_url'] = rpc_driver.unparse_transport_url(transport)

    @wsgi.serializers(xml=CellTemplate)
    @wsgi.deserializers(xml=CellDeserializer)
    def create(self, req, body):
        """Create a child cell entry."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        if 'name' not in cell:
            msg = _("No cell name in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        self._validate_cell_name(cell['name'])
        self._normalize_cell(cell)
        try:
            cell = self.cells_rpcapi.cell_create(context, cell)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))

    @wsgi.serializers(xml=CellTemplate)
    @wsgi.deserializers(xml=CellDeserializer)
    def update(self, req, id, body):
        """Update a child cell entry. 'id' is the cell name to update."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        # The cell name comes from the URL, not the body.
        cell.pop('id', None)
        if 'name' in cell:
            self._validate_cell_name(cell['name'])
        try:
            # NOTE(Vek): There is a race condition here if multiple
            #            callers are trying to update the cell
            #            information simultaneously.  Since this
            #            operation is administrative in nature, and
            #            will be going away in the future, I don't see
            #            it as much of a problem...
            existing = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        self._normalize_cell(cell, existing)
        try:
            cell = self.cells_rpcapi.cell_update(context, id, cell)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))

    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']
        authorize(context)
        # pop() each known key so that anything left over is unknown
        # input and can be rejected below.
        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        if body:
            msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                    "understood.")
            raise exc.HTTPBadRequest(explanation=msg)
        if updated_since:
            try:
                timeutils.parse_isotime(updated_since)
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
class Cells(extensions.ExtensionDescriptor):
    """Enables cells-related functionality such as adding neighbor cells,
    listing neighbor cells, and getting the capabilities of the local cell.
    """

    name = "Cells"
    alias = "os-cells"
    namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1"
    updated = "2013-05-14T00:00:00+00:00"

    def get_resources(self):
        """Register the os-cells resource with its extra actions."""
        controller = Controller(self.ext_mgr)
        res_ext = extensions.ResourceExtension(
            'os-cells', controller,
            collection_actions={'detail': 'GET',
                                'info': 'GET',
                                'sync_instances': 'POST',
                                'capacities': 'GET'},
            member_actions={'capacities': 'GET'})
        return [res_ext]
| apache-2.0 |
Triv90/Heat | heat/tests/test_parser.py | 1 | 49333 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from nose.plugins.attrib import attr
import mox
import uuid
from heat.common import context
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import parser
from heat.engine import parameters
from heat.engine import template
from heat.tests.utils import stack_delete_after
from heat.tests import generic_resource as generic_rsrc
import heat.db as db_api
def join(raw):
    """Resolve any Fn::Join constructs in *raw* via the template parser."""
    return parser.Template.resolve_joins(raw)
@attr(tag=['unit', 'parser'])
@attr(speed='fast')
class ParserTest(unittest.TestCase):
    """Tests for Template.resolve_joins: non-join structures must be
    deep-copied unchanged, and Fn::Join constructs must be flattened."""

    def test_list(self):
        raw = ['foo', 'bar', 'baz']
        parsed = join(raw)
        for i in xrange(len(raw)):
            self.assertEqual(parsed[i], raw[i])
        # Result is a copy, never the same object.
        self.assertTrue(parsed is not raw)

    def test_dict(self):
        raw = {'foo': 'bar', 'blarg': 'wibble'}
        parsed = join(raw)
        for k in raw:
            self.assertEqual(parsed[k], raw[k])
        self.assertTrue(parsed is not raw)

    def test_dict_list(self):
        raw = {'foo': ['bar', 'baz'], 'blarg': 'wibble'}
        parsed = join(raw)
        self.assertEqual(parsed['blarg'], raw['blarg'])
        for i in xrange(len(raw['foo'])):
            self.assertEqual(parsed['foo'][i], raw['foo'][i])
        # Copying is deep: nested containers are new objects too.
        self.assertTrue(parsed is not raw)
        self.assertTrue(parsed['foo'] is not raw['foo'])

    def test_list_dict(self):
        raw = [{'foo': 'bar', 'blarg': 'wibble'}, 'baz', 'quux']
        parsed = join(raw)
        for i in xrange(1, len(raw)):
            self.assertEqual(parsed[i], raw[i])
        for k in raw[0]:
            self.assertEqual(parsed[0][k], raw[0][k])
        self.assertTrue(parsed is not raw)
        self.assertTrue(parsed[0] is not raw[0])

    def test_join(self):
        raw = {'Fn::Join': [' ', ['foo', 'bar', 'baz']]}
        self.assertEqual(join(raw), 'foo bar baz')

    def test_join_none(self):
        # None entries are dropped from the joined output.
        raw = {'Fn::Join': [' ', ['foo', None, 'baz']]}
        self.assertEqual(join(raw), 'foo baz')

    def test_join_list(self):
        raw = [{'Fn::Join': [' ', ['foo', 'bar', 'baz']]}, 'blarg', 'wibble']
        parsed = join(raw)
        self.assertEqual(parsed[0], 'foo bar baz')
        for i in xrange(1, len(raw)):
            self.assertEqual(parsed[i], raw[i])
        self.assertTrue(parsed is not raw)

    def test_join_dict_val(self):
        raw = {'quux': {'Fn::Join': [' ', ['foo', 'bar', 'baz']]},
               'blarg': 'wibble'}
        parsed = join(raw)
        self.assertEqual(parsed['quux'], 'foo bar baz')
        self.assertEqual(parsed['blarg'], raw['blarg'])
        self.assertTrue(parsed is not raw)

    def test_join_recursive(self):
        # Inner joins are resolved before the outer one.
        raw = {'Fn::Join': ['\n', [{'Fn::Join':
                                    [' ', ['foo', 'bar']]}, 'baz']]}
        self.assertEqual(join(raw), 'foo bar\nbaz')
# Canned template for the Fn::FindInMap tests below: "ValidMapping" is
# well-formed; the others are deliberately malformed (wrong value shapes)
# to exercise the error paths.
mapping_template = template_format.parse('''{
"Mappings" : {
"ValidMapping" : {
"TestKey" : { "TestValue" : "wibble" }
},
"InvalidMapping" : {
"ValueList" : [ "foo", "bar" ],
"ValueString" : "baz"
},
"MapList": [ "foo", { "bar" : "baz" } ],
"MapString": "foobar"
}
}''')
@attr(tag=['unit', 'parser', 'template'])
@attr(speed='fast')
class TemplateTest(unittest.TestCase):
    """Unit tests for parser.Template section access and intrinsic-function
    resolution (FindInMap, Ref, Join, Base64)."""

    def setUp(self):
        self.m = mox.Mox()

    def tearDown(self):
        self.m.UnsetStubs()

    def test_defaults(self):
        # An empty template gets defaults for every section except VERSION,
        # which must raise KeyError when absent.
        empty = parser.Template({})
        try:
            empty[template.VERSION]
        except KeyError:
            pass
        else:
            self.fail('Expected KeyError for version not present')
        self.assertEqual(empty[template.DESCRIPTION], 'No description')
        self.assertEqual(empty[template.MAPPINGS], {})
        self.assertEqual(empty[template.PARAMETERS], {})
        self.assertEqual(empty[template.RESOURCES], {})
        self.assertEqual(empty[template.OUTPUTS], {})

    def test_invalid_section(self):
        # Unknown top-level keys are not exposed through item access.
        tmpl = parser.Template({'Foo': ['Bar']})
        try:
            tmpl['Foo']
        except KeyError:
            pass
        else:
            self.fail('Expected KeyError for invalid template key')

    def test_find_in_map(self):
        tmpl = parser.Template(mapping_template)
        find = {'Fn::FindInMap': ["ValidMapping", "TestKey", "TestValue"]}
        self.assertEqual(tmpl.resolve_find_in_map(find), "wibble")

    def test_find_in_invalid_map(self):
        # Structurally-broken mappings must raise KeyError, not return junk.
        tmpl = parser.Template(mapping_template)
        finds = ({'Fn::FindInMap': ["InvalidMapping", "ValueList", "foo"]},
                 {'Fn::FindInMap': ["InvalidMapping", "ValueString", "baz"]},
                 {'Fn::FindInMap': ["MapList", "foo", "bar"]},
                 {'Fn::FindInMap': ["MapString", "foo", "bar"]})

        for find in finds:
            self.assertRaises(KeyError, tmpl.resolve_find_in_map, find)

    def test_bad_find_in_map(self):
        # Malformed Fn::FindInMap arguments (wrong type / too few items).
        tmpl = parser.Template(mapping_template)
        finds = ({'Fn::FindInMap': "String"},
                 {'Fn::FindInMap': {"Dict": "String"}},
                 {'Fn::FindInMap': ["ShortList", "foo"]},
                 {'Fn::FindInMap': ["ReallyShortList"]})

        for find in finds:
            self.assertRaises(KeyError, tmpl.resolve_find_in_map, find)

    def test_param_refs(self):
        params = {'foo': 'bar', 'blarg': 'wibble'}
        p_snippet = {"Ref": "foo"}
        self.assertEqual(parser.Template.resolve_param_refs(p_snippet, params),
                         "bar")

    def test_param_refs_resource(self):
        # A Ref to a name that is not a parameter is left untouched
        # (it may be a resource reference resolved later).
        params = {'foo': 'bar', 'blarg': 'wibble'}
        r_snippet = {"Ref": "baz"}
        self.assertEqual(parser.Template.resolve_param_refs(r_snippet, params),
                         r_snippet)

    def test_param_ref_missing(self):
        tmpl = {'Parameters': {'foo': {'Type': 'String', 'Required': True}}}
        params = parameters.Parameters('test', tmpl)
        snippet = {"Ref": "foo"}
        self.assertRaises(exception.UserParameterMissing,
                          parser.Template.resolve_param_refs,
                          snippet, params)

    def test_resource_refs(self):
        # A Ref to a known resource resolves to that resource's FnGetRefId.
        resources = {'foo': self.m.CreateMock(resource.Resource),
                     'blarg': self.m.CreateMock(resource.Resource)}
        resources['foo'].FnGetRefId().AndReturn('bar')
        self.m.ReplayAll()

        r_snippet = {"Ref": "foo"}
        self.assertEqual(parser.Template.resolve_resource_refs(r_snippet,
                                                               resources),
                         "bar")
        self.m.VerifyAll()

    def test_resource_refs_param(self):
        # A Ref to an unknown name passes through unchanged.
        resources = {'foo': 'bar', 'blarg': 'wibble'}
        p_snippet = {"Ref": "baz"}
        self.assertEqual(parser.Template.resolve_resource_refs(p_snippet,
                                                               resources),
                         p_snippet)

    def test_join_reduce(self):
        # Adjacent literal strings are pre-merged; unresolved Refs are kept.
        join = {"Fn::Join": [" ", ["foo", "bar", "baz", {'Ref': 'baz'},
                "bink", "bonk"]]}
        self.assertEqual(
            parser.Template.reduce_joins(join),
            {"Fn::Join": [" ", ["foo bar baz", {'Ref': 'baz'}, "bink bonk"]]})

        join = {"Fn::Join": [" ", ["foo", {'Ref': 'baz'},
                                   "bink"]]}
        self.assertEqual(
            parser.Template.reduce_joins(join),
            {"Fn::Join": [" ", ["foo", {'Ref': 'baz'}, "bink"]]})

        join = {"Fn::Join": [" ", [{'Ref': 'baz'}]]}
        self.assertEqual(
            parser.Template.reduce_joins(join),
            {"Fn::Join": [" ", [{'Ref': 'baz'}]]})

    def test_join(self):
        join = {"Fn::Join": [" ", ["foo", "bar"]]}
        self.assertEqual(parser.Template.resolve_joins(join), "foo bar")

    def test_join_string(self):
        # The second argument must be a list, not a string.
        join = {"Fn::Join": [" ", "foo"]}
        self.assertRaises(TypeError, parser.Template.resolve_joins,
                          join)

    def test_join_dict(self):
        join = {"Fn::Join": [" ", {"foo": "bar"}]}
        self.assertRaises(TypeError, parser.Template.resolve_joins,
                          join)

    def test_join_wrong_num_args(self):
        # Fn::Join takes exactly [delimiter, list].
        join0 = {"Fn::Join": []}
        self.assertRaises(ValueError, parser.Template.resolve_joins,
                          join0)
        join1 = {"Fn::Join": [" "]}
        self.assertRaises(ValueError, parser.Template.resolve_joins,
                          join1)
        join3 = {"Fn::Join": [" ", {"foo": "bar"}, ""]}
        self.assertRaises(ValueError, parser.Template.resolve_joins,
                          join3)

    def test_join_string_nodelim(self):
        # A bare string argument (even of length 2 or 3) is rejected.
        join1 = {"Fn::Join": "o"}
        self.assertRaises(TypeError, parser.Template.resolve_joins,
                          join1)
        join2 = {"Fn::Join": "oh"}
        self.assertRaises(TypeError, parser.Template.resolve_joins,
                          join2)
        join3 = {"Fn::Join": "ohh"}
        self.assertRaises(TypeError, parser.Template.resolve_joins,
                          join3)

    def test_join_dict_nodelim(self):
        join1 = {"Fn::Join": {"foo": "bar"}}
        self.assertRaises(TypeError, parser.Template.resolve_joins,
                          join1)
        join2 = {"Fn::Join": {"foo": "bar", "blarg": "wibble"}}
        self.assertRaises(TypeError, parser.Template.resolve_joins,
                          join2)
        join3 = {"Fn::Join": {"foo": "bar", "blarg": "wibble", "baz": "quux"}}
        self.assertRaises(TypeError, parser.Template.resolve_joins,
                          join3)

    def test_base64(self):
        snippet = {"Fn::Base64": "foobar"}
        # For now, the Base64 function just returns the original text, and
        # does not convert to base64 (see issue #133)
        self.assertEqual(parser.Template.resolve_base64(snippet), "foobar")

    def test_base64_list(self):
        list_snippet = {"Fn::Base64": ["foobar"]}
        self.assertRaises(TypeError, parser.Template.resolve_base64,
                          list_snippet)

    def test_base64_dict(self):
        dict_snippet = {"Fn::Base64": {"foo": "bar"}}
        self.assertRaises(TypeError, parser.Template.resolve_base64,
                          dict_snippet)
@attr(tag=['unit', 'parser', 'stack'])
@attr(speed='fast')
class StackTest(unittest.TestCase):
def setUp(self):
self.username = 'parser_stack_test_user'
self.m = mox.Mox()
self.ctx = context.get_admin_context()
self.m.StubOutWithMock(self.ctx, 'username')
self.ctx.username = self.username
self.ctx.tenant_id = 'test_tenant'
generic_rsrc.GenericResource.properties_schema = {}
resource._register_class('GenericResourceType',
generic_rsrc.GenericResource)
self.m.ReplayAll()
def tearDown(self):
self.m.UnsetStubs()
def test_state_defaults(self):
stack = parser.Stack(None, 'test_stack', parser.Template({}))
self.assertEqual(stack.state, None)
self.assertEqual(stack.state_description, '')
def test_state(self):
stack = parser.Stack(None, 'test_stack', parser.Template({}),
state='foo')
self.assertEqual(stack.state, 'foo')
stack.state_set('bar', '')
self.assertEqual(stack.state, 'bar')
def test_state_description(self):
stack = parser.Stack(None, 'test_stack', parser.Template({}),
state_description='quux')
self.assertEqual(stack.state_description, 'quux')
stack.state_set('blarg', 'wibble')
self.assertEqual(stack.state_description, 'wibble')
def test_load_nonexistant_id(self):
self.assertRaises(exception.NotFound, parser.Stack.load,
None, -1)
# Note tests creating a stack should be decorated with @stack_delete_after
# to ensure the self.stack is properly cleaned up
@stack_delete_after
def test_identifier(self):
self.stack = parser.Stack(self.ctx, 'identifier_test',
parser.Template({}))
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(identifier.tenant, self.ctx.tenant_id)
self.assertEqual(identifier.stack_name, 'identifier_test')
self.assertTrue(identifier.stack_id)
self.assertFalse(identifier.path)
@stack_delete_after
def test_set_param_id(self):
dummy_stackid = 'STACKABCD1234'
self.m.StubOutWithMock(uuid, 'uuid4')
uuid.uuid4().AndReturn(dummy_stackid)
self.m.ReplayAll()
self.stack = parser.Stack(self.ctx, 'param_arn_test',
parser.Template({}))
exp_prefix = 'arn:openstack:heat::test_tenant:stacks/param_arn_test/'
self.assertEqual(self.stack.parameters['AWS::StackId'],
exp_prefix + 'None')
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.parameters['AWS::StackId'],
exp_prefix + dummy_stackid)
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
self.m.VerifyAll()
@stack_delete_after
def test_load_param_id(self):
self.stack = parser.Stack(self.ctx, 'param_load_arn_test',
parser.Template({}))
self.stack.store()
identifier = self.stack.identifier()
self.assertEqual(self.stack.parameters['AWS::StackId'],
identifier.arn())
newstack = parser.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual(newstack.parameters['AWS::StackId'], identifier.arn())
@stack_delete_after
def test_created_time(self):
self.stack = parser.Stack(self.ctx, 'creation_time_test',
parser.Template({}))
self.assertEqual(self.stack.created_time, None)
self.stack.store()
self.assertNotEqual(self.stack.created_time, None)
@stack_delete_after
def test_updated_time(self):
self.stack = parser.Stack(self.ctx, 'update_time_test',
parser.Template({}))
self.assertEqual(self.stack.updated_time, None)
self.stack.store()
stored_time = self.stack.updated_time
self.stack.state_set(self.stack.CREATE_IN_PROGRESS, 'testing')
self.assertNotEqual(self.stack.updated_time, None)
self.assertNotEqual(self.stack.updated_time, stored_time)
@stack_delete_after
def test_delete(self):
self.stack = parser.Stack(self.ctx, 'delete_test',
parser.Template({}))
stack_id = self.stack.store()
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertNotEqual(db_s, None)
self.stack.delete()
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertEqual(db_s, None)
self.assertEqual(self.stack.state, self.stack.DELETE_COMPLETE)
@stack_delete_after
def test_delete_rollback(self):
self.stack = parser.Stack(self.ctx, 'delete_rollback_test',
parser.Template({}), disable_rollback=False)
stack_id = self.stack.store()
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertNotEqual(db_s, None)
self.stack.delete(action=self.stack.ROLLBACK)
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertEqual(db_s, None)
self.assertEqual(self.stack.state, self.stack.ROLLBACK_COMPLETE)
@stack_delete_after
def test_delete_badaction(self):
self.stack = parser.Stack(self.ctx, 'delete_badaction_test',
parser.Template({}))
stack_id = self.stack.store()
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertNotEqual(db_s, None)
self.stack.delete(action="wibble")
db_s = db_api.stack_get(self.ctx, stack_id)
self.assertNotEqual(db_s, None)
self.assertEqual(self.stack.state, self.stack.DELETE_FAILED)
@stack_delete_after
def test_update_badstate(self):
self.stack = parser.Stack(self.ctx, 'test_stack', parser.Template({}),
state=parser.Stack.CREATE_FAILED)
stack_id = self.stack.store()
self.assertEqual(self.stack.state, parser.Stack.CREATE_FAILED)
self.stack.update({})
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
@stack_delete_after
def test_resource_by_refid(self):
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'resource_by_refid_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertTrue('AResource' in self.stack)
resource = self.stack['AResource']
resource.resource_id_set('aaaa')
self.assertNotEqual(None, resource)
self.assertEqual(resource, self.stack.resource_by_refid('aaaa'))
resource.state = resource.DELETE_IN_PROGRESS
self.assertEqual(None, self.stack.resource_by_refid('aaaa'))
self.assertEqual(None, self.stack.resource_by_refid('bbbb'))
@stack_delete_after
def test_update_add(self):
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertTrue('BResource' in self.stack)
@stack_delete_after
def test_update_remove(self):
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertFalse('BResource' in self.stack)
@stack_delete_after
def test_update_description(self):
tmpl = {'Description': 'ATemplate',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Description': 'BTemplate',
'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertEqual(self.stack.t[template.DESCRIPTION], 'BTemplate')
@stack_delete_after
def test_update_modify_ok_replace(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_update
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'xyz')
self.m.VerifyAll()
@stack_delete_after
def test_update_modify_update_failed(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_update
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_FAILED)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
self.m.VerifyAll()
@stack_delete_after
def test_update_modify_replace_failed_delete(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_update
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
# make the update fail deleting the existing resource
self.m.StubOutWithMock(resource.Resource, 'destroy')
resource.Resource.destroy().AndReturn("Error")
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
self.m.VerifyAll()
# Unset here so destroy() is not stubbed for stack.delete cleanup
self.m.UnsetStubs()
@stack_delete_after
def test_update_modify_replace_failed_create(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=True)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_update
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
# patch in a dummy handle_create making the replace fail creating
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
self.m.VerifyAll()
@stack_delete_after
def test_update_add_failed_create(self):
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_create making BResource fail creating
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_FAILED)
self.assertTrue('BResource' in self.stack)
# Reload the stack from the DB and prove that it contains the failed
# resource (to ensure it will be deleted on stack delete)
re_stack = parser.Stack.load(self.ctx, stack_id=self.stack.id)
self.assertTrue('BResource' in re_stack)
self.m.VerifyAll()
@stack_delete_after
def test_update_rollback(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# There will be two calls to handle_update, one for the new template
# then another (with the initial template) for rollback
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.handle_update(
tmpl['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
# patch in a dummy handle_create making the replace fail when creating
# the replacement resource, but succeed the second call (rollback)
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
generic_rsrc.GenericResource.handle_create().AndReturn(None)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.m.VerifyAll()
@stack_delete_after
def test_update_rollback_fail(self):
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'xyz'}}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# There will be two calls to handle_update, one for the new template
# then another (with the initial template) for rollback
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.handle_update(
tmpl['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
# patch in a dummy handle_create making the replace fail when creating
# the replacement resource, and again on the second call (rollback)
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_FAILED)
self.m.VerifyAll()
@stack_delete_after
def test_update_rollback_add(self):
tmpl = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy handle_create making the replace fail when creating
# the replacement resource, and succeed on the second call (rollback)
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertFalse('BResource' in self.stack)
self.m.VerifyAll()
@stack_delete_after
def test_update_rollback_remove(self):
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType'},
'BResource': {'Type': 'GenericResourceType'}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
tmpl2 = {'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
# patch in a dummy destroy making the delete fail
self.m.StubOutWithMock(resource.Resource, 'destroy')
resource.Resource.destroy().AndReturn('Error')
self.m.ReplayAll()
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertTrue('BResource' in self.stack)
self.m.VerifyAll()
# Unset here so destroy() is not stubbed for stack.delete cleanup
self.m.UnsetStubs()
@stack_delete_after
def test_update_replace_by_reference(self):
'''
assertion:
changes in dynamic attributes, due to other resources been updated
are not ignored and can cause dependant resources to be updated.
'''
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'smelly'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.assertEqual(self.stack['BResource'].properties['Foo'],
'AResource')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
br2_snip = {'Type': 'GenericResourceType',
'Properties': {'Foo': 'inst-007'}}
generic_rsrc.GenericResource.handle_update(
br2_snip).AndReturn(
resource.Resource.UPDATE_REPLACE)
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'FnGetRefId')
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'AResource')
generic_rsrc.GenericResource.FnGetRefId().MultipleTimes().AndReturn(
'inst-007')
self.m.ReplayAll()
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2))
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.UPDATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'smelly')
self.assertEqual(self.stack['BResource'].properties['Foo'], 'inst-007')
self.m.VerifyAll()
@stack_delete_after
def test_update_by_reference_and_rollback_1(self):
'''
assertion:
check that rollback still works with dynamic metadata
this test fails the first instance
'''
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'smelly'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.assertEqual(self.stack['BResource'].properties['Foo'],
'AResource')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'FnGetRefId')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
# mocks for first (failed update)
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'AResource')
# mock to make the replace fail when creating the replacement resource
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
# mocks for second rollback update
generic_rsrc.GenericResource.handle_update(
tmpl['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.handle_create().AndReturn(None)
generic_rsrc.GenericResource.FnGetRefId().MultipleTimes().AndReturn(
'AResource')
self.m.ReplayAll()
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2),
disable_rollback=False)
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.m.VerifyAll()
@stack_delete_after
def test_update_by_reference_and_rollback_2(self):
'''
assertion:
check that rollback still works with dynamic metadata
this test fails the second instance
'''
# patch in a dummy property schema for GenericResource
dummy_schema = {'Foo': {'Type': 'String'}}
generic_rsrc.GenericResource.properties_schema = dummy_schema
tmpl = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'abc'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
tmpl2 = {'Resources': {
'AResource': {'Type': 'GenericResourceType',
'Properties': {'Foo': 'smelly'}},
'BResource': {'Type': 'GenericResourceType',
'Properties': {
'Foo': {'Ref': 'AResource'}}}}}
self.stack = parser.Stack(self.ctx, 'update_test_stack',
template.Template(tmpl),
disable_rollback=False)
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.assertEqual(self.stack['BResource'].properties['Foo'],
'AResource')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_update')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'FnGetRefId')
self.m.StubOutWithMock(generic_rsrc.GenericResource, 'handle_create')
# mocks for first and second (failed update)
generic_rsrc.GenericResource.handle_update(
tmpl2['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
br2_snip = {'Type': 'GenericResourceType',
'Properties': {'Foo': 'inst-007'}}
generic_rsrc.GenericResource.handle_update(
br2_snip).AndReturn(
resource.Resource.UPDATE_REPLACE)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'AResource')
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.UPDATE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.DELETE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.DELETE_COMPLETE)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.properties.validate()
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.CREATE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# mock to make the replace fail when creating the second
# replacement resource
generic_rsrc.GenericResource.handle_create().AndReturn(None)
generic_rsrc.GenericResource.handle_create().AndRaise(Exception)
# mocks for second rollback update
generic_rsrc.GenericResource.handle_update(
tmpl['Resources']['AResource']).AndReturn(
resource.Resource.UPDATE_REPLACE)
br2_snip = {'Type': 'GenericResourceType',
'Properties': {'Foo': 'AResource'}}
generic_rsrc.GenericResource.handle_update(
br2_snip).AndReturn(
resource.Resource.UPDATE_REPLACE)
# self.state_set(self.DELETE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
# self.state_set(self.DELETE_IN_PROGRESS)
generic_rsrc.GenericResource.FnGetRefId().AndReturn(
'inst-007')
generic_rsrc.GenericResource.handle_create().AndReturn(None)
generic_rsrc.GenericResource.handle_create().AndReturn(None)
# reverting to AResource
generic_rsrc.GenericResource.FnGetRefId().MultipleTimes().AndReturn(
'AResource')
self.m.ReplayAll()
updated_stack = parser.Stack(self.ctx, 'updated_stack',
template.Template(tmpl2),
disable_rollback=False)
self.stack.update(updated_stack)
self.assertEqual(self.stack.state, parser.Stack.ROLLBACK_COMPLETE)
self.assertEqual(self.stack['AResource'].properties['Foo'], 'abc')
self.m.VerifyAll()
def test_stack_name_valid(self):
stack = parser.Stack(None, 's', parser.Template({}))
stack = parser.Stack(None, 'stack123', parser.Template({}))
stack = parser.Stack(None, 'test.stack', parser.Template({}))
stack = parser.Stack(None, 'test_stack', parser.Template({}))
stack = parser.Stack(None, 'TEST', parser.Template({}))
stack = parser.Stack(None, 'test-stack', parser.Template({}))
def test_stack_name_invalid(self):
self.assertRaises(ValueError, parser.Stack, None, '_foo',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '1bad',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '.kcats',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, ' teststack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '^-^',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '\"stack\"',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '1234',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'cat|dog',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '$(foo)',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test/stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test\stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test::stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test;stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, 'test~stack',
parser.Template({}))
self.assertRaises(ValueError, parser.Stack, None, '#test',
parser.Template({}))
@stack_delete_after
def test_resource_state_get_att(self):
tmpl = {
'Resources': {'AResource': {'Type': 'GenericResourceType'}},
'Outputs': {'TestOutput': {'Value': {
'Fn::GetAtt': ['AResource', 'Foo']}}
}
}
self.stack = parser.Stack(self.ctx, 'resource_state_get_att',
template.Template(tmpl))
self.stack.store()
self.stack.create()
self.assertEqual(self.stack.state, parser.Stack.CREATE_COMPLETE)
self.assertTrue('AResource' in self.stack)
rsrc = self.stack['AResource']
rsrc.resource_id_set('aaaa')
self.assertEqual('AResource', rsrc.FnGetAtt('foo'))
for state in (
rsrc.CREATE_IN_PROGRESS,
rsrc.CREATE_COMPLETE,
rsrc.UPDATE_IN_PROGRESS,
rsrc.UPDATE_COMPLETE):
rsrc.state = state
self.assertEqual('AResource', self.stack.output('TestOutput'))
for state in (
rsrc.CREATE_FAILED,
rsrc.DELETE_IN_PROGRESS,
rsrc.DELETE_FAILED,
rsrc.DELETE_COMPLETE,
rsrc.UPDATE_FAILED,
None):
rsrc.state = state
self.assertEqual(None, self.stack.output('TestOutput'))
rsrc.state = rsrc.CREATE_COMPLETE
| apache-2.0 |
mrquim/mrquimrepo | plugin.video.playlistLoader/resources/lib/chardet/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
| gpl-2.0 |
burnash/skype4py | Skype4Py/lang/no.py | 23 | 6713 | apiAttachAvailable = u'API tilgjengelig'
apiAttachNotAvailable = u'Ikke tilgjengelig'
apiAttachPendingAuthorization = u'Venter p\xe5 \xe5 bli godkjent'
apiAttachRefused = u'Avsl\xe5tt'
apiAttachSuccess = u'Vellykket'
apiAttachUnknown = u'Ukjent'
budDeletedFriend = u'Slettet fra kontaktlisten'
budFriend = u'Venn'
budNeverBeenFriend = u'Aldri v\xe6rt i kontaktlisten'
budPendingAuthorization = u'Venter p\xe5 \xe5 bli godkjent'
budUnknown = u'Ukjent'
cfrBlockedByRecipient = u'Anrop blokkert av mottaker'
cfrMiscError = u'Diverse feil'
cfrNoCommonCodec = u'Ingen felles kodek'
cfrNoProxyFound = u'Finner ingen proxy'
cfrNotAuthorizedByRecipient = u'Gjeldende bruker er ikke godkjent av mottakeren.'
cfrRecipientNotFriend = u'Mottakeren er ikke en venn'
cfrRemoteDeviceError = u'Problem med ekstern lydenhet'
cfrSessionTerminated = u'\xd8kt avsluttet'
cfrSoundIOError = u'I/U-feil for lyd'
cfrSoundRecordingError = u'Lydinnspillingsfeil'
cfrUnknown = u'Ukjent'
cfrUserDoesNotExist = u'Bruker/telefonnummer finnes ikke'
cfrUserIsOffline = u'Hun eller han er frakoblet'
chsAllCalls = u'Foreldet dialogboks'
chsDialog = u'Dialogboks'
chsIncomingCalls = u'Flere m\xe5 godkjenne'
chsLegacyDialog = u'Foreldet dialogboks'
chsMissedCalls = u'Dialogboks'
chsMultiNeedAccept = u'Flere m\xe5 godkjenne'
chsMultiSubscribed = u'Flere abonnert'
chsOutgoingCalls = u'Flere abonnert'
chsUnknown = u'Ukjent'
chsUnsubscribed = u'Ikke abonnert'
clsBusy = u'Opptatt'
clsCancelled = u'Avbryt'
clsEarlyMedia = u'Spiller tidlige media (Early Media)'
clsFailed = u'beklager, anropet feilet!'
clsFinished = u'Avsluttet'
clsInProgress = u'Anrop p\xe5g\xe5r'
clsLocalHold = u'Lokalt parkert samtale'
clsMissed = u'Tapt anrop'
clsOnHold = u'Parkert'
clsRefused = u'Avsl\xe5tt'
clsRemoteHold = u'Eksternt parkert samtale'
clsRinging = u'Anrop'
clsRouting = u'Ruting'
clsTransferred = u'Ukjent'
clsTransferring = u'Ukjent'
clsUnknown = u'Ukjent'
clsUnplaced = u'Aldri plassert'
clsVoicemailBufferingGreeting = u'Bufrer talepostintro'
clsVoicemailCancelled = u'Talepostmelding er annullert'
clsVoicemailFailed = u'Talepost feilet'
clsVoicemailPlayingGreeting = u'Spiller av hilsen'
clsVoicemailRecording = u'Tar opp talepost'
clsVoicemailSent = u'Talepostmelding er sendt'
clsVoicemailUploading = u'Laster opp talepost'
cltIncomingP2P = u'Innkommende P2P-anrop'
cltIncomingPSTN = u'Innkommende telefonanrop'
cltOutgoingP2P = u'Utg\xe5ende P2P-anrop'
cltOutgoingPSTN = u'Utg\xe5ende telefonanrop'
cltUnknown = u'Ukjent'
cmeAddedMembers = u'Medlemmer som er lagt til'
cmeCreatedChatWith = u'Opprettet tekstsamtale med'
cmeEmoted = u'Ukjent'
cmeLeft = u'Forlatt'
cmeSaid = u'Sa'
cmeSawMembers = u'Medlemmer som ble sett'
cmeSetTopic = u'Angitt emne'
cmeUnknown = u'Ukjent'
cmsRead = u'Lest'
cmsReceived = u'Mottatt'
cmsSending = u'Sender...'
cmsSent = u'Sendt'
cmsUnknown = u'Ukjent'
conConnecting = u'Kobler til'
conOffline = u'Frakoblet'
conOnline = u'P\xe5logget'
conPausing = u'Settes i pause'
conUnknown = u'Ukjent'
cusAway = u'Borte'
cusDoNotDisturb = u'Opptatt'
cusInvisible = u'Vis som Usynlig'
cusLoggedOut = u'Frakoblet'
cusNotAvailable = u'Ikke tilgjengelig'
cusOffline = u'Frakoblet'
cusOnline = u'P\xe5logget'
cusSkypeMe = u'Skype Meg'
cusUnknown = u'Ukjent'
cvsBothEnabled = u'Videosending og -mottak'
cvsNone = u'Ingen video'
cvsReceiveEnabled = u'Videomottak'
cvsSendEnabled = u'Videosending'
cvsUnknown = u''
grpAllFriends = u'Alle venner'
grpAllUsers = u'Alle brukere'
grpCustomGroup = u'Tilpasset'
grpOnlineFriends = u'Elektroniske venner'
grpPendingAuthorizationFriends = u'Venter p\xe5 \xe5 bli godkjent'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'Nylig kontaktede brukere'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'Skype-venner'
grpSkypeOutFriends = u'SkypeOut-venner'
grpUngroupedFriends = u'Usorterte venner'
grpUnknown = u'Ukjent'
grpUsersAuthorizedByMe = u'Godkjent av meg'
grpUsersBlockedByMe = u'Blokkert av meg'
grpUsersWaitingMyAuthorization = u'Venter p\xe5 min godkjenning'
leaAddDeclined = u'Tillegging avvist'
leaAddedNotAuthorized = u'Den som legger til, m\xe5 v\xe6re autorisert'
leaAdderNotFriend = u'Den som legger til, m\xe5 v\xe6re en venn'
leaUnknown = u'Ukjent'
leaUnsubscribe = u'Ikke abonnert'
leaUserIncapable = u'Bruker forhindret'
leaUserNotFound = u'Finner ikke bruker'
olsAway = u'Borte'
olsDoNotDisturb = u'Opptatt'
olsNotAvailable = u'Ikke tilgjengelig'
olsOffline = u'Frakoblet'
olsOnline = u'P\xe5logget'
olsSkypeMe = u'Skype Meg'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'Ukjent'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'Kvinne'
usexMale = u'Mann'
usexUnknown = u'Ukjent'
vmrConnectError = u'Koblingsfeil'
vmrFileReadError = u'Fillesingsfeil'
vmrFileWriteError = u'Filskrivingsfeil'
vmrMiscError = u'Diverse feil'
vmrNoError = u'Ingen feil'
vmrNoPrivilege = u'Intet talepostprivilegium'
vmrNoVoicemail = u'Ingen slik talepost'
vmrPlaybackError = u'Avspillingsfeil'
vmrRecordingError = u'Innspillingsfeil'
vmrUnknown = u'Ukjent'
vmsBlank = u'Tom'
vmsBuffering = u'Bufring'
vmsDeleting = u'Sletter'
vmsDownloading = u'Laster ned'
vmsFailed = u'Mislyktes'
vmsNotDownloaded = u'Ikke nedlastet'
vmsPlayed = u'Spilt av'
vmsPlaying = u'Spiller av'
vmsRecorded = u'Innspilt'
vmsRecording = u'Tar opp talepost'
vmsUnknown = u'Ukjent'
vmsUnplayed = u'Ikke avspilt'
vmsUploaded = u'Lastet opp'
vmsUploading = u'Laster opp'
vmtCustomGreeting = u'Tilpasset hilsen'
vmtDefaultGreeting = u'Standardhilsen'
vmtIncoming = u'Innkommende talepost'
vmtOutgoing = u'Utg\xe5ende'
vmtUnknown = u'Ukjent'
vssAvailable = u'Tilgjengelig'
vssNotAvailable = u'Ikke tilgjengelig'
vssPaused = u'Satt i pause'
vssRejected = u'Avvist'
vssRunning = u'Kj\xf8rer'
vssStarting = u'Starter'
vssStopping = u'Stanser'
vssUnknown = u'Ukjent'
| bsd-3-clause |
inflatus/Python | Weather/weather_email.py | 1 | 2252 | # using JSON and the WeatherUnderground API
# parsing data and emailing it to myself
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import urllib.request
import json
from API_KEYS import EMAIL_ADDRESS, EMAIL_PASSWORD
from API_KEYS import WEATHER_UNDERGROUND_KEY
# getting the url
f = urllib.request.urlopen('http://api.wunderground.com/api/' + WEATHER_UNDERGROUND_KEY + '/geolookup/conditions/q/IN/Martinsville.json')
# decoding the text
json_string = f.read().decode('utf-8')
# parsing the information
parsed_json = json.loads(json_string)
location = parsed_json['location']['city']
temp_f = parsed_json['current_observation']['temp_f']
relative_humidity = parsed_json['current_observation']['relative_humidity']
wind_mph = parsed_json['current_observation']['wind_mph']
wind_gust = parsed_json['current_observation']['wind_gust_mph']
pressure_mb = parsed_json['current_observation']['pressure_mb']
feels_like = parsed_json['current_observation']['feelslike_f']
visibility_mi = parsed_json['current_observation']['visibility_mi']
precipitation_in = parsed_json['current_observation']['precip_today_in']
weather = parsed_json['current_observation']['weather']
# setting the data for location and temperature
data = (('Current temperature in {} is: {} F\n'.format(location, temp_f)) +
('Relative Humidity is at: {}\n'.format(relative_humidity)) +
('Winds are: {} mph\n'.format(wind_mph)) +
('Wind gusts are at: {} mph\n'.format(wind_gust)) +
('Pressure is: {} mb\n'.format(pressure_mb)) +
('Feels like: {} F\n'.format(feels_like)) +
('Visibility is: {} mi\n'.format(visibility_mi)) +
('Precipitation today: {} inches\n'.format(precipitation_in)) +
('General weather is: {}'.format(weather)))
# compose email message
fromaddr = (EMAIL_ADDRESS)
toaddr = (EMAIL_ADDRESS)
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Current Weather"
body = (data)
msg.attach(MIMEText(body, 'plain'))
# authenticate and send email
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, (EMAIL_PASSWORD))
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
| mit |
dennybaa/st2 | st2reactor/st2reactor/rules/filter.py | 2 | 7444 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from jsonpath_rw import parse
from st2common import log as logging
import st2common.operators as criteria_operators
from st2common.constants.rules import TRIGGER_PAYLOAD_PREFIX, RULE_TYPE_BACKSTOP
from st2common.constants.system import SYSTEM_KV_PREFIX
from st2common.services.keyvalues import KeyValueLookup
from st2common.util.templating import render_template_with_system_context
LOG = logging.getLogger('st2reactor.ruleenforcement.filter')
class RuleFilter(object):
    """Decide whether a rule applies to a given trigger instance.

    A rule applies when it is enabled and every criterion in its ``criteria``
    dict matches the trigger instance payload.
    """

    def __init__(self, trigger_instance, trigger, rule, extra_info=False):
        """
        :param trigger_instance: TriggerInstance DB object.
        :type trigger_instance: :class:`TriggerInstanceDB``

        :param trigger: Trigger DB object.
        :type trigger: :class:`TriggerDB`

        :param rule: Rule DB object.
        :type rule: :class:`RuleDB`

        :param extra_info: If True, log detailed criteria information when a
                           rule fails to match.
        :type extra_info: ``bool``
        """
        self.trigger_instance = trigger_instance
        self.trigger = trigger
        self.rule = rule
        self.extra_info = extra_info

        # Base context used with a logger
        self._base_logger_context = {
            'rule': self.rule,
            'trigger': self.trigger,
            'trigger_instance': self.trigger_instance
        }

    def filter(self):
        """
        Return true if the rule is applicable to the provided trigger instance.

        :rtype: ``bool``
        """
        LOG.info('Validating rule %s for %s.', self.rule.ref, self.trigger['name'],
                 extra=self._base_logger_context)

        if not self.rule.enabled:
            return False

        criteria = self.rule.criteria
        is_rule_applicable = True

        # A rule with criteria can never match an empty payload.
        if criteria and not self.trigger_instance.payload:
            return False

        payload_lookup = PayloadLookup(self.trigger_instance.payload)

        LOG.debug('Trigger payload: %s', self.trigger_instance.payload,
                  extra=self._base_logger_context)

        for criterion_k in criteria.keys():
            criterion_v = criteria[criterion_k]
            # _check_criterion always returns a 3-tuple (it previously
            # returned a bare False on error paths, which blew up this unpack
            # with "cannot unpack non-iterable bool").
            is_rule_applicable, payload_value, criterion_pattern = self._check_criterion(
                criterion_k, criterion_v, payload_lookup)
            if not is_rule_applicable:
                if self.extra_info:
                    # Use .get() since a missing 'type' key is one of the
                    # failure modes that lands us here.
                    criteria_extra_info = '\n'.join([
                        '  key: %s' % criterion_k,
                        '  pattern: %s' % criterion_pattern,
                        '  type: %s' % criterion_v.get('type', None),
                        '  payload: %s' % payload_value
                    ])
                    LOG.info('Validation for rule %s failed on criteria -\n%s', self.rule.ref,
                             criteria_extra_info,
                             extra=self._base_logger_context)
                break

        if not is_rule_applicable:
            LOG.debug('Rule %s not applicable for %s.', self.rule.id, self.trigger['name'],
                      extra=self._base_logger_context)

        return is_rule_applicable

    def _check_criterion(self, criterion_k, criterion_v, payload_lookup):
        """Evaluate a single criterion against the payload.

        :return: Tuple of (matched, payload_value, rendered_criteria_pattern).
                 Every error path also returns a 3-tuple so callers can
                 always unpack the result.
        :rtype: ``tuple``
        """
        if 'type' not in criterion_v:
            # Comparison operator type not specified, can't perform a comparison
            return (False, None, None)

        criteria_operator = criterion_v['type']
        criteria_pattern = criterion_v.get('pattern', None)

        # Render the pattern (it can contain jinja expressions)
        try:
            criteria_pattern = self._render_criteria_pattern(criteria_pattern=criteria_pattern)
        except Exception:
            LOG.exception('Failed to render pattern value "%s" for key "%s"' %
                          (criteria_pattern, criterion_k), extra=self._base_logger_context)
            return (False, None, None)

        try:
            matches = payload_lookup.get_value(criterion_k)
            # pick value if only 1 matches else will end up being an array match.
            if matches:
                payload_value = matches[0] if len(matches) > 0 else matches
            else:
                payload_value = None
        except Exception:
            LOG.exception('Failed transforming criteria key %s', criterion_k,
                          extra=self._base_logger_context)
            return (False, None, criteria_pattern)

        op_func = criteria_operators.get_operator(criteria_operator)

        try:
            result = op_func(value=payload_value, criteria_pattern=criteria_pattern)
        except Exception:
            LOG.exception('There might be a problem with the criteria in rule %s.', self.rule,
                          extra=self._base_logger_context)
            return (False, payload_value, criteria_pattern)

        return result, payload_value, criteria_pattern

    def _render_criteria_pattern(self, criteria_pattern):
        """Render a criteria pattern through the system Jinja context.

        Non-string patterns are returned unchanged (rendering them makes no
        sense); None stays None.
        """
        if not criteria_pattern:
            return None

        if not isinstance(criteria_pattern, six.string_types):
            # We only perform rendering if value is a string - rendering a
            # non-string value makes no sense
            return criteria_pattern

        criteria_pattern = render_template_with_system_context(value=criteria_pattern)
        return criteria_pattern
class SecondPassRuleFilter(RuleFilter):
    """Filter applied to rules evaluated during the second pass.

    For now these are only "backstop" rules, i.e. rules which should only
    match when no other rule matched during the first pass.
    """

    def __init__(self, trigger_instance, trigger, rule, first_pass_matched):
        """
        :param trigger_instance: TriggerInstance DB object.
        :type trigger_instance: :class:`TriggerInstanceDB``

        :param trigger: Trigger DB object.
        :type trigger: :class:`TriggerDB`

        :param rule: Rule DB object.
        :type rule: :class:`RuleDB`

        :param first_pass_matched: Rules that matched in the first pass.
        :type first_pass_matched: `list`
        """
        super(SecondPassRuleFilter, self).__init__(trigger_instance, trigger, rule)
        self.first_pass_matched = first_pass_matched

    def filter(self):
        # A backstop rule is skipped whenever the first pass already matched.
        skip_backstop = self.first_pass_matched and self._is_backstop_rule()
        return False if skip_backstop else super(SecondPassRuleFilter, self).filter()

    def _is_backstop_rule(self):
        """Return True if this rule is of the backstop type."""
        return self.rule.type['ref'] == RULE_TYPE_BACKSTOP
class PayloadLookup(object):
    """Wrap the trigger payload (plus the system keyvalue store) so criteria
    keys can be resolved with jsonpath expressions."""

    def __init__(self, payload):
        self._context = {
            SYSTEM_KV_PREFIX: KeyValueLookup(),
            TRIGGER_PAYLOAD_PREFIX: payload
        }

    def get_value(self, lookup_key):
        """Return the list of values matching *lookup_key*, or None if the
        jsonpath expression matched nothing."""
        found = [m.value for m in parse(lookup_key).find(self._context)]
        return found or None
| apache-2.0 |
t-wissmann/qutebrowser | qutebrowser/mainwindow/tabbedbrowser.py | 1 | 40900 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main tabbed browser widget."""
import collections
import functools
import weakref
import typing
import attr
from PyQt5.QtWidgets import QSizePolicy, QWidget, QApplication
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QTimer, QUrl
from PyQt5.QtGui import QIcon
from qutebrowser.config import config
from qutebrowser.keyinput import modeman
from qutebrowser.mainwindow import tabwidget, mainwindow
from qutebrowser.browser import signalfilter, browsertab, history
from qutebrowser.utils import (log, usertypes, utils, qtutils, objreg,
urlutils, message, jinja)
from qutebrowser.misc import quitter
@attr.s
class UndoEntry:

    """Information needed for :undo.

    Attributes:
        url: URL of the closed tab (as returned by tab.url()).
        history: Serialized tab history (from history.private_api.serialize()).
        index: The index the tab had in the tab bar.
        pinned: Whether the tab was pinned.
    """

    url = attr.ib()
    history = attr.ib()
    index = attr.ib()
    pinned = attr.ib()
class TabDeque:

    """Class which manages the 'last visited' tab stack.

    Instead of handling deletions by clearing old entries, they are handled by
    checking if they exist on access. This allows us to save an iteration on
    every tab delete.

    Currently, we assume we will switch to the tab returned by any of the
    getter functions. This is done because the on_switch functions will be
    called upon switch, and we don't want to duplicate entries in the stack
    for a single switch.
    """

    def __init__(self) -> None:
        # Primary stack of previously focused tabs, most recent last. Entries
        # are weak references so they don't keep closed tabs alive; dead refs
        # are skipped lazily on access.
        self._stack = collections.deque(
            maxlen=config.val.tabs.focus_stack_size
        )  # type: typing.Deque[weakref.ReferenceType[QWidget]]
        # Items that have been removed from the primary stack.
        self._stack_deleted = [
        ]  # type: typing.List[weakref.ReferenceType[QWidget]]
        # Set by prev() so the tab switch it triggers isn't recorded again.
        self._ignore_next = False
        # Set by next() so the resulting switch doesn't wipe _stack_deleted
        # (keeps further "forward" navigation possible).
        self._keep_deleted_next = False

    def on_switch(self, old_tab: QWidget) -> None:
        """Record tab switch events."""
        if self._ignore_next:
            # This switch was caused by prev(); don't record it.
            self._ignore_next = False
            self._keep_deleted_next = False
            return
        tab = weakref.ref(old_tab)
        if self._stack_deleted and not self._keep_deleted_next:
            # A normal switch invalidates the "forward" history.
            self._stack_deleted = []
        self._keep_deleted_next = False
        self._stack.append(tab)

    def prev(self, cur_tab: QWidget) -> QWidget:
        """Get the 'previous' tab in the stack.

        Throws IndexError on failure (e.g. when the stack is exhausted).
        """
        tab = None  # type: typing.Optional[QWidget]
        # Skip dead weakrefs, tabs pending removal, and the current tab.
        while tab is None or tab.pending_removal or tab is cur_tab:
            tab = self._stack.pop()()
        self._stack_deleted.append(weakref.ref(cur_tab))
        self._ignore_next = True
        return tab

    def next(self, cur_tab: QWidget, *, keep_overflow=True) -> QWidget:
        """Get the 'next' tab in the stack.

        Throws IndexError on failure.
        """
        tab = None  # type: typing.Optional[QWidget]
        # Skip dead weakrefs, tabs pending removal, and the current tab.
        while tab is None or tab.pending_removal or tab is cur_tab:
            tab = self._stack_deleted.pop()()
        # On next tab-switch, current tab will be added to stack as normal.
        # However, we shouldn't wipe the overflow stack as normal.
        if keep_overflow:
            self._keep_deleted_next = True
        return tab

    def last(self, cur_tab: QWidget) -> QWidget:
        """Get the last tab.

        Throws IndexError on failure.
        """
        try:
            # Prefer going "forward"; fall back to "backward".
            return self.next(cur_tab, keep_overflow=False)
        except IndexError:
            return self.prev(cur_tab)

    def update_size(self) -> None:
        """Update the maxsize of this TabDeque."""
        newsize = config.val.tabs.focus_stack_size
        if newsize < 0:
            # Negative setting means unlimited (deque maxlen=None).
            newsize = None
        # We can't resize a collections.deque so just recreate it >:(
        self._stack = collections.deque(self._stack, maxlen=newsize)
class TabDeletedError(Exception):

    """Raised when an operation refers to a tab which was already deleted.

    Raised by _tab_index and _remove_tab when the underlying widget is gone.
    """
class TabbedBrowser(QWidget):
"""A TabWidget with QWebViews inside.
Provides methods to manage tabs, convenience methods to interact with the
current tab (cur_*) and filters signals to re-emit them when they occurred
in the currently visible tab.
For all tab-specific signals (cur_*) emitted by a tab, this happens:
- the signal gets filtered with _filter_signals and self.cur_* gets
emitted if the signal occurred in the current tab.
Attributes:
search_text/search_options: Search parameters which are shared between
all tabs.
_win_id: The window ID this tabbedbrowser is associated with.
_filter: A SignalFilter instance.
_now_focused: The tab which is focused now.
_tab_insert_idx_left: Where to insert a new tab with
tabs.new_tab_position set to 'prev'.
_tab_insert_idx_right: Same as above, for 'next'.
_undo_stack: List of lists of UndoEntry objects of closed tabs.
shutting_down: Whether we're currently shutting down.
_local_marks: Jump markers local to each page
_global_marks: Jump markers used across all pages
default_window_icon: The qutebrowser window icon
is_private: Whether private browsing is on for this window.
Signals:
cur_progress: Progress of the current tab changed (load_progress).
cur_load_started: Current tab started loading (load_started)
cur_load_finished: Current tab finished loading (load_finished)
cur_url_changed: Current URL changed.
cur_link_hovered: Link hovered in current tab (link_hovered)
cur_scroll_perc_changed: Scroll percentage of current tab changed.
arg 1: x-position in %.
arg 2: y-position in %.
cur_load_status_changed: Loading status of current tab changed.
close_window: The last tab was closed, close this window.
resized: Emitted when the browser window has resized, so the completion
widget can adjust its size to it.
arg: The new size.
current_tab_changed: The current tab changed to the emitted tab.
new_tab: Emits the new WebView and its index when a new tab is opened.
"""
cur_progress = pyqtSignal(int)
cur_load_started = pyqtSignal()
cur_load_finished = pyqtSignal(bool)
cur_url_changed = pyqtSignal(QUrl)
cur_link_hovered = pyqtSignal(str)
cur_scroll_perc_changed = pyqtSignal(int, int)
cur_load_status_changed = pyqtSignal(usertypes.LoadStatus)
cur_fullscreen_requested = pyqtSignal(bool)
cur_caret_selection_toggled = pyqtSignal(bool)
close_window = pyqtSignal()
resized = pyqtSignal('QRect')
current_tab_changed = pyqtSignal(browsertab.AbstractTab)
new_tab = pyqtSignal(browsertab.AbstractTab, int)
    def __init__(self, *, win_id, private, parent=None):
        """Initialize the tab container for one window.

        Args:
            win_id: The ID of the window this browser belongs to.
            private: Whether this window uses private browsing.
            parent: The parent QWidget, if any.
        """
        if private:
            # Private windows must not run in single-process mode.
            assert not qtutils.is_single_process()
        super().__init__(parent)
        self.widget = tabwidget.TabWidget(win_id, parent=self)
        self._win_id = win_id
        self._tab_insert_idx_left = 0
        self._tab_insert_idx_right = -1
        self.shutting_down = False
        self.widget.tabCloseRequested.connect(  # type: ignore
            self.on_tab_close_requested)
        self.widget.new_tab_requested.connect(self.tabopen)
        self.widget.currentChanged.connect(  # type: ignore
            self._on_current_changed)
        self.cur_fullscreen_requested.connect(self.widget.tabBar().maybe_hide)
        self.widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

        # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
        if qtutils.version_check('5.10', compiled=False):
            self.cur_load_finished.connect(self._leave_modes_on_load)
        else:
            self.cur_load_started.connect(self._leave_modes_on_load)

        # This init is never used, it is immediately thrown away in the next
        # line (via _update_stack_size, which recreates the deque with the
        # configured maxlen).
        self._undo_stack = (
            collections.deque()
        )  # type: typing.MutableSequence[typing.MutableSequence[UndoEntry]]
        self._update_stack_size()
        self._filter = signalfilter.SignalFilter(win_id, self)
        self._now_focused = None
        # Search parameters shared between all tabs of this window.
        self.search_text = None
        self.search_options = {}  # type: typing.Mapping[str, typing.Any]
        # Jump markers: per-page ('') and global (across all pages).
        self._local_marks = {
        }  # type: typing.MutableMapping[QUrl, typing.MutableMapping[str, int]]
        self._global_marks = {
        }  # type: typing.MutableMapping[str, typing.Tuple[int, QUrl]]
        self.default_window_icon = self.widget.window().windowIcon()
        self.is_private = private
        self.tab_deque = TabDeque()
        config.instance.changed.connect(self._on_config_changed)
        quitter.instance.shutting_down.connect(self.shutdown)
def _update_stack_size(self):
newsize = config.instance.get('tabs.undo_stack_size')
if newsize < 0:
newsize = None
# We can't resize a collections.deque so just recreate it >:(
self._undo_stack = collections.deque(self._undo_stack, maxlen=newsize)
    def __repr__(self):
        """Return a debug representation including the open tab count."""
        return utils.get_repr(self, count=self.widget.count())
@pyqtSlot(str)
def _on_config_changed(self, option):
if option == 'tabs.favicons.show':
self._update_favicons()
elif option == 'window.title_format':
self._update_window_title()
elif option == 'tabs.undo_stack_size':
self._update_stack_size()
elif option in ['tabs.title.format', 'tabs.title.format_pinned']:
self.widget.update_tab_titles()
elif option == "tabs.focus_stack_size":
self.tab_deque.update_size()
    def _tab_index(self, tab):
        """Get the index of a given tab.

        Raises TabDeletedError if the tab doesn't exist anymore.
        """
        try:
            idx = self.widget.indexOf(tab)
        except RuntimeError as e:
            # The underlying C++ widget was already deleted by Qt.
            log.webview.debug("Got invalid tab ({})!".format(e))
            raise TabDeletedError(e)
        if idx == -1:
            # indexOf returns -1 for widgets not contained in the tab widget.
            log.webview.debug("Got invalid tab (index is -1)!")
            raise TabDeletedError("index is -1!")
        return idx
def widgets(self):
"""Get a list of open tab widgets.
We don't implement this as generator so we can delete tabs while
iterating over the list.
"""
widgets = []
for i in range(self.widget.count()):
widget = self.widget.widget(i)
if widget is None:
log.webview.debug( # type: ignore
"Got None-widget in tabbedbrowser!")
else:
widgets.append(widget)
return widgets
    def _update_window_title(self, field=None):
        """Change the window title to match the current tab.

        Args:
            field: A field name which was updated. If given, the title
                   is only set if the given field is in the template.
        """
        title_format = config.cache['window.title_format']
        if field is not None and ('{' + field + '}') not in title_format:
            # The changed field can't affect the rendered title - skip work.
            return

        idx = self.widget.currentIndex()
        if idx == -1:
            # (e.g. last tab removed)
            log.webview.debug("Not updating window title because index is -1")
            return
        fields = self.widget.get_tab_fields(idx)
        fields['id'] = self._win_id

        title = title_format.format(**fields)
        self.widget.window().setWindowTitle(title)
    def _connect_tab_signals(self, tab):
        """Set up the needed signals for tab.

        Connects per-tab signals both through the signal filter (so cur_*
        signals only fire for the current tab) and directly to per-tab
        handlers.
        """
        # filtered signals - re-emitted as cur_* only if `tab` is current
        tab.link_hovered.connect(
            self._filter.create(self.cur_link_hovered, tab))
        tab.load_progress.connect(
            self._filter.create(self.cur_progress, tab))
        tab.load_finished.connect(
            self._filter.create(self.cur_load_finished, tab))
        tab.load_started.connect(
            self._filter.create(self.cur_load_started, tab))
        tab.scroller.perc_changed.connect(
            self._filter.create(self.cur_scroll_perc_changed, tab))
        tab.url_changed.connect(
            self._filter.create(self.cur_url_changed, tab))
        tab.load_status_changed.connect(
            self._filter.create(self.cur_load_status_changed, tab))
        tab.fullscreen_requested.connect(
            self._filter.create(self.cur_fullscreen_requested, tab))
        tab.caret.selection_toggled.connect(
            self._filter.create(self.cur_caret_selection_toggled, tab))
        # misc - direct per-tab handlers, bound to this tab via partial
        tab.scroller.perc_changed.connect(self._on_scroll_pos_changed)
        tab.scroller.before_jump_requested.connect(lambda: self.set_mark("'"))
        tab.url_changed.connect(
            functools.partial(self._on_url_changed, tab))
        tab.title_changed.connect(
            functools.partial(self._on_title_changed, tab))
        tab.icon_changed.connect(
            functools.partial(self._on_icon_changed, tab))
        tab.load_progress.connect(
            functools.partial(self._on_load_progress, tab))
        tab.load_finished.connect(
            functools.partial(self._on_load_finished, tab))
        tab.load_started.connect(
            functools.partial(self._on_load_started, tab))
        tab.load_status_changed.connect(
            functools.partial(self._on_load_status_changed, tab))
        tab.window_close_requested.connect(
            functools.partial(self._on_window_close_requested, tab))
        tab.renderer_process_terminated.connect(
            functools.partial(self._on_renderer_process_terminated, tab))
        tab.audio.muted_changed.connect(
            functools.partial(self._on_audio_changed, tab))
        tab.audio.recently_audible_changed.connect(
            functools.partial(self._on_audio_changed, tab))
        tab.new_tab_requested.connect(self.tabopen)
        if not self.is_private:
            # Private windows don't record browsing history.
            tab.history_item_triggered.connect(
                history.web_history.add_from_tab)
def current_url(self):
"""Get the URL of the current tab.
Intended to be used from command handlers.
Return:
The current URL as QUrl.
"""
idx = self.widget.currentIndex()
return self.widget.tab_url(idx)
    def shutdown(self):
        """Try to shut down all tabs cleanly."""
        self.shutting_down = True

        # Reverse tabs so we don't have to recalculate tab titles over and over
        # Removing the first causes [2..-1] to be recomputed
        # Removing the last causes nothing to be recomputed
        for tab in reversed(self.widgets()):
            self._remove_tab(tab)
    def tab_close_prompt_if_pinned(
            self, tab, force, yes_action,
            text="Are you sure you want to close a pinned tab?"):
        """Helper method for tab_close.

        If tab is pinned, prompt. If not, run yes_action.
        If tab is destroyed, abort question.

        Args:
            tab: The tab which is about to be closed.
            force: If True, skip the prompt even for pinned tabs.
            yes_action: Callable run when closing is confirmed (or no prompt
                        is needed).
            text: The confirmation question to show.
        """
        if tab.data.pinned and not force:
            message.confirm_async(
                title='Pinned Tab',
                text=text,
                yes_action=yes_action, default=False, abort_on=[tab.destroyed])
        else:
            yes_action()
    def close_tab(self, tab, *, add_undo=True, new_undo=True):
        """Close a tab.

        Args:
            tab: The QWebView to be closed.
            add_undo: Whether the tab close can be undone.
            new_undo: Whether the undo entry should be a new item in the stack.
        """
        last_close = config.val.tabs.last_close
        count = self.widget.count()

        if last_close == 'ignore' and count == 1:
            # Closing the last tab is configured to do nothing.
            return

        self._remove_tab(tab, add_undo=add_undo, new_undo=new_undo)

        if count == 1:  # We just closed the last tab above.
            # Apply the configured last-tab-closed behavior.
            if last_close == 'close':
                self.close_window.emit()
            elif last_close == 'blank':
                self.load_url(QUrl('about:blank'), newtab=True)
            elif last_close == 'startpage':
                for url in config.val.url.start_pages:
                    self.load_url(url, newtab=True)
            elif last_close == 'default-page':
                self.load_url(config.val.url.default_page, newtab=True)
    def _remove_tab(self, tab, *, add_undo=True, new_undo=True, crashed=False):
        """Remove a tab from the tab list and delete it properly.

        Args:
            tab: The QWebView to be closed.
            add_undo: Whether the tab close can be undone.
            new_undo: Whether the undo entry should be a new item in the stack.
            crashed: Whether we're closing a tab with crashed renderer process.
        """
        idx = self.widget.indexOf(tab)
        if idx == -1:
            if crashed:
                # A crashed tab may already be gone from the widget; nothing
                # left to do.
                return
            raise TabDeletedError("tab {} is not contained in "
                                  "TabbedWidget!".format(tab))
        if tab is self._now_focused:
            self._now_focused = None

        tab.pending_removal = True

        if tab.url().isEmpty():
            # There are some good reasons why a URL could be empty
            # (target="_blank" with a download, see [1]), so we silently ignore
            # this.
            # [1] https://github.com/qutebrowser/qutebrowser/issues/163
            pass
        elif not tab.url().isValid():
            # We display a warning for URLs which are not empty but invalid -
            # but we don't return here because we want the tab to close either
            # way.
            urlutils.invalid_url_error(tab.url(), "saving tab")
        elif add_undo:
            try:
                history_data = tab.history.private_api.serialize()
            except browsertab.WebTabError:
                pass  # special URL
            else:
                entry = UndoEntry(tab.url(), history_data, idx,
                                  tab.data.pinned)
                # Either start a new undo group or extend the latest one.
                if new_undo or not self._undo_stack:
                    self._undo_stack.append([entry])
                else:
                    self._undo_stack[-1].append(entry)

        tab.private_api.shutdown()
        self.widget.removeTab(idx)
        if not crashed:
            # WORKAROUND for a segfault when we delete the crashed tab.
            # see https://bugreports.qt.io/browse/QTBUG-58698
            tab.layout().unwrap()
            tab.deleteLater()
    def undo(self):
        """Undo removing of a tab or tabs."""
        # Remove unused tab which may be created after the last tab is closed:
        # if the only open tab is the pristine page that tabs.last_close put
        # there, reuse it for the first restored tab instead of opening a new
        # one next to it.
        last_close = config.val.tabs.last_close
        use_current_tab = False
        if last_close in ['blank', 'startpage', 'default-page']:
            only_one_tab_open = self.widget.count() == 1
            no_history = len(self.widget.widget(0).history) == 1
            urls = {
                'blank': QUrl('about:blank'),
                'startpage': config.val.url.start_pages[0],
                'default-page': config.val.url.default_page,
            }
            first_tab_url = self.widget.widget(0).url()
            last_close_urlstr = urls[last_close].toString().rstrip('/')
            first_tab_urlstr = first_tab_url.toString().rstrip('/')
            last_close_url_used = first_tab_urlstr == last_close_urlstr
            use_current_tab = (only_one_tab_open and no_history and
                               last_close_url_used)

        # Restore the most recent undo group, oldest entry last so indices
        # stay meaningful.
        for entry in reversed(self._undo_stack.pop()):
            if use_current_tab:
                newtab = self.widget.widget(0)
                use_current_tab = False
            else:
                # FIXME:typing mypy thinks this is None due to @pyqtSlot
                newtab = typing.cast(
                    browsertab.AbstractTab,
                    self.tabopen(background=False, idx=entry.index))

            newtab.history.private_api.deserialize(entry.history)
            self.widget.set_tab_pinned(newtab, entry.pinned)
@pyqtSlot('QUrl', bool)
def load_url(self, url, newtab):
"""Open a URL, used as a slot.
Args:
url: The URL to open as QUrl.
newtab: True to open URL in a new tab, False otherwise.
"""
qtutils.ensure_valid(url)
if newtab or self.widget.currentWidget() is None:
self.tabopen(url, background=False)
else:
self.widget.currentWidget().load_url(url)
    @pyqtSlot(int)
    def on_tab_close_requested(self, idx):
        """Close a tab via an index.

        Pinned tabs get a confirmation prompt before actually closing.
        """
        tab = self.widget.widget(idx)
        if tab is None:
            log.webview.debug(  # type: ignore
                "Got invalid tab {} for index {}!".format(tab, idx))
            return
        self.tab_close_prompt_if_pinned(
            tab, False, lambda: self.close_tab(tab))
@pyqtSlot(browsertab.AbstractTab)
def _on_window_close_requested(self, widget):
"""Close a tab with a widget given."""
try:
self.close_tab(widget)
except TabDeletedError:
log.webview.debug("Requested to close {!r} which does not "
"exist!".format(widget))
    @pyqtSlot('QUrl')
    @pyqtSlot('QUrl', bool)
    @pyqtSlot('QUrl', bool, bool)
    def tabopen(
            self, url: QUrl = None,
            background: bool = None,
            related: bool = True,
            idx: int = None,
    ) -> browsertab.AbstractTab:
        """Open a new tab with a given URL.

        Inner logic for open-tab and open-tab-bg.
        Also connect all the signals we need to _filter_signals.

        Args:
            url: The URL to open as QUrl or None for an empty tab.
            background: Whether to open the tab in the background.
                        if None, the `tabs.background` setting decides.
            related: Whether the tab was opened from another existing tab.
                     If this is set, the new position might be different. With
                     the default settings we handle it like Chromium does:
                         - Tabs from clicked links etc. are to the right of
                           the current (related=True).
                         - Explicitly opened tabs are at the very right
                           (related=False)
            idx: The index where the new tab should be opened.

        Return:
            The opened WebView instance.
        """
        if url is not None:
            qtutils.ensure_valid(url)
        log.webview.debug("Creating new tab with URL {}, background {}, "
                          "related {}, idx {}".format(
                              url, background, related, idx))

        # Remember the focused widget so we can restore focus if a prompt
        # or the commandline was active.
        prev_focus = QApplication.focusWidget()

        if config.val.tabs.tabs_are_windows and self.widget.count() > 0:
            # Every tab beyond the first gets its own window; delegate the
            # actual opening to that window's tabbed browser.
            window = mainwindow.MainWindow(private=self.is_private)
            window.show()
            tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                        window=window.win_id)
            return tabbed_browser.tabopen(url=url, background=background,
                                          related=related)

        tab = browsertab.create(win_id=self._win_id,
                                private=self.is_private,
                                parent=self.widget)
        self._connect_tab_signals(tab)

        if idx is None:
            idx = self._get_new_tab_idx(related)
        self.widget.insertTab(idx, tab, "")

        if url is not None:
            tab.load_url(url)

        if background is None:
            background = config.val.tabs.background
        if background:
            # Make sure the background tab has the correct initial size.
            # With a foreground tab, it's going to be resized correctly by the
            # layout anyways.
            tab.resize(self.widget.currentWidget().size())
            self.widget.tab_index_changed.emit(self.widget.currentIndex(),
                                               self.widget.count())
            # Refocus webview in case we lost it by spawning a bg tab
            self.widget.currentWidget().setFocus()
        else:
            self.widget.setCurrentWidget(tab)
            # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-68076
            # Still seems to be needed with Qt 5.11.1
            tab.setFocus()

        mode = modeman.instance(self._win_id).mode
        if mode in [usertypes.KeyMode.command, usertypes.KeyMode.prompt,
                    usertypes.KeyMode.yesno]:
            # If we were in a command prompt, restore old focus
            # The above commands need to be run to switch tabs
            if prev_focus is not None:
                prev_focus.setFocus()

        tab.show()
        self.new_tab.emit(tab, idx)
        return tab
    def _get_new_tab_idx(self, related):
        """Get the index of a tab to insert.

        Args:
            related: Whether the tab was opened from another tab (as a "child")

        Return:
            The index of the new tab.
        """
        if related:
            pos = config.val.tabs.new_position.related
        else:
            pos = config.val.tabs.new_position.unrelated
        if pos == 'prev':
            if config.val.tabs.new_position.stacking:
                idx = self._tab_insert_idx_left
                # On first sight, we'd think we have to decrement
                # self._tab_insert_idx_left here, as we want the next tab to be
                # *before* the one we just opened. However, since we opened a
                # tab *before* the currently focused tab, indices will shift by
                # 1 automatically.
            else:
                idx = self.widget.currentIndex()
        elif pos == 'next':
            if config.val.tabs.new_position.stacking:
                idx = self._tab_insert_idx_right
            else:
                idx = self.widget.currentIndex() + 1
            # Keep stacking subsequent tabs to the right of this one.
            self._tab_insert_idx_right += 1
        elif pos == 'first':
            idx = 0
        elif pos == 'last':
            # NOTE(review): -1 presumably makes insertTab() append at the
            # very end (Qt convention) — confirm.
            idx = -1
        else:
            raise ValueError("Invalid tabs.new_position '{}'.".format(pos))
        log.webview.debug("tabs.new_position {} -> opening new tab at {}, "
                          "next left: {} / right: {}".format(
                              pos, idx, self._tab_insert_idx_left,
                              self._tab_insert_idx_right))
        return idx
def _update_favicons(self):
"""Update favicons when config was changed."""
for tab in self.widgets():
self.widget.update_tab_favicon(tab)
    @pyqtSlot()
    def _on_load_started(self, tab):
        """Clear icon and update title when a tab started loading.

        Args:
            tab: The tab where the signal belongs to.
        """
        if tab.data.keep_icon:
            # One-shot flag: keep the current icon for this load only,
            # then consume the flag.
            tab.data.keep_icon = False
        else:
            if (config.cache['tabs.tabs_are_windows'] and
                    tab.data.should_show_icon()):
                # Tab == window here, so reset the window icon to the
                # default until the new page delivers its favicon.
                self.widget.window().setWindowIcon(self.default_window_icon)
@pyqtSlot()
def _on_load_status_changed(self, tab):
"""Update tab/window titles if the load status changed."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.update_tab_title(idx)
if idx == self.widget.currentIndex():
self._update_window_title()
    @pyqtSlot()
    def _leave_modes_on_load(self):
        """Leave insert/hint mode when loading started."""
        # The insert-mode setting can be URL-dependent, so determine the
        # current URL first (None if unavailable/invalid).
        try:
            url = self.current_url()
            if not url.isValid():
                url = None
        except qtutils.QtValueError:
            url = None
        if config.instance.get('input.insert_mode.leave_on_load',
                               url=url):
            modeman.leave(self._win_id, usertypes.KeyMode.insert,
                          'load started', maybe=True)
        else:
            log.modes.debug("Ignoring leave_on_load request due to setting.")
        if config.cache['hints.leave_on_load']:
            modeman.leave(self._win_id, usertypes.KeyMode.hint,
                          'load started', maybe=True)
        else:
            log.modes.debug("Ignoring leave_on_load request due to setting.")
@pyqtSlot(browsertab.AbstractTab, str)
def _on_title_changed(self, tab, text):
"""Set the title of a tab.
Slot for the title_changed signal of any tab.
Args:
tab: The WebView where the title was changed.
text: The text to set.
"""
if not text:
log.webview.debug("Ignoring title change to '{}'.".format(text))
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
log.webview.debug("Changing title for idx {} to '{}'".format(
idx, text))
self.widget.set_page_title(idx, text)
if idx == self.widget.currentIndex():
self._update_window_title()
@pyqtSlot(browsertab.AbstractTab, QUrl)
def _on_url_changed(self, tab, url):
"""Set the new URL as title if there's no title yet.
Args:
tab: The WebView where the title was changed.
url: The new URL.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if not self.widget.page_title(idx):
self.widget.set_page_title(idx, url.toDisplayString())
@pyqtSlot(browsertab.AbstractTab, QIcon)
def _on_icon_changed(self, tab, icon):
"""Set the icon of a tab.
Slot for the iconChanged signal of any tab.
Args:
tab: The WebView where the title was changed.
icon: The new icon
"""
if not tab.data.should_show_icon():
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.setTabIcon(idx, icon)
if config.val.tabs.tabs_are_windows:
self.widget.window().setWindowIcon(icon)
@pyqtSlot(usertypes.KeyMode)
def on_mode_entered(self, mode):
"""Save input mode when tabs.mode_on_change = restore."""
if (config.val.tabs.mode_on_change == 'restore' and
mode in modeman.INPUT_MODES):
tab = self.widget.currentWidget()
if tab is not None:
tab.data.input_mode = mode
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Give focus to current tab if command mode was left."""
widget = self.widget.currentWidget()
if widget is None:
return # type: ignore
if mode in [usertypes.KeyMode.command] + modeman.PROMPT_MODES:
log.modes.debug("Left status-input mode, focusing {!r}".format(
widget))
widget.setFocus()
if config.val.tabs.mode_on_change == 'restore':
widget.data.input_mode = usertypes.KeyMode.normal
    @pyqtSlot(int)
    def _on_current_changed(self, idx):
        """Add prev tab to stack and leave hinting mode when focus changed."""
        mode_on_change = config.val.tabs.mode_on_change
        if idx == -1 or self.shutting_down:
            # closing the last tab (before quitting) or shutting down
            return
        tab = self.widget.widget(idx)
        if tab is None:
            log.webview.debug(  # type: ignore
                "on_current_changed got called with invalid index {}"
                .format(idx))
            return

        log.modes.debug("Current tab changed, focusing {!r}".format(tab))
        tab.setFocus()

        # Hint/caret mode never survives a tab switch.
        modes_to_leave = [usertypes.KeyMode.hint, usertypes.KeyMode.caret]

        mm_instance = modeman.instance(self._win_id)
        current_mode = mm_instance.mode
        log.modes.debug("Mode before tab change: {} (mode_on_change = {})"
                        .format(current_mode.name, mode_on_change))
        if mode_on_change == 'normal':
            # Leave input modes too, so every switch ends in normal mode.
            modes_to_leave += modeman.INPUT_MODES
        for mode in modes_to_leave:
            modeman.leave(self._win_id, mode, 'tab changed', maybe=True)
        if (mode_on_change == 'restore' and
                current_mode not in modeman.PROMPT_MODES):
            # Re-enter the input mode previously saved on the new tab.
            modeman.enter(self._win_id, tab.data.input_mode, 'restore')
        if self._now_focused is not None:
            self.tab_deque.on_switch(self._now_focused)
        log.modes.debug("Mode after tab change: {} (mode_on_change = {})"
                        .format(current_mode.name, mode_on_change))
        self._now_focused = tab
        self.current_tab_changed.emit(tab)
        # Deferred so the window title updates after the switch settles.
        QTimer.singleShot(0, self._update_window_title)
        self._tab_insert_idx_left = self.widget.currentIndex()
        self._tab_insert_idx_right = self.widget.currentIndex() + 1
    @pyqtSlot()
    def on_cmd_return_pressed(self):
        """Set focus when the commandline closes."""
        # NOTE(review): this slot only logs; the actual refocusing appears
        # to happen via on_mode_left when command mode exits — confirm.
        log.modes.debug("Commandline closed, focusing {!r}".format(self))
def _on_load_progress(self, tab, perc):
"""Adjust tab indicator on load progress."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
start = config.cache['colors.tabs.indicator.start']
stop = config.cache['colors.tabs.indicator.stop']
system = config.cache['colors.tabs.indicator.system']
color = utils.interpolate_color(start, stop, perc, system)
self.widget.set_tab_indicator_color(idx, color)
self.widget.update_tab_title(idx)
if idx == self.widget.currentIndex():
self._update_window_title()
def _on_load_finished(self, tab, ok):
"""Adjust tab indicator when loading finished."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if ok:
start = config.cache['colors.tabs.indicator.start']
stop = config.cache['colors.tabs.indicator.stop']
system = config.cache['colors.tabs.indicator.system']
color = utils.interpolate_color(start, stop, 100, system)
else:
color = config.cache['colors.tabs.indicator.error']
self.widget.set_tab_indicator_color(idx, color)
if idx == self.widget.currentIndex():
tab.private_api.handle_auto_insert_mode(ok)
@pyqtSlot()
def _on_scroll_pos_changed(self):
"""Update tab and window title when scroll position changed."""
idx = self.widget.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating scroll position because index is "
"-1")
return
self._update_window_title('scroll_pos')
self.widget.update_tab_title(idx, 'scroll_pos')
def _on_audio_changed(self, tab, _muted):
"""Update audio field in tab when mute or recentlyAudible changed."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.widget.update_tab_title(idx, 'audio')
if idx == self.widget.currentIndex():
self._update_window_title('audio')
    def _on_renderer_process_terminated(self, tab, status, code):
        """Show an error when a renderer process terminated.

        Args:
            tab: The affected tab.
            status: A browsertab.TerminationStatus value.
            code: The process exit code (used for 'abnormal' status).
        """
        if status == browsertab.TerminationStatus.normal:
            return

        messages = {
            browsertab.TerminationStatus.abnormal:
                "Renderer process exited with status {}".format(code),
            browsertab.TerminationStatus.crashed:
                "Renderer process crashed",
            browsertab.TerminationStatus.killed:
                "Renderer process was killed",
            browsertab.TerminationStatus.unknown:
                "Renderer process did not start",
        }
        msg = messages[status]

        def show_error_page(html):
            # Render the error inline in the affected tab.
            tab.set_html(html)
            log.webview.error(msg)

        if qtutils.version_check('5.9', compiled=False):
            url_string = tab.url(requested=True).toDisplayString()
            error_page = jinja.render(
                'error.html', title="Error loading {}".format(url_string),
                url=url_string, error=msg)
            # NOTE(review): the 100ms delay presumably lets the terminated
            # renderer settle before setting new HTML — confirm.
            QTimer.singleShot(100, lambda: show_error_page(error_page))
        else:
            # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58698
            message.error(msg)
            self._remove_tab(tab, crashed=True)
            if self.widget.count() == 0:
                self.tabopen(QUrl('about:blank'))
    def resizeEvent(self, e):
        """Extend resizeEvent of QWidget to emit a resized signal afterwards.

        Args:
            e: The QResizeEvent
        """
        # Let Qt perform the actual resize first, then notify listeners.
        super().resizeEvent(e)
        self.resized.emit(self.geometry())
def wheelEvent(self, e):
"""Override wheelEvent of QWidget to forward it to the focused tab.
Args:
e: The QWheelEvent
"""
if self._now_focused is not None:
self._now_focused.wheelEvent(e)
else:
e.ignore()
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
# strip the fragment as it may interfere with scrolling
try:
url = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
# show an error only if the mark is not automatically set
if key != "'":
message.error("Failed to set mark: url invalid")
return
point = self.widget.currentWidget().scroller.pos_px()
if key.isupper():
self._global_marks[key] = point, url
else:
if url not in self._local_marks:
self._local_marks[url] = {}
self._local_marks[url][key] = point
    def jump_mark(self, key):
        """Jump to the mark named by `key`.

        Args:
            key: mark identifier; capital indicates a global mark
        """
        try:
            # consider urls that differ only in fragment to be identical
            urlkey = self.current_url().adjusted(QUrl.RemoveFragment)
        except qtutils.QtValueError:
            urlkey = None

        tab = self.widget.currentWidget()

        if key.isupper():
            if key in self._global_marks:
                point, url = self._global_marks[key]

                def callback(ok):
                    """Scroll once loading finished."""
                    if ok:
                        # One-shot: disconnect before scrolling.
                        self.cur_load_finished.disconnect(callback)
                        tab.scroller.to_point(point)

                # A global mark may live on a different URL, so navigate
                # first and defer the scroll until the load finishes.
                self.load_url(url, newtab=False)
                self.cur_load_finished.connect(callback)
            else:
                message.error("Mark {} is not set".format(key))
        elif urlkey is None:
            message.error("Current URL is invalid!")
        elif urlkey in self._local_marks and key in self._local_marks[urlkey]:
            point = self._local_marks[urlkey][key]

            # save the pre-jump position in the special ' mark
            # this has to happen after we read the mark, otherwise jump_mark
            # "'" would just jump to the current position every time
            tab.scroller.before_jump_requested.emit()

            tab.scroller.to_point(point)
        else:
            message.error("Mark {} is not set".format(key))
| gpl-3.0 |
csachs/openmicroscopy | components/tools/OmeroWeb/omeroweb/urls.py | 7 | 3131 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2014 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from django.conf import settings
from django.conf.urls import url, patterns, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.utils.functional import lazy
from django.views.generic import RedirectView
from django.views.decorators.cache import never_cache
# error handler
handler404 = "omeroweb.feedback.views.handler404"
handler500 = "omeroweb.feedback.views.handler500"

# Lazy wrapper around reverse(): the URLconf is not fully loaded yet at
# module import time, so resolution is deferred until first use.
reverse_lazy = lazy(reverse, str)
def redirect_urlpatterns():
    """
    Helper function to return a URL pattern for index page http://host/.

    Redirects to the custom index view when INDEX_TEMPLATE is configured,
    otherwise to the default webclient index.
    """
    if settings.INDEX_TEMPLATE is None:
        target = 'webindex'
    else:
        target = 'webindex_custom'
    redirect_view = never_cache(
        RedirectView.as_view(url=reverse_lazy(target), permanent=True))
    return patterns('', url(r'^$', redirect_view, name="index"))
# url patterns
urlpatterns = patterns(
    '',
    # Serve the favicon via webgateway's bundled static image.
    (r'^favicon\.ico$',
     lambda request: redirect('%swebgateway/img/ome.ico'
                              % settings.STATIC_URL)),
    # (?i) makes the application prefixes case-insensitive.
    (r'^(?i)webgateway/', include('omeroweb.webgateway.urls')),
    (r'^(?i)webadmin/', include('omeroweb.webadmin.urls')),
    (r'^(?i)webclient/', include('omeroweb.webclient.urls')),
    (r'^(?i)url/', include('omeroweb.webredirect.urls')),
    (r'^(?i)feedback/', include('omeroweb.feedback.urls')),
    url(r'^index/$', 'omeroweb.webclient.views.custom_index',
        name="webindex_custom"),
)

# Route http://host/ according to settings.INDEX_TEMPLATE.
urlpatterns += redirect_urlpatterns()

for app in settings.ADDITIONAL_APPS:
    # Depending on how we added the app to INSTALLED_APPS in settings.py,
    # include the urls the same way
    if 'omeroweb.%s' % app in settings.INSTALLED_APPS:
        urlmodule = 'omeroweb.%s.urls' % app
    else:
        urlmodule = '%s.urls' % app

    regex = '^(?i)%s/' % app
    urlpatterns += patterns('', (regex, include(urlmodule)),)

if settings.DEBUG:
    # Dev server only; in production the web server serves static files.
    urlpatterns += staticfiles_urlpatterns()
nthien/pulp | server/test/unit/server/webservices/views/test_permissions.py | 14 | 10904 | import json
import unittest
import mock
from base import assert_auth_EXECUTE, assert_auth_READ
from pulp.server.exceptions import MissingValue
from pulp.server.webservices.views.permissions import (GrantToRoleView, GrantToUserView,
PermissionView, RevokeFromRoleView,
RevokeFromUserView, _validate_params)
class TestPermissionsView(unittest.TestCase):
    """
    Test permissions view (GET of all permissions and of a single resource).
    """

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_READ())
    @mock.patch(
        'pulp.server.webservices.views.permissions.generate_json_response_with_pulp_encoder')
    @mock.patch('pulp.server.webservices.views.permissions.factory')
    def test_get_all_permissions(self, mock_f, mock_resp):
        """
        Test the permissions retrieval.
        """
        perm = [{'resource': '/v2/some/', 'id': '1234',
                 'users': [{'username': 'test-user', 'permissions': [0]}]}]
        mock_f.permission_query_manager.return_value.find_all.return_value = perm
        mock_f.permission_manager.return_value.operation_value_to_name.return_value = 'READ'
        request = mock.MagicMock()
        # Empty GET -> no resource filter, so all permissions are listed.
        request.GET = {}
        permission = PermissionView()
        response = permission.get(request)

        # The per-user permission list is flattened to {username: [op names]}.
        expected_cont = [{'id': '1234', 'resource': '/v2/some/', 'users': {'test-user': ['READ']}}]
        mock_resp.assert_called_once_with(expected_cont)
        self.assertTrue(response is mock_resp.return_value)

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_READ())
    @mock.patch(
        'pulp.server.webservices.views.permissions.generate_json_response_with_pulp_encoder')
    @mock.patch('pulp.server.webservices.views.permissions.factory')
    def test_get_resource_permission(self, mock_f, mock_resp):
        """
        Test specific resource permissions retrieval.
        """
        perm = {'resource': '/v2/some/', 'id': '1234',
                'users': [{'username': 'test-user', 'permissions': [0]}]}
        mock_f.permission_query_manager.return_value.find_by_resource.return_value = perm
        mock_f.permission_manager.return_value.operation_value_to_name.return_value = 'READ'
        request = mock.MagicMock()
        # A 'resource' key in the body selects a single resource lookup.
        request.body = json.dumps({'resource': '/v2/some/'})
        permission = PermissionView()
        response = permission.get(request)

        expected_cont = [{'id': '1234', 'resource': '/v2/some/', 'users': {'test-user': ['READ']}}]
        mock_resp.assert_called_once_with(expected_cont)
        self.assertTrue(response is mock_resp.return_value)
class TestGrantToUserView(unittest.TestCase):
    """
    Test grant permission to user.
    """

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_EXECUTE())
    @mock.patch('pulp.server.webservices.views.permissions.generate_json_response')
    @mock.patch('pulp.server.webservices.views.permissions.factory')
    def test_grant_to_user(self, mock_factory, mock_resp):
        """
        Test grant permissions to user.
        """
        request = mock.MagicMock()
        request.body = json.dumps(
            {'operations': ['READ'], 'login': 'test', 'resource': '/v2/some/'})
        mock_factory.permission_manager.return_value.grant.return_value = None
        mock_factory.permission_manager.return_value.operation_names_to_values.return_value = [0]
        grant = GrantToUserView()
        response = grant.post(request)

        mock_resp.assert_called_once_with(None)
        self.assertTrue(response is mock_resp.return_value)
        # 'READ' must have been translated to its numeric value ([0]).
        mock_factory.permission_manager.return_value.grant.assert_called_once_with(
            '/v2/some/', 'test', [0])

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_EXECUTE())
    def test_grant_to_user_invalid_param(self):
        """
        Test grant permissions to user with missing required params.
        """
        request = mock.MagicMock()
        # 'login' is deliberately omitted from the body.
        request.body = json.dumps({'operations': ['READ'], 'resource': '/v2/some/'})
        grant = GrantToUserView()
        try:
            grant.post(request)
        # Python 2 except syntax: binds the raised MissingValue to `response`.
        except MissingValue, response:
            self.assertEqual(response.http_status_code, 400)
            self.assertEqual(response.error_data['property_names'], ['login'])
        else:
            raise AssertionError("MissingValue should be raised with missing params")
class TestRevokeFromUserView(unittest.TestCase):
    """
    Test revoke permission from user.
    """

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_EXECUTE())
    @mock.patch('pulp.server.webservices.views.permissions.generate_json_response')
    @mock.patch('pulp.server.webservices.views.permissions.factory')
    def test_revoke_from_user(self, mock_factory, mock_resp):
        """
        Test revoke permissions from user.
        """
        request = mock.MagicMock()
        request.body = json.dumps(
            {'operations': ['READ'], 'login': 'test', 'resource': '/v2/some/'})
        mock_factory.permission_manager.return_value.revoke.return_value = None
        mock_factory.permission_manager.return_value.operation_names_to_values.return_value = [0]
        revoke = RevokeFromUserView()
        response = revoke.post(request)

        mock_resp.assert_called_once_with(None)
        self.assertTrue(response is mock_resp.return_value)
        # 'READ' must have been translated to its numeric value ([0]).
        mock_factory.permission_manager.return_value.revoke.assert_called_once_with(
            '/v2/some/', 'test', [0])

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_EXECUTE())
    def test_revoke_from_user_invalid_param(self):
        """
        Test revoke permissions from user with missing required params.
        """
        request = mock.MagicMock()
        # 'login' is deliberately omitted from the body.
        request.body = json.dumps({'operations': ['READ'], 'resource': '/v2/some/'})
        revoke = RevokeFromUserView()
        try:
            revoke.post(request)
        # Python 2 except syntax: binds the raised MissingValue to `response`.
        except MissingValue, response:
            self.assertEqual(response.http_status_code, 400)
            self.assertEqual(response.error_data['property_names'], ['login'])
        else:
            raise AssertionError("MissingValue should be raised with missing params")
class TestGrantToRoleView(unittest.TestCase):
    """
    Test grant permission to role.
    """

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_EXECUTE())
    @mock.patch('pulp.server.webservices.views.permissions.generate_json_response')
    @mock.patch('pulp.server.webservices.views.permissions.factory')
    def test_grant_to_role(self, mock_factory, mock_resp):
        """
        Test grant permissions to role.
        """
        request = mock.MagicMock()
        request.body = json.dumps(
            {'operations': ['READ'], 'role_id': 'test', 'resource': '/v2/some/'})
        mock_factory.role_manager.return_value.add_permissions_to_role.return_value = None
        mock_factory.permission_manager.return_value.operation_names_to_values.return_value = [0]
        grant = GrantToRoleView()
        response = grant.post(request)

        mock_resp.assert_called_once_with(None)
        self.assertTrue(response is mock_resp.return_value)
        # 'READ' must have been translated to its numeric value ([0]).
        mock_factory.role_manager.return_value.add_permissions_to_role.assert_called_once_with(
            'test', '/v2/some/', [0])

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_EXECUTE())
    def test_grant_to_role_invalid_param(self):
        """
        Test grant permissions to role with missing required params.
        """
        request = mock.MagicMock()
        # 'role_id' is deliberately omitted from the body.
        request.body = json.dumps({'operations': ['READ'], 'resource': '/v2/some/'})
        grant = GrantToRoleView()
        try:
            grant.post(request)
        # Python 2 except syntax: binds the raised MissingValue to `response`.
        except MissingValue, response:
            self.assertEqual(response.http_status_code, 400)
            self.assertEqual(response.error_data['property_names'], ['role_id'])
        else:
            raise AssertionError("MissingValue should be raised with missing params")
class TestRevokeFromRoleView(unittest.TestCase):
    """
    Test revoke permission from role.
    """

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_EXECUTE())
    @mock.patch('pulp.server.webservices.views.permissions.generate_json_response')
    @mock.patch('pulp.server.webservices.views.permissions.factory')
    def test_revoke_from_role(self, mock_factory, mock_resp):
        """
        Test revoke permissions from role.
        """
        request = mock.MagicMock()
        request.body = json.dumps(
            {'operations': ['READ'], 'role_id': 'test', 'resource': '/v2/some/'})
        mock_factory.role_manager.return_value.remove_permissions_from_role.return_value = None
        mock_factory.permission_manager.return_value.operation_names_to_values.return_value = [0]
        revoke = RevokeFromRoleView()
        response = revoke.post(request)

        mock_resp.assert_called_once_with(None)
        self.assertTrue(response is mock_resp.return_value)
        # 'READ' must have been translated to its numeric value ([0]).
        mock_factory.role_manager.return_value.remove_permissions_from_role.assert_called_once_with(
            'test', '/v2/some/', [0])

    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_EXECUTE())
    def test_revoke_from_role_invalid_param(self):
        """
        Test revoke permissions from role with missing required params.
        """
        request = mock.MagicMock()
        # 'role_id' is deliberately omitted from the body.
        request.body = json.dumps({'operations': ['READ'], 'resource': '/v2/some/'})
        revoke = RevokeFromRoleView()
        try:
            revoke.post(request)
        # Python 2 except syntax: binds the raised MissingValue to `response`.
        except MissingValue, response:
            self.assertEqual(response.http_status_code, 400)
            self.assertEqual(response.error_data['property_names'], ['role_id'])
        else:
            raise AssertionError("MissingValue should be raised with missing params")
class Test__validate_params(unittest.TestCase):
    """
    Test the _validate_params helper directly.
    """

    def test_validate_params(self):
        """
        Test the missing value is raised if some required params are missing.
        """
        # 'role_id' has a value; only the None-valued keys should be
        # reported as missing (in this order).
        params = {'login': None, 'resource': None, 'role_id': 'some_role'}
        try:
            _validate_params(params)
        # Python 2 except syntax: binds the raised MissingValue to `response`.
        except MissingValue, response:
            self.assertEqual(response.http_status_code, 400)
            self.assertEqual(response.error_data['property_names'], ['login', 'resource'])
        else:
            raise AssertionError("MissingValue should be raised with missing params")
| gpl-2.0 |
academic-colab/maslo-server | FTS.py | 1 | 5261 | #!/usr/bin/env python
'''
/******************************************************************************
* FTS.py
*
* Copyright (c) 2011-2012, Academic ADL Co-Lab, University of Wisconsin-Extension
* http://www.academiccolab.org/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*****************************************************************************/
'''
import sys
import os
import json
import urllib2
import re
import sqlite3 as dbs
## Reads a json file and returns a json object
def getJSON(path, isRelative=True):
if isRelative :
fPath = path + "/manifest"
else :
fPath = path
try :
f = open(fPath)
except :
print "File ", fPath, " cannot be opened."
return None
else :
data = json.load(f)
f.close()
return data
## Strip string passed as argument from common stopwords
def removeStopWords(text):
    """Strip common English stopwords from `text` and collapse whitespace.

    The stopword list is cached in ./stopwords.txt; on a cache miss it is
    downloaded once and written there for subsequent calls.
    """
    try:
        f = open("stopwords.txt", "r")
    except IOError:
        # Narrowed from a bare except: only a missing/unreadable cache file
        # should trigger the network fallback (a bare except also swallowed
        # KeyboardInterrupt/SystemExit).
        f = urllib2.urlopen('http://www.textfixer.com/resources/common-english-words.txt')
        stopwords = f.read()
        f = open("stopwords.txt", "w")
        f.write(stopwords)
        f.close()
    else:
        stopwords = f.read()
        f.close()
    for stopword in stopwords.strip().split(","):
        # \b anchors avoid eating substrings inside larger words.
        pattern = re.compile(r"\b%s\b" % stopword, re.IGNORECASE)
        text = pattern.sub("", text)
    # Collapse the whitespace runs left behind by the removals.
    return re.sub("[\s]+", " ", text)
## Create full text search table for pack contents
def createTable(db):
    """Create the full-text-search table for pack contents (idempotent).

    Args:
        db: an open sqlite3 connection.
    """
    statement = "CREATE VIRTUAL TABLE content_search using FTS3(pack,section,content,tokenize=porter);"
    try:
        db.execute(statement)
        db.commit()
    except dbs.OperationalError:
        # Narrowed from a bare except: only the expected "table already
        # exists" (or missing FTS3 support) is swallowed; programming
        # errors now surface.
        pass
## Create basic content pack table
def createTableUpper(db):
    """Create the basic content-pack table (idempotent).

    Args:
        db: an open sqlite3 connection.
    """
    statement = "CREATE TABLE content (pack text, path text, version text, author text, public int DEFAULT 0, category text);"
    try:
        db.execute(statement)
        db.commit()
    except dbs.OperationalError:
        # Narrowed from a bare except: only the expected "table already
        # exists" error is swallowed; programming errors now surface.
        pass
## insert data into content pack tables - FTS and basic
def insertData(pack, path, db, zipName=None, versionPath=None, author=None):
    """Index one content pack into the FTS table (and, when zipName is
    given, also register it in the basic `content` table).

    Args:
        pack: pack name.
        path: pack directory containing the manifest.
        db: open sqlite connection with the target tables.
        zipName: zip file name; presence selects the global-DB code path.
        versionPath: optional path of a version-info JSON file.
        author: optional author name for the `content` row.
    """
    data = getJSON(path)
    query = "INSERT INTO content_search(pack, section, content) VALUES (?,?,?)"
    query2 = "INSERT INTO content(pack, path, version, author, category) VALUES (?,?,?,?,?)"
    if zipName :
        version = "0"
        category = ""
        authorVal = ""
        if versionPath is not None and author is not None :
            print versionPath
            versionData = getJSON(versionPath, False)
            if versionData and "version" in versionData :
                version = versionData["version"]
            if versionData and "category" in versionData :
                category = versionData["category"]
            authorVal = author
        try :
            # The "qDir-" prefix is an internal artifact, stripped for storage.
            zn = zipName.replace("qDir-", "")
            db.execute(query2, (pack.decode('utf-8'), zn.decode('utf-8'),version, authorVal.decode('utf-8'), category))
        except Exception, e:
            print "Insert failed: ",pack, zn, version, authorVal
            print e
            pass
    # Used to strip HTML tags from section text before indexing.
    pattern = re.compile("<[^>]+>")
    print data
    for entry in data :
        title = entry["title"]
        normalTitle = removeStopWords(title)
        try :
            db.execute(query, (pack.decode('utf-8'), title, normalTitle,))
        except Exception, e:
            print "error:", e
            return
        text = None
        uPath = path.decode('utf-8')
        if entry["type"] == "text" :
            newPath = uPath+"/../"+entry["path"]
            f = open(newPath)
            text = f.read().strip()
            f.close()
        else :
            # Non-text entries may have an optional ".dsc" description file.
            newPath = uPath+"/../"+ entry["path"]+".dsc"
            try :
                f = open(newPath)
                text = f.read().strip()
                f.close()
            except :
                pass
        if text is not None:
            text = text.decode('utf-8')
            text = pattern.sub(" ", text)
            text = removeStopWords(text)
            try :
                db.execute(query, (pack.decode('utf-8'), title, text,))
            except Exception, e:
                print "error:", e
                return
    db.commit()
## Create tables if they don't exist, index argument-passed content pack, create database entries
def main(pathToManifest, PackName, pathToGlobalSearch=None, zipName=None, versionPath=None, author=None):
    """Index a content pack for full-text search.

    Always updates the pack-local search.db; when pathToGlobalSearch is
    given, also registers the pack in the global search database.
    """
    db = dbs.connect(pathToManifest + "/search.db")
    try:
        createTable(db)
        insertData(PackName, pathToManifest, db)
    finally:
        # Always release the connection, even if indexing raises
        # (the original leaked it on error).
        db.close()
    if pathToGlobalSearch:
        db = dbs.connect(pathToGlobalSearch + "/search.db")
        try:
            createTable(db)
            createTableUpper(db)
            insertData(PackName, pathToManifest, db, zipName, versionPath, author)
        finally:
            db.close()
## And now ... get to work.
if __name__ == "__main__" :
    # CLI: FTS.py <manifestPath> <packName> [globalDb [zipName [versionPath author]]]
    path = sys.argv[1]
    pack = sys.argv[2]
    globalDb = None
    zipName = None
    versionPath = None
    author = None
    if len(sys.argv) > 3 :
        globalDb = sys.argv[3]
    if len(sys.argv) > 4 :
        zipName = sys.argv[4]
    if len(sys.argv) > 5 :
        # versionPath and author are only meaningful together.
        versionPath = sys.argv[5]
        author = sys.argv[6]
    main(path, pack, globalDb, zipName, versionPath, author)
| gpl-3.0 |
zakuro9715/lettuce | tests/integration/lib/Django-1.2.5/django/core/cache/backends/locmem.py | 46 | 4062 | "Thread-safe in-memory cache backend."
import time
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.cache.backends.base import BaseCache
from django.utils.synch import RWLock
class CacheClass(BaseCache):
    """Thread-safe, process-local in-memory cache backend.

    Values are stored pickled in ``self._cache`` with absolute expiry
    timestamps kept in ``self._expire_info``; every access is serialised
    through a readers/writer lock.  Expired entries are evicted lazily
    on access rather than by a background thread.
    """

    def __init__(self, _, params):
        # The first positional argument (the cache "location" string) is
        # meaningless for an in-memory cache and is ignored.
        BaseCache.__init__(self, params)
        self._cache = {}
        self._expire_info = {}
        max_entries = params.get('max_entries', 300)
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            # Fall back to the default when the setting is not an integer.
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', 3)
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self._lock = RWLock()

    def add(self, key, value, timeout=None):
        """Store ``value`` only when ``key`` is absent or already expired.

        Returns True when the value was stored, False otherwise --
        including when the value cannot be pickled.
        """
        self.validate_key(key)
        self._lock.writer_enters()
        try:
            exp = self._expire_info.get(key)
            if exp is None or exp <= time.time():
                try:
                    self._set(key, pickle.dumps(value), timeout)
                    return True
                except pickle.PickleError:
                    # Unpicklable values are silently dropped.
                    pass
            return False
        finally:
            self._lock.writer_leaves()

    def get(self, key, default=None):
        """Return the cached value for ``key``, or ``default``.

        A hit is served under the (shared) reader lock; an expired entry
        falls through to the writer section below and is evicted there.
        """
        self.validate_key(key)
        self._lock.reader_enters()
        try:
            exp = self._expire_info.get(key)
            if exp is None:
                return default
            elif exp > time.time():
                try:
                    return pickle.loads(self._cache[key])
                except pickle.PickleError:
                    return default
        finally:
            self._lock.reader_leaves()
        # The entry has expired: re-enter as a writer and delete it.
        self._lock.writer_enters()
        try:
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                # Another writer may have evicted it between the locks.
                pass
            return default
        finally:
            self._lock.writer_leaves()

    def _set(self, key, value, timeout=None):
        # Internal store; the caller must already hold the writer lock.
        # ``value`` is the already-pickled byte string.
        if len(self._cache) >= self._max_entries:
            self._cull()
        if timeout is None:
            timeout = self.default_timeout
        self._cache[key] = value
        self._expire_info[key] = time.time() + timeout

    def set(self, key, value, timeout=None):
        """Unconditionally store ``value``; unpicklable values are dropped."""
        self.validate_key(key)
        self._lock.writer_enters()
        # Python 2.4 doesn't allow combined try-except-finally blocks.
        try:
            try:
                self._set(key, pickle.dumps(value), timeout)
            except pickle.PickleError:
                pass
        finally:
            self._lock.writer_leaves()

    def has_key(self, key):
        """Return True when ``key`` exists and has not expired.

        Like get(), an expired entry is evicted here under the writer lock.
        """
        self.validate_key(key)
        self._lock.reader_enters()
        try:
            exp = self._expire_info.get(key)
            if exp is None:
                return False
            elif exp > time.time():
                return True
        finally:
            self._lock.reader_leaves()

        self._lock.writer_enters()
        try:
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return False
        finally:
            self._lock.writer_leaves()

    def _cull(self):
        # Evict roughly 1/cull_frequency of the entries (every Nth key in
        # dict iteration order); a frequency of 0 means "wipe everything".
        if self._cull_frequency == 0:
            self.clear()
        else:
            doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
            for k in doomed:
                self._delete(k)

    def _delete(self, key):
        # Remove ``key`` from both dicts, tolerating absence in either.
        try:
            del self._cache[key]
        except KeyError:
            pass
        try:
            del self._expire_info[key]
        except KeyError:
            pass

    def delete(self, key):
        """Remove ``key`` from the cache (no-op when absent)."""
        self.validate_key(key)
        self._lock.writer_enters()
        try:
            self._delete(key)
        finally:
            self._lock.writer_leaves()

    def clear(self):
        # NOTE(review): clear() does not take the writer lock, unlike the
        # other mutators -- dict.clear() is atomic in CPython, but confirm
        # this is intentional before relying on it.
        self._cache.clear()
        self._expire_info.clear()
| gpl-3.0 |
madformuse/server | server/natpacketserver.py | 1 | 1848 | import socket
from server.subscribable import Subscribable
from .decorators import with_logger
@with_logger
@with_logger
class NatPacketServer(Subscribable):
    """Listens on a UDP port for NAT-traversal probe packets and notifies
    subscribers with a ``ProcessServerNatPacket`` command for each
    well-formed probe (first byte ``0x08``)."""

    def __init__(self, loop, port):
        super().__init__()
        self.loop = loop
        self.port = port
        self._logger.debug("{id} Listening on {port}".format(id=id(self), port=port))
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(('', port))
        # Non-blocking so _recv() never stalls the event loop.
        s.setblocking(False)
        loop.add_reader(s.fileno(), self._recv)
        self._socket = s
        self._subscribers = {}

    def close(self):
        """Unregister from the event loop and release the socket."""
        # Bug fix: remove_reader() takes the file descriptor that was
        # registered in __init__.  The original code called self._recv()
        # here, performing a spurious read and passing its None return
        # value to remove_reader().
        self.loop.remove_reader(self._socket.fileno())
        try:
            self._socket.shutdown(socket.SHUT_RDWR)
        except OSError as ex:
            # Shutting down an unconnected/already-closed socket can fail;
            # log it but still close the descriptor below.
            self._logger.exception(ex)
        finally:
            self._socket.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def _recv(self):
        """Drain one datagram; emit a notification for 0x08 probe packets
        and acknowledge the sender with b"\\x08OK"."""
        try:
            data, addr = self._socket.recvfrom(512)
            self._logger.debug("Received UDP {} from {}".format(data, addr))
            if data[0] == 0x8:
                self._logger.debug("Emitting with: {} {} {} ".format(data[1:].decode(),
                                                                     addr[0], addr[1]))
                self.notify({
                    'command_id': 'ProcessServerNatPacket',
                    'arguments': ["{}:{}".format(addr[0], addr[1]), data[1:].decode()]
                })
                self._socket.sendto(b"\x08OK", addr)
        except OSError as ex:
            # NOTE(review): socket.EWOULDBLOCK looks suspect -- the usual
            # constant is errno.EWOULDBLOCK; confirm the socket module on
            # the target platform actually exposes it.
            if ex.errno == socket.EWOULDBLOCK:
                pass
            else:
                self._logger.critical(ex)
                raise ex
        except Exception as ex:
            self._logger.critical(ex)
            raise ex
| gpl-3.0 |
spinellic/Mission-Planner | Lib/distutils/cygwinccompiler.py | 54 | 17719 | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it doesn't worked reliable in some
# tested configurations. And because other windows compilers also
# need their symbols specified this no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.
    """
    # sys.version embeds e.g. "[MSC v.1500 32 bit (Intel)]" for MSVC builds.
    marker = 'MSC v.'
    pos = sys.version.find(marker)
    if pos == -1:
        # Not an MSVC build; historical behaviour is to return None.
        return None
    msc_ver = sys.version[pos + 6:pos + 10]
    # Map the internal compiler version to its runtime import library.
    runtimes = {
        '1300': ['msvcr70'],   # MSVC 7.0
        '1310': ['msvcr71'],   # MSVC 7.1
        '1400': ['msvcr80'],   # VS2005 / MSVC 8.0
        '1500': ['msvcr90'],   # VS2008 / MSVC 9.0
    }
    try:
        return runtimes[msc_ver]
    except KeyError:
        raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler (UnixCCompiler):
    """Handles the Cygwin port of the GNU C compiler to Windows.

    Extends UnixCCompiler with Windows-specific file extensions,
    windres handling for .rc/.res resources, and def-file generation
    for exported symbols when linking DLLs.
    """

    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"

    def __init__ (self, verbose=0, dry_run=0, force=0):
        # Probe the installed gcc/ld/dllwrap versions up front: they decide
        # which linker driver and shared-library options are usable.

        UnixCCompiler.__init__ (self, verbose, dry_run, force)

        (status, details) = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)

        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )

        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))

        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            self.dll_libraries = get_msvcr()

    # __init__ ()

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile one source file; .rc/.res resources go through windres
        instead of the C compiler."""
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError, msg:
                raise CompileError, msg
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg

    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        """Link objects into a DLL or executable, writing a .def file for
        exported symbols and (for dllwrap) an import library."""

        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")

            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)

        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        UnixCCompiler.link(self,
                           target_desc,
                           objects,
                           output_filename,
                           output_dir,
                           libraries,
                           library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug,
                           extra_preargs,
                           extra_postargs,
                           build_temp,
                           target_lang)

    # link ()

    # -- Miscellaneous methods -----------------------------------------

    # overwrite the one from CCompiler to support rc and res-files
    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source filenames to object filenames; resource files keep
        their original extension in the object name (foo.rc -> foo.rc.o)."""
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError, \
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res' or ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()

# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
    """The same as the Cygwin compiler, but driving gcc in no-cygwin
    (mingw32) mode and linking against the MSVC runtime instead of the
    Cygwin DLL."""

    compiler_type = 'mingw32'

    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):

        CygwinCCompiler.__init__ (self, verbose, dry_run, force)

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        # Override every executable with the -mno-cygwin variants.
        self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
                             compiler_so='gcc -mno-cygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mno-cygwin -O -Wall',
                             linker_exe='gcc -mno-cygwin',
                             linker_so='%s -mno-cygwin %s %s'
                                        % (self.linker_dll, shared_option,
                                           entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # no additional libraries needed
        self.dll_libraries=[]

        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later.
        self.dll_libraries = get_msvcr()

    # __init__ ()

# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.
# Status constants returned by check_config_h().
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"

def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC.  Returns a tuple
    (status, details), where 'status' is one of the following constants:

    CONFIG_H_OK
      all is well, go ahead and compile
    CONFIG_H_NOTOK
      doesn't look good
    CONFIG_H_UNCERTAIN
      not sure -- unable to read pyconfig.h

    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """

    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...

    from distutils import sysconfig
    import string
    # if sys.version contains GCC then python was compiled with
    # GCC, and the pyconfig.h file should be OK
    if string.find(sys.version,"GCC") >= 0:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")

    fn = sysconfig.get_config_h_filename()
    try:
        # It would probably better to read single lines to search.
        # But we do this only once, and it is fast enough
        f = open(fn)
        try:
            s = f.read()
        finally:
            f.close()

    except IOError, exc:
        # if we can't read this file, we cannot say it is wrong
        # the compiler will complain later about this file as missing
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))

    else:
        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
        if string.find(s,"__GNUC__") >= 0:
            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
        else:
            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.
    If not possible it returns None for it.

    Returns a (gcc_version, ld_version, dllwrap_version) tuple of
    LooseVersion objects (or None for any tool that is missing or whose
    output could not be parsed).
    """
    from distutils.version import LooseVersion
    from distutils.spawn import find_executable
    import re

    def _tool_version(name, flag, pattern):
        # Run "<name> <flag>" and parse a dotted version number out of its
        # output.  Returns None when the executable is not on PATH or the
        # output does not match.  This replaces three near-identical copies
        # of the same probe logic in the original.
        exe = find_executable(name)
        if not exe:
            return None
        out = os.popen(exe + ' ' + flag, 'r')
        out_string = out.read()
        out.close()
        result = re.search(pattern, out_string)
        if result:
            return LooseVersion(result.group(1))
        return None

    gcc_version = _tool_version('gcc', '-dumpversion', r'(\d+\.\d+(\.\d+)*)')
    ld_version = _tool_version('ld', '-v', r'(\d+\.\d+(\.\d+)*)')
    # dllwrap prints a leading space before its version number.
    dllwrap_version = _tool_version('dllwrap', '--version',
                                    r' (\d+\.\d+(\.\d+)*)')
    return (gcc_version, ld_version, dllwrap_version)
| gpl-3.0 |
bagel/mongo-web-shell | webapps/lib/db.py | 3 | 1457 | # Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from urlparse import urlparse
from flask import current_app
import pymongo
from MWSServerError import MWSServerError
_logger = logging.getLogger(__name__)
db = None
def get_db(MWSExceptions=True):
    """Return the module-cached PyMongo database handle, creating it on
    first use from the Flask app config (DB_HOST/DB_PORT/DB_NAME).

    When ``MWSExceptions`` is true, any connection failure is wrapped in
    an MWSServerError(500); otherwise the original exception propagates.
    """
    global db
    config = current_app.config
    # TODO: Ensure MongoClient connection is still active.
    if db:
        # Reuse the previously created handle.
        return db
    try:
        client = pymongo.MongoClient(
            config.get('DB_HOST'),
            config.get('DB_PORT'))
        db = client[config.get('DB_NAME')]
        # Authenticate only when credentials are present in the config.
        if 'username' in config:
            db.authenticate(config.get('username'), config.get('password'))
        return db
    except Exception as e:
        if MWSExceptions:
            # Hide internal error details from clients unless DEBUG is on.
            debug = config['DEBUG']
            msg = str(e) if debug else 'An unexpected error occurred.'
            raise MWSServerError(500, msg)
        raise
| apache-2.0 |
SandPox/android_kernel_samsung_kyleproxx | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    """Open a perf event on every CPU/thread watching task and comm
    events, then print one line per sampled event forever."""
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    while True:
        # Block until at least one event is available, then drain each CPU.
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # Trailing comma keeps the event repr on the same output line.
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()
| gpl-2.0 |
belmiromoreira/nova | nova/filters.py | 30 | 3113 | # Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter support
"""
from oslo_log import log as logging
from nova.i18n import _LI
from nova import loadables
LOG = logging.getLogger(__name__)
class BaseFilter(object):
    """Base class for all filter classes."""

    # Set to true in a subclass if a filter only needs to be run once
    # for each request rather than for each instance
    run_filter_once_per_request = False

    def _filter_one(self, obj, filter_properties):
        """Return True if it passes the filter, False otherwise.
        Override this in a subclass.
        """
        return True

    def filter_all(self, filter_obj_list, filter_properties):
        """Yield objects that pass the filter.

        Can be overridden in a subclass, if you need to base filtering
        decisions on all objects.  Otherwise, one can just override
        _filter_one() to filter a single object.
        """
        return (candidate for candidate in filter_obj_list
                if self._filter_one(candidate, filter_properties))

    def run_filter_for_index(self, index):
        """Return True if the filter needs to be run for the "index-th"
        instance in a request.  Only need to override this if a filter
        needs anything other than "first only" or "all" behaviour.
        """
        return not (self.run_filter_once_per_request and index > 0)
class BaseFilterHandler(loadables.BaseLoader):
    """Base class to handle loading filter classes.

    This class should be subclassed where one needs to use filters.
    """

    def get_filtered_objects(self, filters, objs, filter_properties, index=0):
        """Run ``objs`` through every filter in ``filters`` in order.

        Returns the list of surviving objects, or None when a filter
        aborts the chain by returning None from filter_all().
        """
        list_objs = list(objs)
        LOG.debug("Starting with %d host(s)", len(list_objs))
        for filter_ in filters:
            # Honour run_filter_once_per_request: some filters only apply
            # to the first instance of a multi-instance request.
            if filter_.run_filter_for_index(index):
                cls_name = filter_.__class__.__name__
                objs = filter_.filter_all(list_objs, filter_properties)
                if objs is None:
                    LOG.debug("Filter %s says to stop filtering", cls_name)
                    return
                list_objs = list(objs)
                if not list_objs:
                    # Nothing survived this filter; log and stop early.
                    LOG.info(_LI("Filter %s returned 0 hosts"), cls_name)
                    break
                LOG.debug("Filter %(cls_name)s returned "
                          "%(obj_len)d host(s)",
                          {'cls_name': cls_name, 'obj_len': len(list_objs)})
        return list_objs
| apache-2.0 |
tdliu/hoop-picks | lib/requests/sessions.py | 115 | 26216 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from ._internal_utils import to_native_string
from .utils import to_key_val_list, default_headers
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url, rewind_body
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Determines appropriate setting for a given request, taking into account
    the explicit setting on that request, and the setting in the session. If a
    setting is a dictionary, they will be merged together using `dict_class`
    """
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting

    # Only mappings can be merged; any other type (e.g. the `verify` flag)
    # is taken verbatim from the request.
    both_mappings = (isinstance(session_setting, Mapping)
                     and isinstance(request_setting, Mapping))
    if not both_mappings:
        return request_setting

    # Session values form the base; request values override them.
    merged = dict_class(to_key_val_list(session_setting))
    merged.update(to_key_val_list(request_setting))

    # A request value of None means "remove this key entirely".  Collect
    # the keys first so we never mutate the dict while iterating it.
    for key in [k for k, v in merged.items() if v is None]:
        del merged[key]

    return merged
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Properly merges both requests and session hooks.

    This is necessary because when request_hooks == {'response': []}, the
    merge breaks Session hooks entirely.
    """
    def _no_hooks(hooks):
        # Treat "no hooks at all" and "explicitly empty response hooks"
        # identically, so an empty list never clobbers the other side.
        return hooks is None or hooks.get('response') == []

    if _no_hooks(session_hooks):
        return request_hooks
    if _no_hooks(request_hooks):
        return session_hooks
    return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
    """Mixin providing redirect resolution plus the auth/proxy/method
    rewriting rules that must accompany each hop."""

    def resolve_redirects(self, resp, req, stream=False, timeout=None,
                          verify=True, cert=None, proxies=None, **adapter_kwargs):
        """Receives a Response. Returns a generator of Responses.

        Each yielded Response is the result of following one redirect hop
        of ``resp``; raises TooManyRedirects past self.max_redirects.
        """

        i = 0
        hist = [] # keep track of history

        while resp.is_redirect:
            prepared_request = req.copy()

            if i > 0:
                # Update history and keep track of redirects.
                hist.append(resp)
                new_hist = list(hist)
                resp.history = new_hist

            try:
                resp.content # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                resp.raw.read(decode_content=False)

            if i >= self.max_redirects:
                raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)

            # Release the connection back into the pool.
            resp.close()

            url = resp.headers['location']

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = '%s:%s' % (parsed_rurl.scheme, url)

            # The scheme should be lower case...
            parsed = urlparse(url)
            url = parsed.geturl()

            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not parsed.netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)

            prepared_request.url = to_native_string(url)
            # Cache the url, unless it redirects to itself.
            if resp.is_permanent_redirect and req.url != prepared_request.url:
                self.redirect_cache[req.url] = prepared_request.url

            self.rebuild_method(prepared_request, resp)

            # https://github.com/kennethreitz/requests/issues/1084
            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
                # https://github.com/kennethreitz/requests/issues/3490
                purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
                for header in purged_headers:
                    prepared_request.headers.pop(header, None)
                prepared_request.body = None

            headers = prepared_request.headers
            try:
                # Stale Cookie header; the jar below supplies fresh cookies.
                del headers['Cookie']
            except KeyError:
                pass

            # Extract any cookies sent on the response to the cookiejar
            # in the new request. Because we've mutated our copied prepared
            # request, use the old one that we haven't yet touched.
            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
            merge_cookies(prepared_request._cookies, self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)

            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)

            # A failed tell() sets `_body_position` to `object()`. This non-None
            # value ensures `rewindable` will be True, allowing us to raise an
            # UnrewindableBodyError, instead of hanging the connection.
            rewindable = (
                prepared_request._body_position is not None and
                ('Content-Length' in headers or 'Transfer-Encoding' in headers)
            )

            # Attempt to rewind consumed file-like object.
            if rewindable:
                rewind_body(prepared_request)

            # Override the original request.
            req = prepared_request

            resp = self.send(
                req,
                stream=stream,
                timeout=timeout,
                verify=verify,
                cert=cert,
                proxies=proxies,
                allow_redirects=False,
                **adapter_kwargs
            )

            extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)

            i += 1
            yield resp

    def rebuild_auth(self, prepared_request, response):
        """When being redirected we may want to strip authentication from the
        request to avoid leaking credentials. This method intelligently removes
        and reapplies authentication where possible to avoid credential loss.
        """
        headers = prepared_request.headers
        url = prepared_request.url

        if 'Authorization' in headers:
            # If we get redirected to a new host, we should strip out any
            # authentication headers.
            original_parsed = urlparse(response.request.url)
            redirect_parsed = urlparse(url)

            if (original_parsed.hostname != redirect_parsed.hostname):
                del headers['Authorization']

        # .netrc might have more auth for us on our new host.
        new_auth = get_netrc_auth(url) if self.trust_env else None
        if new_auth is not None:
            prepared_request.prepare_auth(new_auth)

        return

    def rebuild_proxies(self, prepared_request, proxies):
        """This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).

        This method also replaces the Proxy-Authorization header where
        necessary.

        :rtype: dict
        """
        headers = prepared_request.headers
        url = prepared_request.url
        scheme = urlparse(url).scheme
        new_proxies = proxies.copy() if proxies is not None else {}

        if self.trust_env and not should_bypass_proxies(url):
            environ_proxies = get_environ_proxies(url)

            # Per-scheme proxy falls back to the catch-all 'all' entry.
            proxy = environ_proxies.get(scheme, environ_proxies.get('all'))

            if proxy:
                new_proxies.setdefault(scheme, proxy)

        if 'Proxy-Authorization' in headers:
            del headers['Proxy-Authorization']

        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            username, password = None, None

        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username, password)

        return new_proxies

    def rebuild_method(self, prepared_request, response):
        """When being redirected we may want to change the method of the request
        based on certain specs or browser behavior.
        """
        method = prepared_request.method

        # http://tools.ietf.org/html/rfc7231#section-6.4.4
        if response.status_code == codes.see_other and method != 'HEAD':
            method = 'GET'

        # Do what the browsers do, despite standards...
        # First, turn 302s into GETs.
        if response.status_code == codes.found and method != 'HEAD':
            method = 'GET'

        # Second, if a POST is responded to with a 301, turn it into a GET.
        # This bizarre behaviour is explained in Issue 1704.
        if response.status_code == codes.moved and method == 'POST':
            method = 'GET'

        prepared_request.method = method
class Session(SessionRedirectMixin):
    """A Requests session.

    Provides cookie persistence, connection-pooling, and configuration.

    Basic Usage::

      >>> import requests
      >>> s = requests.Session()
      >>> s.get('http://httpbin.org/get')
      <Response [200]>

    Or as a context manager::

      >>> with requests.Session() as s:
      >>>     s.get('http://httpbin.org/get')
      <Response [200]>
    """

    # Attributes that participate in pickling (see __getstate__/__setstate__).
    __attrs__ = [
        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
        'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
        'max_redirects',
    ]

    def __init__(self):

        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()

        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None

        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
        #: be used on each :class:`Request <Request>`.
        self.proxies = {}

        #: Event-handling hooks.
        self.hooks = default_hooks()

        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}

        #: Stream response content default.
        self.stream = False

        #: SSL Verification default.
        self.verify = True

        #: SSL client certificate default.
        self.cert = None

        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
        #: 30.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT

        #: Trust environment settings for proxy configuration, default
        #: authentication and similar.
        self.trust_env = True

        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})

        # Default connection adapters; mount() keeps this ordered so that the
        # longest (most specific) prefixes are matched first by get_adapter().
        self.adapters = OrderedDict()
        self.mount('https://', HTTPAdapter())
        self.mount('http://', HTTPAdapter())

        # Only store 1000 redirects to prevent using infinite memory
        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
def __enter__(self):
    # Context-manager entry: the session itself is the managed resource.
    return self

def __exit__(self, *args):
    # Context-manager exit: close all mounted adapters regardless of errors.
    self.close()
def prepare_request(self, request):
    """Build and return a :class:`PreparedRequest <PreparedRequest>` for
    transmission, merging the settings of the given :class:`Request
    <Request>` with those stored on this :class:`Session`.

    :param request: :class:`Request` instance to prepare with this
        session's settings.
    :rtype: requests.PreparedRequest
    """
    raw_cookies = request.cookies or {}

    # Bootstrap CookieJar.
    if not isinstance(raw_cookies, cookielib.CookieJar):
        raw_cookies = cookiejar_from_dict(raw_cookies)

    # Merge with session cookies: session-level cookies first, then the
    # per-request ones on top.
    session_jar = merge_cookies(RequestsCookieJar(), self.cookies)
    combined_jar = merge_cookies(session_jar, raw_cookies)

    # Set environment's basic authentication if not explicitly set.
    request_auth = request.auth
    if self.trust_env and not (request_auth or self.auth):
        request_auth = get_netrc_auth(request.url)

    prepared = PreparedRequest()
    prepared.prepare(
        method=request.method.upper(),
        url=request.url,
        files=request.files,
        data=request.data,
        json=request.json,
        headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
        params=merge_setting(request.params, self.params),
        auth=merge_setting(request_auth, self.auth),
        cookies=combined_jar,
        hooks=merge_hooks(request.hooks, self.hooks),
    )
    return prepared
def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None):
    """Constructs a :class:`Request <Request>`, prepares it and sends it.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query
        string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send
        in the body of the :class:`Request`.
    :param json: (optional) json to send in the body of the
        :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the
        :class:`Request`.
    :param files: (optional) Dictionary of ``'filename': file-like-objects``
        for multipart encoding upload.
    :param auth: (optional) Auth tuple or callable to enable
        Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a :ref:`(connect timeout,
        read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Set to True by default.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol or protocol and
        hostname to the URL of the proxy.
    :param stream: (optional) whether to immediately download the response
        content. Defaults to ``False``.
    :param verify: (optional) whether the SSL cert will be verified.
        A CA_BUNDLE path can also be provided. Defaults to ``True``.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    :rtype: requests.Response
    """
    # Create the Request.
    req = Request(
        method=method.upper(),
        url=url,
        headers=headers,
        files=files,
        data=data or {},
        json=json,
        params=params or {},
        auth=auth,
        cookies=cookies,
        hooks=hooks,
    )
    # Merge the request with session-level defaults (headers, auth, cookies).
    prep = self.prepare_request(req)

    proxies = proxies or {}

    # Layer environment settings (env proxies, CA bundle vars) under the
    # explicit per-call arguments.
    settings = self.merge_environment_settings(
        prep.url, proxies, stream, verify, cert
    )

    # Send the request.
    send_kwargs = {
        'timeout': timeout,
        'allow_redirects': allow_redirects,
    }
    send_kwargs.update(settings)
    resp = self.send(prep, **send_kwargs)

    return resp
def get(self, url, **kwargs):
    r"""Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :rtype: requests.Response
    """
    kwargs.setdefault('allow_redirects', True)
    return self.request('GET', url, **kwargs)

def options(self, url, **kwargs):
    r"""Sends an OPTIONS request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :rtype: requests.Response
    """
    kwargs.setdefault('allow_redirects', True)
    return self.request('OPTIONS', url, **kwargs)

def head(self, url, **kwargs):
    r"""Sends a HEAD request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :rtype: requests.Response
    """
    # HEAD does not follow redirects by default, matching browser behavior.
    kwargs.setdefault('allow_redirects', False)
    return self.request('HEAD', url, **kwargs)

def post(self, url, data=None, json=None, **kwargs):
    r"""Sends a POST request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :rtype: requests.Response
    """
    return self.request('POST', url, data=data, json=json, **kwargs)

def put(self, url, data=None, **kwargs):
    r"""Sends a PUT request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :rtype: requests.Response
    """
    return self.request('PUT', url, data=data, **kwargs)

def patch(self, url, data=None, **kwargs):
    r"""Sends a PATCH request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :rtype: requests.Response
    """
    return self.request('PATCH', url, data=data, **kwargs)

def delete(self, url, **kwargs):
    r"""Sends a DELETE request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :rtype: requests.Response
    """
    return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
    """
    Send a given PreparedRequest.

    :rtype: requests.Response
    """
    # Set defaults that the hooks can utilize to ensure they always have
    # the correct parameters to reproduce the previous request.
    kwargs.setdefault('stream', self.stream)
    kwargs.setdefault('verify', self.verify)
    kwargs.setdefault('cert', self.cert)
    kwargs.setdefault('proxies', self.proxies)

    # It's possible that users might accidentally send a Request object.
    # Guard against that specific failure case.
    if isinstance(request, Request):
        raise ValueError('You can only send PreparedRequests.')

    # Set up variables needed for resolve_redirects and dispatching of hooks.
    # allow_redirects must be popped: the adapter does not accept it.
    allow_redirects = kwargs.pop('allow_redirects', True)
    stream = kwargs.get('stream')
    hooks = request.hooks

    # Resolve URL in redirect cache, if available. checked_urls guards
    # against a cycle inside the cache itself.
    if allow_redirects:
        checked_urls = set()
        while request.url in self.redirect_cache:
            checked_urls.add(request.url)
            new_url = self.redirect_cache.get(request.url)
            if new_url in checked_urls:
                break
            request.url = new_url

    # Get the appropriate adapter to use
    adapter = self.get_adapter(url=request.url)

    # Start time (approximately) of the request
    start = datetime.utcnow()

    # Send the request
    r = adapter.send(request, **kwargs)

    # Total elapsed time of the request (approximately)
    r.elapsed = datetime.utcnow() - start

    # Response manipulation hooks
    r = dispatch_hook('response', hooks, r, **kwargs)

    # Persist cookies
    if r.history:
        # If the hooks create history then we want those cookies too
        for resp in r.history:
            extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
    extract_cookies_to_jar(self.cookies, request, r.raw)

    # Redirect resolving generator.
    gen = self.resolve_redirects(r, request, **kwargs)

    # Resolve redirects if allowed.
    history = [resp for resp in gen] if allow_redirects else []

    # Shuffle things around if there's history: the final response becomes
    # ``r`` and everything before it (including the original) is history.
    if history:
        # Insert the first (original) request at the start
        history.insert(0, r)
        # Get the last request made
        r = history.pop()
        r.history = history

    # Eagerly consume the body unless the caller asked for streaming.
    if not stream:
        r.content

    return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
    """
    Check the environment and merge it with some settings.

    :rtype: dict
    """
    # Gather clues from the surrounding environment.
    if self.trust_env:
        # Fill in any proxies the environment defines that the caller
        # did not explicitly override.
        for scheme_key, proxy_url in (get_environ_proxies(url) or {}).items():
            proxies.setdefault(scheme_key, proxy_url)

        # Look for requests environment configuration and be compatible
        # with cURL.
        if verify is True or verify is None:
            verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
                      os.environ.get('CURL_CA_BUNDLE'))

    # Merge all the kwargs, letting explicit values win over session defaults.
    return {
        'verify': merge_setting(verify, self.verify),
        'proxies': merge_setting(proxies, self.proxies),
        'stream': merge_setting(stream, self.stream),
        'cert': merge_setting(cert, self.cert),
    }
def get_adapter(self, url):
    """
    Returns the appropriate connection adapter for the given URL.

    :rtype: requests.adapters.BaseAdapter
    """
    # Prefixes are kept longest-first by mount(), so the first hit is the
    # most specific one. Matching is case-insensitive on the URL side.
    lowered = url.lower()
    for prefix, adapter in self.adapters.items():
        if lowered.startswith(prefix):
            return adapter

    # Nothing matches :-/
    raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
    """Closes all adapters and as such the session"""
    # Each mounted adapter owns its own connection pool; close them all.
    for adapter in self.adapters.values():
        adapter.close()
def mount(self, prefix, adapter):
    """Registers a connection adapter to a prefix.

    Adapters are kept sorted in descending order by prefix length so that
    get_adapter() matches the most specific prefix first.
    """
    self.adapters[prefix] = adapter
    # Re-append every shorter prefix so it ends up *after* the new one in
    # the ordered mapping.
    shorter_prefixes = [key for key in self.adapters if len(key) < len(prefix)]
    for shorter in shorter_prefixes:
        self.adapters[shorter] = self.adapters.pop(shorter)
def __getstate__(self):
    # Pickle only the attributes listed in __attrs__; missing ones are
    # serialized as None rather than raising.
    state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
    # The LRU container is not picklable; snapshot it as a plain dict.
    state['redirect_cache'] = dict(self.redirect_cache)
    return state

def __setstate__(self, state):
    # Rebuild the LRU redirect cache from the plain-dict snapshot, keeping
    # the configured size bound.
    redirect_cache = state.pop('redirect_cache', {})
    for attr, value in state.items():
        setattr(self, attr, value)
    self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
    for redirect, to in redirect_cache.items():
        self.redirect_cache[redirect] = to
def session():
    """
    Returns a :class:`Session` for context-management.

    :rtype: Session
    """
    # Kept for backwards compatibility; ``requests.Session()`` is equivalent.
    return Session()
| apache-2.0 |
PierreRaybaut/PythonQwt | doc/symbol_path_example.py | 1 | 1089 | from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from qtpy import QtCore as QC
import qwt
import numpy as np
import os.path as osp
# Demonstrates a custom QwtSymbol built from a QPainterPath, then renders it
# on a simple sine curve and saves a screenshot for the documentation.
app = QW.QApplication([])

# --- Construct custom symbol ---
# Arrow-like glyph described in symbol-local coordinates; the final on-screen
# size comes from setSize() below.
path = QG.QPainterPath()
path.moveTo(0, 8)
path.lineTo(0, 5)
path.lineTo(-3, 5)
path.lineTo(0, 0)
path.lineTo(3, 5)
path.lineTo(0, 5)
# Tilt the glyph 30 degrees counter-clockwise.
transform = QG.QTransform()
transform.rotate(-30.0)
path = transform.map(path)
pen = QG.QPen(QC.Qt.black, 2)
pen.setJoinStyle(QC.Qt.MiterJoin)
symbol = qwt.QwtSymbol()
symbol.setPen(pen)
symbol.setBrush(QC.Qt.red)
symbol.setPath(path)
# Pin point (0, 0) is the path coordinate anchored at each data point.
symbol.setPinPoint(QC.QPointF(0.0, 0.0))
symbol.setSize(10, 14)

# --- Test it within a simple plot ---
curve = qwt.QwtPlotCurve()
curve_pen = QG.QPen(QC.Qt.blue)
curve_pen.setStyle(QC.Qt.DotLine)
curve.setPen(curve_pen)
curve.setSymbol(symbol)
x = np.linspace(0, 10, 10)
curve.setData(x, np.sin(x))
plot = qwt.QwtPlot()
curve.attach(plot)
plot.resize(600, 300)
plot.replot()
plot.show()
# Save a PNG of the rendered plot next to this script for the docs.
plot.grab().save(
    osp.join(osp.abspath(osp.dirname(__file__)), "images", "symbol_path_example.png")
)
app.exec_()
| lgpl-2.1 |
ecederstrand/django | django/test/client.py | 132 | 26745 | from __future__ import unicode_literals
import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core import urlresolvers
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import ISO_8859_1, UTF_8, WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.utils import six
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.six.moves.urllib.parse import urlparse, urlsplit
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
# Fixed multipart boundary used by the test client's encoder.
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
# Extracts the charset parameter from a Content-Type header.
# Raw string: the original non-raw literal relied on invalid escape
# sequences (\w, \d), which emit DeprecationWarning and will eventually
# become a SyntaxError.
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
class RedirectCycleError(Exception):
    """Raised when the test client is asked to follow a redirect loop."""

    def __init__(self, message, last_response):
        super(RedirectCycleError, self).__init__(message)
        # Expose the chain that led into the loop, plus the final response,
        # so tests can inspect where the cycle occurred.
        self.redirect_chain = last_response.redirect_chain
        self.last_response = last_response
class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read since data from
    the network can't be seeked and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test client
    that wouldn't work in Real Life.
    """
    def __init__(self, content=None):
        # Backing buffer plus an explicit byte count so reads stay bounded.
        self.__content = BytesIO()
        self.__len = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        # Number of unread bytes remaining.
        return self.__len

    def read(self, num_bytes=None):
        # The first read rewinds the buffer; from then on writes are refused.
        if not self.read_started:
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            # Default: drain everything that is left.
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        if self.read_started:
            # Bug fix: the error message read "after he's been read".
            raise ValueError("Unable to write a payload after it's been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
    """
    Yield from ``iterable`` and guarantee that ``close`` runs afterwards,
    emulating a WSGI server closing a streaming response.
    """
    try:
        for item in iterable:
            yield item
    finally:
        # close() itself fires request_finished, so temporarily disconnect
        # close_old_connections to avoid running it twice.
        request_finished.disconnect(close_old_connections)
        close()  # will fire request_finished
        request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
    """
    A HTTP Handler that can be used for testing purposes. Uses the WSGI
    interface to compose requests, but returns the raw HttpResponse object with
    the originating WSGIRequest attached to its ``wsgi_request`` attribute.
    """
    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
        # Whether CsrfViewMiddleware should actually be enforced (see below).
        self.enforce_csrf_checks = enforce_csrf_checks
        super(ClientHandler, self).__init__(*args, **kwargs)

    def __call__(self, environ):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()

        # Fire request_started manually, but without close_old_connections:
        # the test client reuses DB connections across requests.
        request_started.disconnect(close_old_connections)
        request_started.send(sender=self.__class__, environ=environ)
        request_started.connect(close_old_connections)
        request = WSGIRequest(environ)
        # sneaky little hack so that we can easily get round
        # CsrfViewMiddleware. This makes life easier, and is probably
        # required for backwards compatibility with external tests against
        # admin views.
        request._dont_enforce_csrf_checks = not self.enforce_csrf_checks

        # Request goes through middleware.
        response = self.get_response(request)

        # Attach the originating request to the response so that it could be
        # later retrieved.
        response.wsgi_request = request

        # We're emulating a WSGI server; we must call the close method
        # on completion.
        if response.streaming:
            # Defer close() until the streaming content is exhausted.
            response.streaming_content = closing_iterator_wrapper(
                response.streaming_content, response.close)
        else:
            request_finished.disconnect(close_old_connections)
            response.close()  # will fire request_finished
            request_finished.connect(close_old_connections)

        return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
    """
    Signal receiver that records every rendered template and a snapshot of
    its context into ``store``.

    The context is copied so that it is an accurate representation at the time
    of rendering.
    """
    rendered = store.setdefault('templates', [])
    rendered.append(template)
    contexts = store.setdefault('context', ContextList())
    contexts.append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
    """Return the multipart lines (headers plus raw payload) for one file field."""
    def to_bytes(s):
        return force_bytes(s, settings.DEFAULT_CHARSET)

    filename = os.path.basename(file.name) if hasattr(file, 'name') else ''

    # Content type: explicit attribute wins, then a guess from the filename,
    # and finally the generic binary fallback.
    if hasattr(file, 'content_type'):
        content_type = file.content_type
    elif filename:
        content_type = mimetypes.guess_type(filename)[0]
    else:
        content_type = None
    if content_type is None:
        content_type = 'application/octet-stream'

    # A nameless file is reported under its form-field name.
    if not filename:
        filename = key

    disposition = 'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)
    return [
        to_bytes('--%s' % boundary),
        to_bytes(disposition),
        to_bytes('Content-Type: %s' % content_type),
        b'',
        to_bytes(file.read())
    ]
class RequestFactory(object):
    """
    Class that lets you create mock Request objects for use in testing.

    Usage:

    rf = RequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})

    Once you have a request object you can pass it to any view function,
    just as if that view had been hooked up using a URLconf.
    """
    def __init__(self, **defaults):
        # Extra WSGI environ entries applied to every request built here.
        self.defaults = defaults
        self.cookies = SimpleCookie()
        # Shared wsgi.errors stream for all generated requests.
        self.errors = BytesIO()
def _base_environ(self, **request):
    """
    The base environment for a request.

    Keys given in ``request`` override both the hard-coded defaults below
    and the factory-level ``self.defaults``.
    """
    # This is a minimal valid WSGI environ dictionary, plus:
    # - HTTP_COOKIE: for cookie support,
    # - REMOTE_ADDR: often useful, see #8551.
    # See http://www.python.org/dev/peps/pep-3333/#environ-variables
    # (str() wrappers keep native-str values under both Python 2 and 3.)
    environ = {
        'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
        'PATH_INFO': str('/'),
        'REMOTE_ADDR': str('127.0.0.1'),
        'REQUEST_METHOD': str('GET'),
        'SCRIPT_NAME': str(''),
        'SERVER_NAME': str('testserver'),
        'SERVER_PORT': str('80'),
        'SERVER_PROTOCOL': str('HTTP/1.1'),
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': str('http'),
        'wsgi.input': FakePayload(b''),
        'wsgi.errors': self.errors,
        'wsgi.multiprocess': True,
        'wsgi.multithread': False,
        'wsgi.run_once': False,
    }
    environ.update(self.defaults)
    environ.update(request)
    return environ
def request(self, **request):
    "Construct a generic request object."
    # WSGIRequest parses the merged environ exactly as a real WSGI server
    # would hand it to Django.
    return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
    """Encode ``data`` into a request body according to ``content_type``."""
    # Compare by value, not identity: a caller that builds an equal
    # 'multipart/form-data; boundary=...' string itself must hit the
    # multipart encoder too. The previous ``is MULTIPART_CONTENT`` check
    # silently fell through to the charset branch for equal-but-distinct
    # strings and byte-encoded the dict's repr.
    if content_type == MULTIPART_CONTENT:
        return encode_multipart(BOUNDARY, data)
    else:
        # Encode the content so that the byte representation is correct.
        match = CONTENT_TYPE_RE.match(content_type)
        if match:
            charset = match.group(1)
        else:
            charset = settings.DEFAULT_CHARSET
        return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
    # ``parsed`` is a urlparse() 6-tuple: index 2 is the path, 3 the params.
    path = force_str(parsed[2])
    # If there are parameters, add them
    if parsed[3]:
        path += str(";") + force_str(parsed[3])
    path = uri_to_iri(path).encode(UTF_8)
    # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
    # decoded with ISO-8859-1. We replicate this behavior here.
    # Refs comment in `get_bytes_from_wsgi()`.
    return path.decode(ISO_8859_1) if six.PY3 else path
def get(self, path, data=None, secure=False, **extra):
    "Construct a GET request."
    data = {} if data is None else data
    # GET payloads travel in the query string, not the body.
    r = {
        'QUERY_STRING': urlencode(data, doseq=True),
    }
    r.update(extra)
    return self.generic('GET', path, secure=secure, **r)

def post(self, path, data=None, content_type=MULTIPART_CONTENT,
         secure=False, **extra):
    "Construct a POST request."
    data = {} if data is None else data
    post_data = self._encode_data(data, content_type)
    return self.generic('POST', path, post_data, content_type,
                        secure=secure, **extra)

def head(self, path, data=None, secure=False, **extra):
    "Construct a HEAD request."
    data = {} if data is None else data
    # Like GET, HEAD carries its data in the query string.
    r = {
        'QUERY_STRING': urlencode(data, doseq=True),
    }
    r.update(extra)
    return self.generic('HEAD', path, secure=secure, **r)

def trace(self, path, secure=False, **extra):
    "Construct a TRACE request."
    return self.generic('TRACE', path, secure=secure, **extra)

def options(self, path, data='', content_type='application/octet-stream',
            secure=False, **extra):
    "Construct an OPTIONS request."
    return self.generic('OPTIONS', path, data, content_type,
                        secure=secure, **extra)

def put(self, path, data='', content_type='application/octet-stream',
        secure=False, **extra):
    "Construct a PUT request."
    return self.generic('PUT', path, data, content_type,
                        secure=secure, **extra)

def patch(self, path, data='', content_type='application/octet-stream',
          secure=False, **extra):
    "Construct a PATCH request."
    return self.generic('PATCH', path, data, content_type,
                        secure=secure, **extra)

def delete(self, path, data='', content_type='application/octet-stream',
           secure=False, **extra):
    "Construct a DELETE request."
    return self.generic('DELETE', path, data, content_type,
                        secure=secure, **extra)
def generic(self, method, path, data='',
            content_type='application/octet-stream', secure=False,
            **extra):
    """Constructs an arbitrary HTTP request."""
    parsed = urlparse(force_str(path))
    data = force_bytes(data, settings.DEFAULT_CHARSET)
    r = {
        'PATH_INFO': self._get_path(parsed),
        'REQUEST_METHOD': str(method),
        'SERVER_PORT': str('443') if secure else str('80'),
        'wsgi.url_scheme': str('https') if secure else str('http'),
    }
    if data:
        # Only non-empty bodies get content headers and an input stream.
        r.update({
            'CONTENT_LENGTH': len(data),
            'CONTENT_TYPE': str(content_type),
            'wsgi.input': FakePayload(data),
        })
    r.update(extra)
    # If QUERY_STRING is absent or empty, we want to extract it from the URL.
    if not r.get('QUERY_STRING'):
        query_string = force_bytes(parsed[4])
        # WSGI requires latin-1 encoded strings. See get_path_info().
        if six.PY3:
            query_string = query_string.decode('iso-8859-1')
        r['QUERY_STRING'] = query_string
    return self.request(**r)
class Client(RequestFactory):
    """
    A class that can act as a client for testing purposes.

    It allows the user to compose GET and POST requests, and
    obtain the response that the server gave to those requests.
    The server Response objects are annotated with the details
    of the contexts and templates that were rendered during the
    process of serving the request.

    Client objects are stateful - they will retain cookie (and
    thus session) details for the lifetime of the Client instance.

    This is not intended as a replacement for Twill/Selenium or
    the like - it is here to allow testing against the
    contexts and templates produced by a view, rather than the
    HTML rendered to the end-user.
    """
    def __init__(self, enforce_csrf_checks=False, **defaults):
        super(Client, self).__init__(**defaults)
        # In-process WSGI handler that actually serves each request.
        self.handler = ClientHandler(enforce_csrf_checks)
        # Last exception captured from a view via got_request_exception.
        self.exc_info = None
def store_exc_info(self, **kwargs):
    """
    Stores exceptions when they are generated by a view.

    Connected to got_request_exception in request(); the saved exc_info is
    re-raised there so test failures surface the original traceback.
    """
    self.exc_info = sys.exc_info()
def _session(self):
    """
    Obtains the current session variables.

    Returns an empty dict when the sessions framework is not installed.
    """
    if apps.is_installed('django.contrib.sessions'):
        engine = import_module(settings.SESSION_ENGINE)
        cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
        if cookie:
            return engine.SessionStore(cookie.value)
        else:
            # No session cookie yet: create and persist a fresh session so
            # subsequent requests share it.
            s = engine.SessionStore()
            s.save()
            self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
            return s
    return {}
session = property(_session)
def request(self, **request):
    """
    The master request method. Composes the environment dictionary
    and passes to the handler, returning the result of the handler.
    Assumes defaults for the query environment, which can be overridden
    using the arguments to the request.
    """
    environ = self._base_environ(**request)

    # Curry a data dictionary into an instance of the template renderer
    # callback function.
    data = {}
    on_template_render = curry(store_rendered_templates, data)
    # dispatch_uids keyed on id(request) so concurrent/nested requests
    # don't tear down each other's receivers.
    signal_uid = "template-render-%s" % id(request)
    signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
    # Capture exceptions created by the handler.
    exception_uid = "request-exception-%s" % id(request)
    got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
    try:
        try:
            response = self.handler(environ)
        except TemplateDoesNotExist as e:
            # If the view raises an exception, Django will attempt to show
            # the 500.html template. If that template is not available,
            # we should ignore the error in favor of re-raising the
            # underlying exception that caused the 500 error. Any other
            # template found to be missing during view error handling
            # should be reported as-is.
            if e.args != ('500.html',):
                raise

        # Look for a signalled exception, clear the current context
        # exception data, then re-raise the signalled exception.
        # Also make sure that the signalled exception is cleared from
        # the local cache!
        if self.exc_info:
            exc_info = self.exc_info
            self.exc_info = None
            six.reraise(*exc_info)

        # Save the client and request that stimulated the response.
        response.client = self
        response.request = request

        # Add any rendered template detail to the response.
        response.templates = data.get("templates", [])
        response.context = data.get("context")

        # Lazy JSON accessor: parsing only happens if the test asks for it.
        response.json = curry(self._parse_json, response)

        # Attach the ResolverMatch instance to the response
        response.resolver_match = SimpleLazyObject(
            lambda: urlresolvers.resolve(request['PATH_INFO']))

        # Flatten a single context. Not really necessary anymore thanks to
        # the __getattr__ flattening in ContextList, but has some edge-case
        # backwards-compatibility implications.
        if response.context and len(response.context) == 1:
            response.context = response.context[0]

        # Update persistent cookie data.
        if response.cookies:
            self.cookies.update(response.cookies)
        return response
    finally:
        signals.template_rendered.disconnect(dispatch_uid=signal_uid)
        got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
    """
    Requests a response from the server using GET.

    With ``follow=True`` redirects are resolved and recorded on
    ``response.redirect_chain``.
    """
    response = super(Client, self).get(path, data=data, secure=secure,
                                       **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response

def post(self, path, data=None, content_type=MULTIPART_CONTENT,
         follow=False, secure=False, **extra):
    """
    Requests a response from the server using POST.
    """
    response = super(Client, self).post(path, data=data,
                                        content_type=content_type,
                                        secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response

def head(self, path, data=None, follow=False, secure=False, **extra):
    """
    Request a response from the server using HEAD.
    """
    response = super(Client, self).head(path, data=data, secure=secure,
                                        **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response

def options(self, path, data='', content_type='application/octet-stream',
            follow=False, secure=False, **extra):
    """
    Request a response from the server using OPTIONS.
    """
    response = super(Client, self).options(path, data=data,
                                           content_type=content_type,
                                           secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response

def put(self, path, data='', content_type='application/octet-stream',
        follow=False, secure=False, **extra):
    """
    Send a resource to the server using PUT.
    """
    response = super(Client, self).put(path, data=data,
                                       content_type=content_type,
                                       secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response

def patch(self, path, data='', content_type='application/octet-stream',
          follow=False, secure=False, **extra):
    """
    Send a resource to the server using PATCH.
    """
    response = super(Client, self).patch(path, data=data,
                                         content_type=content_type,
                                         secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response

def delete(self, path, data='', content_type='application/octet-stream',
           follow=False, secure=False, **extra):
    """
    Send a DELETE request to the server.
    """
    response = super(Client, self).delete(path, data=data,
                                          content_type=content_type,
                                          secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response

def trace(self, path, data='', follow=False, secure=False, **extra):
    """
    Send a TRACE request to the server.
    """
    response = super(Client, self).trace(path, data=data, secure=secure, **extra)
    if follow:
        response = self._handle_redirects(response, **extra)
    return response
def login(self, **credentials):
    """
    Sets the Factory to appear as if it has successfully logged into a site.

    Returns True if login is possible; False if the provided credentials
    are incorrect, or the user is inactive, or if the sessions framework is
    not available.
    """
    from django.contrib.auth import authenticate
    user = authenticate(**credentials)
    if (user and user.is_active and
            apps.is_installed('django.contrib.sessions')):
        self._login(user)
        return True
    else:
        return False

def force_login(self, user, backend=None):
    # Bypass authenticate(): trust the caller, stamp the chosen backend
    # onto the user, and create the session directly.
    if backend is None:
        backend = settings.AUTHENTICATION_BACKENDS[0]
    user.backend = backend
    self._login(user)
def _login(self, user):
    """Create a session for ``user`` and store the session cookie locally."""
    from django.contrib.auth import login
    engine = import_module(settings.SESSION_ENGINE)

    # Create a fake request to store login details.
    request = HttpRequest()

    if self.session:
        request.session = self.session
    else:
        request.session = engine.SessionStore()
    login(request, user)

    # Save the session values.
    request.session.save()

    # Set the cookie to represent the session.
    session_cookie = settings.SESSION_COOKIE_NAME
    self.cookies[session_cookie] = request.session.session_key
    cookie_data = {
        'max-age': None,
        'path': '/',
        'domain': settings.SESSION_COOKIE_DOMAIN,
        'secure': settings.SESSION_COOKIE_SECURE or None,
        'expires': None,
    }
    self.cookies[session_cookie].update(cookie_data)
def logout(self):
    """
    Removes the authenticated user's cookies and session object.
    Causes the authenticated user to be logged out.
    """
    from django.contrib.auth import get_user, logout
    request = HttpRequest()
    engine = import_module(settings.SESSION_ENGINE)
    if self.session:
        request.session = self.session
        # Resolve the current user from the session so logout() runs with
        # the right user attached to the request.
        request.user = get_user(request)
    else:
        request.session = engine.SessionStore()
    logout(request)
    # Drop every cookie held by the client, not just the session cookie.
    self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if 'application/json' not in response.get('Content-Type'):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
return json.loads(response.content.decode(), **extra)
def _handle_redirects(self, response, **extra):
    "Follows any redirects by requesting responses from the server using GET."
    response.redirect_chain = []
    while response.status_code in (301, 302, 303, 307):
        response_url = response.url
        redirect_chain = response.redirect_chain
        redirect_chain.append((response_url, response.status_code))
        url = urlsplit(response_url)
        # Propagate scheme/host/port from the redirect target so the next
        # request is issued against the right (possibly different) server.
        if url.scheme:
            extra['wsgi.url_scheme'] = url.scheme
        if url.hostname:
            extra['SERVER_NAME'] = url.hostname
        if url.port:
            extra['SERVER_PORT'] = str(url.port)
        response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
        response.redirect_chain = redirect_chain
        if redirect_chain[-1] in redirect_chain[:-1]:
            # Check that we're not redirecting to somewhere we've already
            # been to, to prevent loops.
            raise RedirectCycleError("Redirect loop detected.", last_response=response)
        if len(redirect_chain) > 20:
            # Such a lengthy chain likely also means a loop, but one with
            # a growing path, changing view, or changing query argument;
            # 20 is the value of "network.http.redirection-limit" from Firefox.
            raise RedirectCycleError("Too many redirects.", last_response=response)
    return response
| bsd-3-clause |
sayakbiswas/CloudLang | CloudLangNeuralNets/NER/utils.py | 2 | 8718 | import os
import re
import codecs
import numpy as np
import theano
models_path = "./models"
eval_path = "./evaluation"
eval_temp = os.path.join(eval_path, "temp")
eval_script = os.path.join(eval_path, "conlleval")
def get_name(parameters):
    """
    Generate a model name from its parameters.

    Each (key, value) pair becomes "key=value"; path-like string values are
    reduced to their final path component, commas inside values are dropped,
    and characters unsafe in file names are stripped from the result.
    """
    parts = []
    for key, value in parameters.items():
        # For path-like strings keep only the text after the last '/'.
        if type(value) is str and "/" in value:
            value = value.rsplit('/', 1)[-1]
        parts.append("%s=%s" % (key, str(value).replace(',', '')))
    name = ",".join(parts)
    # Remove characters that are not allowed in file names.
    return "".join(c for c in name if c not in "\/:*?<>|")
def set_values(name, param, pretrained):
    """
    Initialize a network parameter with pretrained values.
    We check that sizes are compatible.
    """
    current = param.get_value()
    # Refuse silently-wrong initialisations: element counts must agree.
    if pretrained.size != current.size:
        raise Exception(
            "Size mismatch for parameter %s. Expected %i, found %i."
            % (name, current.size, pretrained.size)
        )
    # Reshape to the parameter's layout and store as float32.
    param.set_value(np.reshape(pretrained, current.shape).astype(np.float32))
def shared(shape, name):
    """
    Create a shared object of a numpy array.
    """
    if len(shape) == 1:
        value = np.zeros(shape)  # bias are initialized with zeros
    else:
        # Glorot/Xavier-style scale: uniform in [-drange, drange) with
        # drange = sqrt(6 / (fan_in + fan_out)).
        drange = np.sqrt(6. / (np.sum(shape)))
        value = drange * np.random.uniform(low=-1.0, high=1.0, size=shape)
    return theano.shared(value=value.astype(theano.config.floatX), name=name)
def create_dico(item_list):
    """
    Create a dictionary of items from a list of list of items.

    Returns a frequency dictionary mapping each item to its total count.
    """
    assert type(item_list) is list
    counts = {}
    for sequence in item_list:
        for element in sequence:
            counts[element] = counts.get(element, 0) + 1
    return counts
def create_mapping(dico):
    """
    Create a mapping (item to ID / ID to item) from a dictionary.
    Items are ordered by decreasing frequency.
    """
    # Most frequent first; ties broken by the item itself for determinism.
    ranked = sorted(dico.items(), key=lambda kv: (-kv[1], kv[0]))
    id_to_item = dict(enumerate(item for item, _ in ranked))
    item_to_id = {item: idx for idx, item in id_to_item.items()}
    return item_to_id, id_to_item
def zero_digits(s):
    """
    Replace every digit in a string by a zero.
    """
    # Raw string: '\d' is an invalid escape sequence (DeprecationWarning in
    # Python 3.6+); r'\d' expresses the same regex explicitly.
    return re.sub(r'\d', '0', s)
def iob2(tags):
    """
    Check that tags have a valid IOB format.
    Tags in IOB1 format are converted to IOB2.
    """
    # NOTE: `tags` is modified in place; the return value only signals
    # whether the input was well-formed (False aborts without finishing
    # the conversion).
    for i, tag in enumerate(tags):
        if tag == 'O':
            continue
        split = tag.split('-')
        # Anything other than "B-X" / "I-X" (besides plain "O") is invalid.
        if len(split) != 2 or split[0] not in ['I', 'B']:
            return False
        if split[0] == 'B':
            continue
        elif i == 0 or tags[i - 1] == 'O':  # conversion IOB1 to IOB2
            tags[i] = 'B' + tag[1:]
        elif tags[i - 1][1:] == tag[1:]:
            continue
        else:  # conversion IOB1 to IOB2
            tags[i] = 'B' + tag[1:]
    return True
def iob_iobes(tags):
    """
    IOB -> IOBES

    B- tags that end a span become S- (single), I- tags that end a span
    become E- (end); tags continued by an I- tag are kept as-is.
    """
    new_tags = []
    last = len(tags) - 1
    for i, tag in enumerate(tags):
        if tag == 'O':
            new_tags.append(tag)
            continue
        prefix = tag.split('-')[0]
        # A span continues when the following tag is an I- tag.
        continued = i < last and tags[i + 1].split('-')[0] == 'I'
        if prefix == 'B':
            new_tags.append(tag if continued else tag.replace('B-', 'S-'))
        elif prefix == 'I':
            new_tags.append(tag if continued else tag.replace('I-', 'E-'))
        else:
            raise Exception('Invalid IOB format!')
    return new_tags
def iobes_iob(tags):
    """
    IOBES -> IOB

    S- maps back to B-, E- maps back to I-; B-, I- and O are unchanged.
    """
    converted = []
    for tag in tags:
        prefix = tag.split('-')[0]
        if prefix in ('B', 'I', 'O'):
            converted.append(tag)
        elif prefix == 'S':
            converted.append(tag.replace('S-', 'B-'))
        elif prefix == 'E':
            converted.append(tag.replace('E-', 'I-'))
        else:
            raise Exception('Invalid format!')
    return converted
def insert_singletons(words, singletons, p=0.5):
    """
    Replace singletons by the unknown word with a probability p.

    Index 0 stands for the unknown word; only words present in `singletons`
    may be replaced.
    """
    # Short-circuit keeps the RNG draw count identical to the original:
    # uniform() is only sampled for words that are singletons.
    return [0 if word in singletons and np.random.uniform() < p else word
            for word in words]
def pad_word_chars(words):
    """
    Pad the characters of the words in a sentence.
    Input:
        - list of lists of ints (list of words, a word being a list of char indexes)
    Output:
        - padded list of lists of ints
        - padded list of lists of ints (where chars are reversed)
        - list of ints corresponding to the index of the last character of each word
    """
    longest = max(len(word) for word in words)
    padded_fwd, padded_bwd, last_index = [], [], []
    for word in words:
        # Right-pad with zeros up to the longest word in the sentence.
        filler = [0] * (longest - len(word))
        padded_fwd.append(word + filler)
        padded_bwd.append(word[::-1] + filler)
        last_index.append(len(word) - 1)
    return padded_fwd, padded_bwd, last_index
def create_input(data, parameters, add_label, singletons=None):
    """
    Take sentence data and return an input for
    the training or the evaluation function.

    Parameters:
        data: dict with at least 'words' and 'chars' ('caps' and 'tags'
            are read when the corresponding options are enabled).
        parameters: model configuration flags ('word_dim', 'char_dim',
            'char_bidirect', 'cap_dim').
        add_label: when True, append the gold tags (training input).
        singletons: optional set of words seen only once in training;
            they are randomly replaced by the unknown word.

    Returns a list of feature arrays in the order the network expects.
    """
    words = data['words']
    chars = data['chars']
    if singletons is not None:
        words = insert_singletons(words, singletons)
    if parameters['cap_dim']:
        caps = data['caps']
    char_for, char_rev, char_pos = pad_word_chars(chars)
    # Renamed from `input` to avoid shadowing the builtin of the same name.
    inputs = []
    if parameters['word_dim']:
        inputs.append(words)
    if parameters['char_dim']:
        inputs.append(char_for)
        if parameters['char_bidirect']:
            inputs.append(char_rev)
        inputs.append(char_pos)
    if parameters['cap_dim']:
        inputs.append(caps)
    if add_label:
        inputs.append(data['tags'])
    return inputs
def evaluate(parameters, f_eval, raw_sentences, parsed_sentences,
             id_to_tag, dictionary_tags):
    """
    Evaluate current model using CoNLL script.

    NOTE: Python 2 only (print statements, xrange). Writes predictions to a
    temp file, shells out to the external `conlleval` scorer, prints a
    per-tag confusion matrix, and returns the overall F1 as a float.
    """
    n_tags = len(id_to_tag)
    predictions = []
    # count[real, pred] accumulates the confusion matrix over all tokens.
    count = np.zeros((n_tags, n_tags), dtype=np.int32)
    for raw_sentence, data in zip(raw_sentences, parsed_sentences):
        input = create_input(data, parameters, False)
        if parameters['crf']:
            # NOTE(review): the [1:-1] slice presumably strips begin/end
            # padding tags added by the CRF decoder — confirm against model.
            y_preds = np.array(f_eval(*input))[1:-1]
        else:
            y_preds = f_eval(*input).argmax(axis=1)
        y_reals = np.array(data['tags']).astype(np.int32)
        assert len(y_preds) == len(y_reals)
        p_tags = [id_to_tag[y_pred] for y_pred in y_preds]
        r_tags = [id_to_tag[y_real] for y_real in y_reals]
        # The CoNLL scorer expects IOB tags, so convert back from IOBES.
        if parameters['tag_scheme'] == 'iobes':
            p_tags = iobes_iob(p_tags)
            r_tags = iobes_iob(r_tags)
        for i, (y_pred, y_real) in enumerate(zip(y_preds, y_reals)):
            new_line = " ".join(raw_sentence[i][:-1] + [r_tags[i], p_tags[i]])
            predictions.append(new_line)
            count[y_real, y_pred] += 1
        # Blank line separates sentences in the CoNLL format.
        predictions.append("")
    # Write predictions to disk and run CoNLL script externally
    # NOTE(review): os.system builds a shell command from file paths —
    # safe only while eval_temp/eval_script are trusted constants.
    eval_id = np.random.randint(1000000, 2000000)
    output_path = os.path.join(eval_temp, "eval.%i.output" % eval_id)
    scores_path = os.path.join(eval_temp, "eval.%i.scores" % eval_id)
    with codecs.open(output_path, 'w', 'utf8') as f:
        f.write("\n".join(predictions))
    os.system("%s < %s > %s" % (eval_script, output_path, scores_path))
    # CoNLL evaluation results
    eval_lines = [l.rstrip() for l in codecs.open(scores_path, 'r', 'utf8')]
    for line in eval_lines:
        print line
    # Remove temp files
    # os.remove(output_path)
    # os.remove(scores_path)
    # Confusion matrix with accuracy for each tag
    print ("{: >2}{: >7}{: >7}%s{: >9}" % ("{: >7}" * n_tags)).format(
        "ID", "NE", "Total",
        *([id_to_tag[i] for i in xrange(n_tags)] + ["Percent"])
    )
    for i in xrange(n_tags):
        print ("{: >2}{: >7}{: >7}%s{: >9}" % ("{: >7}" * n_tags)).format(
            str(i), id_to_tag[i], str(count[i].sum()),
            *([count[i][j] for j in xrange(n_tags)] +
              ["%.3f" % (count[i][i] * 100. / max(1, count[i].sum()))])
        )
    # Global accuracy
    print "%i/%i (%.5f%%)" % (
        count.trace(), count.sum(), 100. * count.trace() / max(1, count.sum())
    )
    # F1 on all entities
    return float(eval_lines[1].strip().split()[-1])
| mit |
martinwicke/tensorflow | tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py | 20 | 2885 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the gradient of `tf.sparse_tensor_dense_matmul()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SparseTensorDenseMatMulGradientTest(tf.test.TestCase):
  """Numerically checks gradients of tf.sparse_tensor_dense_matmul.

  NOTE(review): written against a pre-1.0 TensorFlow API
  (tf.SparseTensor(shape=...), test_session, tf.sparse_tensor_dense_matmul).
  """

  def _sparsify(self, x):
    # Zero out entries below 0.5, then build a SparseTensor from the
    # survivors; also return the number of non-zero values (nnz).
    x[x < 0.5] = 0
    non_zero = np.where(x)
    x_indices = np.vstack(non_zero).astype(np.int64).T
    x_values = x[non_zero]
    x_shape = x.shape
    return tf.SparseTensor(
        indices=x_indices, values=x_values, shape=x_shape), len(x_values)

  def _randomTensor(self, size, np_dtype, adjoint=False, sparse=False):
    # Draw a random (n, m) matrix; transpose it when the caller will use it
    # with an adjoint flag, and optionally sparsify it.
    n, m = size
    x = np.random.randn(n, m).astype(np_dtype)
    if adjoint:
      x = x.transpose()
    if sparse:
      return self._sparsify(x)
    else:
      return tf.constant(x, dtype=np_dtype)

  def _testGradients(self, adjoint_a, adjoint_b, name, np_dtype):
    # Random small problem sizes keep the numeric gradient check fast.
    n, k, m = np.random.randint(1, 10, size=3)
    sp_t, nnz = self._randomTensor(
        [n, k], np_dtype, adjoint=adjoint_a, sparse=True)
    dense_t = self._randomTensor([k, m], np_dtype, adjoint=adjoint_b)
    matmul = tf.sparse_tensor_dense_matmul(
        sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)
    with self.test_session(use_gpu=True):
      dense_t_shape = [m, k] if adjoint_b else [k, m]
      sp_t_val_shape = [nnz]
      # Compare analytic vs. numeric gradients w.r.t. both the dense input
      # and the sparse tensor's values.
      err = tf.test.compute_gradient_error([dense_t, sp_t.values],
                                           [dense_t_shape, sp_t_val_shape],
                                           matmul, [n, m])
      print("%s gradient err = %s" % (name, err))
      self.assertLess(err, 1e-3)

  def _testGradientsType(self, np_dtype):
    # Exercise all four adjoint flag combinations for the given dtype.
    for adjoint_a in [True, False]:
      for adjoint_b in [True, False]:
        name = "sparse_tensor_dense_matmul_%s_%s_%s" % (adjoint_a, adjoint_b,
                                                        np_dtype.__name__)
        self._testGradients(adjoint_a, adjoint_b, name, np_dtype)

  def testGradients(self):
    np.random.seed(5)  # Fix seed to avoid flakiness
    self._testGradientsType(np.float32)
    self._testGradientsType(np.float64)
# Run the test suite when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.